diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
new file mode 100644
index 00000000..6e08fc4f
--- /dev/null
+++ b/.github/workflows/go.yml
@@ -0,0 +1,42 @@
+name: Go
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ branches: [ master ]
+
+jobs:
+
+ build:
+ name: Build
+ runs-on: ubuntu-latest
+ steps:
+
+ - name: Set up Go 1.x
+ uses: actions/setup-go@v2
+ with:
+ go-version: 1.13.12
+ id: go
+
+ - name: Setup C++ environment
+ uses: kurocha/setup-cpp@v1
+
+ - name: Check out code into the Go module directory
+ uses: actions/checkout@v2
+
+ - name: Get dependencies
+ run: |
+ sudo apt-get install libsnappy1v5 libsnappy-dev libjemalloc2 libjemalloc-dev
+ git clone https://github.com/absolute8511/rocksdb.git /tmp/rocksdb
+ pushd /tmp/rocksdb && git checkout v6.4.6-patched && PORTABLE=1 WITH_JEMALLOC_FLAG=1 JEMALLOC=1 make static_lib && popd
+
+ - name: Test
+ run: |
+ wget -c https://github.com/coreos/etcd/releases/download/v2.3.8/etcd-v2.3.8-linux-amd64.tar.gz
+ tar -xvzf etcd-v2.3.8-linux-amd64.tar.gz
+ ./etcd-v2.3.8-linux-amd64/etcd -name=test-etcd0 -initial-advertise-peer-urls=http://127.0.0.1:2380 -listen-client-urls=http://127.0.0.1:2379 -advertise-client-urls=http://127.0.0.1:2379 -listen-peer-urls=http://127.0.0.1:2380 -initial-cluster="test-etcd0=http://127.0.0.1:2380" -initial-cluster-state=new --data-dir ./test-etcd > etcd.log 2>&1 &
+ ROCKSDB=/tmp/rocksdb ./test.sh
+
+ - name: Codecov
+ uses: codecov/codecov-action@v1.0.7
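The workflow above brings up a local etcd v2.3.8 on 127.0.0.1:2379 in the background and then runs `test.sh` immediately. As an illustration only (not part of this patch; the helper name and timeout are hypothetical), a small Go snippet like the following could wait until etcd answers on its `/version` endpoint before the tests start:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitForEtcd polls the etcd /version endpoint until it responds with 200 OK
// or the timeout expires. Hypothetical helper, not part of this patch.
func waitForEtcd(endpoint string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := http.Get(endpoint + "/version")
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("etcd at %s not ready within %v", endpoint, timeout)
}

func main() {
	if err := waitForEtcd("http://127.0.0.1:2379", 30*time.Second); err != nil {
		panic(err)
	}
	fmt.Println("etcd is ready")
}
```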
diff --git a/.gitignore b/.gitignore
index fa26fd59..12342824 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,5 @@
.DS_Store
.vscode
build/*
+vendor/github.com
+vendor/gopkg.in
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
index 3a041787..f870ac88 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,7 +1,6 @@
language: go
go:
- - 1.8.x
- - 1.9.x
+ - 1.13.x
env:
- GOARCH=amd64 TEST_RACE=false
- GOARCH=amd64 TEST_RACE=true
@@ -17,20 +16,14 @@ addons:
- g++-4.9
install:
- export CXX="g++-4.9" CC="gcc-4.9"
- - sudo apt-get install libsnappy1 libsnappy-dev
+ - sudo apt-get install libsnappy1v5 libsnappy-dev libjemalloc2 libjemalloc-dev
- git clone https://github.com/absolute8511/rocksdb.git /tmp/rocksdb
- - pushd /tmp/rocksdb && git checkout v5.8.8-share-rate-limiter && USE_SSE=1 make static_lib && popd
+ - pushd /tmp/rocksdb && git checkout v6.4.6-patched && PORTABLE=1 WITH_JEMALLOC_FLAG=1 JEMALLOC=1 make static_lib && popd
script:
- - CGO_CFLAGS="-I/tmp/rocksdb/include" CGO_LDFLAGS="-L/tmp/rocksdb -lrocksdb -lstdc++ -lm -lsnappy -lrt" go get github.com/absolute8511/gorocksdb
- - CGO_CFLAGS="-I/tmp/rocksdb/include" CGO_LDFLAGS="-L/tmp/rocksdb -lrocksdb -lstdc++ -lm -lsnappy -lrt" go install -race github.com/absolute8511/gorocksdb
- - curl -s https://raw.githubusercontent.com/pote/gpm/v1.4.0/bin/gpm > gpm
- - chmod +x gpm
- - ./gpm install || true
- wget -c https://github.com/coreos/etcd/releases/download/v2.3.8/etcd-v2.3.8-linux-amd64.tar.gz
- tar -xvzf etcd-v2.3.8-linux-amd64.tar.gz
- ./etcd-v2.3.8-linux-amd64/etcd -name=test-etcd0 -initial-advertise-peer-urls=http://127.0.0.1:2380 -listen-client-urls=http://127.0.0.1:2379 -advertise-client-urls=http://127.0.0.1:2379 -listen-peer-urls=http://127.0.0.1:2380 -initial-cluster="test-etcd0=http://127.0.0.1:2380" -initial-cluster-state=new --data-dir ./test-etcd > etcd.log 2>&1 &
- - go get -u golang.org/x/sys/...
- - ./test.sh
+ - ROCKSDB=/tmp/rocksdb ./test.sh
notifications:
email: false
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..1b216364
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,9 @@
+FROM busybox
+
+RUN mkdir -p /data/logs/zankv/ && yum install -y rsync snappy jemalloc
+ADD dist/docker/bin/ /opt/zankv/bin/
+ADD scripts/ /opt/zankv/scripts/
+
+EXPOSE 18001 12380 12381 12379
+
+VOLUME /data
diff --git a/Godeps b/Godeps
deleted file mode 100644
index 06e293fb..00000000
--- a/Godeps
+++ /dev/null
@@ -1,26 +0,0 @@
-github.com/absolute8511/glog
-github.com/absolute8511/go-zanredisdb
-github.com/absolute8511/xlock2
-github.com/gogo/protobuf 342cbe0a04158f6dcb03ca0079991a51a4248c02
-google.golang.org/grpc 6b51017f791ae1cfbec89c52efdf444b13b550ef
-github.com/ugorji/go 708a42d246822952f38190a8d8c4e6b16a0e600c
-github.com/coreos/etcd dd0d5902177b6336b8cf344e6cbf1962b5981dde
-github.com/coreos/go-semver/semver
-github.com/coreos/go-systemd/journal
-github.com/coreos/pkg
-github.com/gobwas/glob
-github.com/judwhite/go-svc/svc
-github.com/julienschmidt/httprouter
-github.com/prometheus/client_golang/prometheus
-github.com/spaolacci/murmur3
-github.com/absolute8511/redcon
-github.com/xiang90/probing
-github.com/siddontang/goredis
-github.com/mreiferson/go-options
-github.com/BurntSushi/toml
-github.com/shirou/gopsutil
-github.com/stretchr/testify/assert
-github.com/tidwall/gjson
-github.com/tidwall/sjson
-github.com/absolute8511/hyperloglog
-github.com/hashicorp/golang-lru
diff --git a/Makefile b/Makefile
index 2d942252..abf7b98a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,11 +1,18 @@
PREFIX=/usr/local
DESTDIR=
BINDIR=${PREFIX}/bin
-PROJECT?=github.com/absolute8511/ZanRedisDB
-VERBINARY?= 0.3.2
+PROJECT?=github.com/youzan/ZanRedisDB
+VERBINARY?= 0.9.4
COMMIT?=$(shell git rev-parse --short HEAD)
BUILD_TIME?=$(shell date '+%Y-%m-%d_%H:%M:%S-%Z')
-GOFLAGS=-ldflags "-s -w -X ${PROJECT}/common.VerBinary=${VERBINARY} -X ${PROJECT}/common.Commit=${COMMIT} -X ${PROJECT}/common.BuildTime=${BUILD_TIME}"
+GOFLAGS=-ldflags "-X ${PROJECT}/common.VerBinary=${VERBINARY} -X ${PROJECT}/common.Commit=${COMMIT} -X ${PROJECT}/common.BuildTime=${BUILD_TIME}"
+
+CGO_CFLAGS="-I${ROCKSDB}/include"
+CGO_LDFLAGS="-L${ROCKSDB} -lrocksdb -lstdc++ -lm -lsnappy -ljemalloc"
+
+ifeq (${GOOS},linux)
+ CGO_LDFLAGS="-L${ROCKSDB} -lrocksdb -lstdc++ -lm -lsnappy -lrt -ljemalloc -ldl"
+endif
BLDDIR = build
EXT=
@@ -23,7 +30,10 @@ $(BLDDIR)/restore: $(wildcard apps/restore/*.go)
$(BLDDIR)/%:
@mkdir -p $(dir $@)
- go build -i ${GOFLAGS} -o $@ ./apps/$*
+ @echo $(GOOS)
+ @echo $(CGO_LDFLAGS)
+ CGO_CFLAGS=${CGO_CFLAGS} CGO_LDFLAGS=${CGO_LDFLAGS} GO111MODULE=on go get github.com/youzan/gorocksdb
+ CGO_CFLAGS=${CGO_CFLAGS} CGO_LDFLAGS=${CGO_LDFLAGS} GO111MODULE=on go build ${GOFLAGS} -o $@ ./apps/$*
$(APPS): %: $(BLDDIR)/%
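The Makefile injects version metadata at link time through `-ldflags "-X ${PROJECT}/common.VerBinary=${VERBINARY} ..."`. The standalone sketch below only illustrates how `-X` overrides package-level string variables; the variable names mirror the ones referenced by GOFLAGS but live in `main` here so the example is self-contained:

```go
package main

import "fmt"

// These mirror common.VerBinary, common.Commit and common.BuildTime referenced
// by GOFLAGS in the Makefile; they default to "unknown" unless overridden by
// the linker.
var (
	VerBinary = "unknown"
	Commit    = "unknown"
	BuildTime = "unknown"
)

func main() {
	// Built with, for example:
	//   go build -ldflags "-X main.VerBinary=0.9.4 -X main.Commit=abc123 -X main.BuildTime=2020-01-01" .
	// the values above are replaced at link time.
	fmt.Printf("version=%s commit=%s built=%s\n", VerBinary, Commit, BuildTime)
}
```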
diff --git a/README.md b/README.md
index 19a00e4a..e0f6fed6 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
# ZanRedisDB
-[![Codacy Badge](https://api.codacy.com/project/badge/Grade/3288ed77f27c4f8a998e35ef936edc6f)](https://www.codacy.com/app/cool8511/ZanRedisDB?utm_source=github.com&utm_medium=referral&utm_content=absolute8511/ZanRedisDB&utm_campaign=badger)
-[![Build Status](https://travis-ci.org/absolute8511/ZanRedisDB.svg?branch=master)](https://travis-ci.org/absolute8511/ZanRedisDB) [![GitHub release](https://img.shields.io/github/release/absolute8511/ZanRedisDB.svg)](https://github.com/absolute8511/ZanRedisDB/releases/latest) [![codecov](https://codecov.io/gh/absolute8511/ZanRedisDB/branch/master/graph/badge.svg)](https://codecov.io/gh/absolute8511/ZanRedisDB) [![Go Report Card](https://goreportcard.com/badge/github.com/absolute8511/ZanRedisDB)](https://goreportcard.com/report/github.com/absolute8511/ZanRedisDB) [![Documentation Status](https://readthedocs.org/projects/zanredisdb/badge/?version=latest)](http://zanredisdb.readthedocs.io/en/latest/?badge=latest)
+[![Codacy Badge](https://api.codacy.com/project/badge/Grade/5bb2847636f343e79edf048a0394de04)](https://www.codacy.com/app/cool8511/youzan_ZanRedisDB?utm_source=github.com&utm_medium=referral&utm_content=youzan/ZanRedisDB&utm_campaign=Badge_Grade)
+![Go](https://github.com/youzan/ZanRedisDB/workflows/Go/badge.svg) [![Build Status](https://travis-ci.com/youzan/ZanRedisDB.svg?branch=master)](https://travis-ci.com/youzan/ZanRedisDB) [![GitHub release](https://img.shields.io/github/release/youzan/ZanRedisDB.svg)](https://github.com/youzan/ZanRedisDB/releases/latest) [![codecov](https://codecov.io/gh/youzan/ZanRedisDB/branch/master/graph/badge.svg)](https://codecov.io/gh/youzan/ZanRedisDB) [![Go Report Card](https://goreportcard.com/badge/github.com/youzan/ZanRedisDB)](https://goreportcard.com/report/github.com/youzan/ZanRedisDB) [![Documentation Status](https://readthedocs.org/projects/youzan-zanredisdb/badge/?version=latest)](http://youzan-zanredisdb.readthedocs.io/en/latest/?badge=latest)
## What is ZanRedisDB
@@ -21,36 +21,32 @@ apt-get install libsnappy1 libsnappy-dev (for Debian/Ubuntu)
brew install snappy (for Mac)
-Build the rocksdb
+Build the rocksdb with jemalloc
git clone https://github.com/absolute8511/rocksdb.git
cd rocksdb
-USE_SSE=1 make static_lib
+git checkout v6.4.6-patched
+PORTABLE=1 USE_SSE=1 USE_PCLMUL=1 WITH_JEMALLOC_FLAG=1 JEMALLOC=1 make static_lib
-Install the dependency:
+Install the dependency (only needed for older Go versions; with Go 1.13+ this is handled by Go modules):
-CGO_CFLAGS="-I/path/to/rocksdb/include" CGO_LDFLAGS="-L/path/to/rocksdb -lrocksdb -lstdc++ -lm -lsnappy -lrt" go get github.com/absolute8511/gorocksdb
+CGO_CFLAGS="-I/path/to/rocksdb/include" CGO_LDFLAGS="-L/path/to/rocksdb -lrocksdb -lstdc++ -lm -lsnappy -lrt -ljemalloc" go get github.com/youzan/gorocksdb
-CGO_CFLAGS="-I/path/to/rocksdb/include" CGO_LDFLAGS="-L/path/to/rocksdb -lrocksdb -lstdc++ -lm -lsnappy" go get github.com/absolute8511/gorocksdb (for MacOS)
-
-
-use the `gpm` to install other dependencies
-
-wget https://raw.githubusercontent.com/pote/gpm/v1.4.0/bin/gpm && chmod +x gpm && sudo mv gpm /usr/local/bin
-gpm get
+CGO_CFLAGS="-I/path/to/rocksdb/include" CGO_LDFLAGS="-L/path/to/rocksdb -lrocksdb -lstdc++ -lm -lsnappy -ljemalloc" go get github.com/youzan/gorocksdb (for MacOS)
+Use `dep ensure` to install the other dependencies, or use Go modules with Go 1.13+.
-Build zankv and placedriver from the source (only support go version 1.8+, gcc 4.9+ or xcode-command-line-tools on Mac):
+Build zankv and placedriver from the source (requires Go 1.10+, and gcc 4.9+ or the Xcode command line tools on Mac):
-make
+ROCKSDB=/path/to/rocksdb make
If you want package the binary release run the scripts
./pre-dist.sh
-./dist.sh
+ROCKSDB=/path/to/rocksdb ./dist.sh
## Deploy
@@ -59,6 +55,16 @@ If you want package the binary release run the scripts
* Deploy etcd cluster which is needed for the meta data for the namespaces
* Deploy the placedriver which is used for data placement: `placedriver -config=/path/to/config`
* Deploy the zankv for data storage server `zankv -config=/path/to/config`
+ * Init a namespace using the create-namespace API of the placedriver
+
+## OS-Level Tuning
+
+ * Set `vm.swappiness=0`
+ * Set `vm.min_free_kbytes` to at least 1GB (8GB on systems with more memory)
+ * Disable NUMA zone reclaim with `vm.zone_reclaim_mode=0`
+ * Disable THP (transparent huge pages)
+ * Avoid TCP delayed ACK with `echo 4 > /proc/sys/net/ipv4/tcp_delack_min` (only needed on old kernels)
+
## API
placedriver has several HTTP APIs to manager the namespace
@@ -114,16 +120,21 @@ based on this golang sdk if you want use the redis client in other language.
- [ ] Full text search support
* Operation
- [x] Backup and restore for cluster
- - [ ] More stats for read/write performance and errors.
+ - [x] More stats for read/write performance and errors.
* Client
- [x] High available for redis commands (Retry on fail)
- [ ] Extand redis commands to support index and search
- [x] Extand redis commands for advance scan
* Others (maybe)
- [ ] Support configure for Column storage friendly for OLAP
- - [ ] BoltDB as storage engine (read/range optimize)
- - [ ] Lua scripts support
+ - [x] Pebble as the storage engine
+ - [x] Support other memory storage engine
- [ ] Support export data to other systems
-[client-sdk]: https://github.com/absolute8511/go-zanredisdb
+[client-sdk]: https://github.com/youzan/go-zanredisdb
+
+## Thanks
+
+Many thanks to these great projects that make this project possible: etcd, RocksDB, ledisdb, pika.
+
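The build instructions above compile a patched RocksDB and then link the Go binaries against it through cgo. A quick way to verify that `CGO_CFLAGS`/`CGO_LDFLAGS` are wired correctly is a throw-away smoke test such as the sketch below; it assumes the youzan/gorocksdb fork keeps the upstream gorocksdb API (`NewDefaultOptions`, `OpenDb`, `Put`, `Get`) and is not part of this patch:

```go
package main

import (
	"fmt"

	"github.com/youzan/gorocksdb"
)

func main() {
	// Open (or create) a throw-away RocksDB database.
	opts := gorocksdb.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	db, err := gorocksdb.OpenDb(opts, "/tmp/gorocksdb-smoke-test")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	wo := gorocksdb.NewDefaultWriteOptions()
	ro := gorocksdb.NewDefaultReadOptions()
	defer wo.Destroy()
	defer ro.Destroy()

	// Round-trip a single key to prove the cgo link against librocksdb works.
	if err := db.Put(wo, []byte("k"), []byte("v")); err != nil {
		panic(err)
	}
	val, err := db.Get(ro, []byte("k"))
	if err != nil {
		panic(err)
	}
	defer val.Free()
	fmt.Printf("read back: %s\n", val.Data())
}
```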
diff --git a/apps/backup/main.go b/apps/backup/main.go
index f627464f..82bc8a94 100644
--- a/apps/backup/main.go
+++ b/apps/backup/main.go
@@ -11,7 +11,7 @@ import (
"sync"
"time"
- sdk "github.com/absolute8511/go-zanredisdb"
+ sdk "github.com/youzan/go-zanredisdb"
)
var (
@@ -22,6 +22,7 @@ var (
table = flagSet.String("table", "", "table name of backup")
backType = flagSet.String("type", "all", "which type you want to backup,split by ',' for multiple")
qps = flagSet.Int("qps", 1000, "qps")
+ readable = flagSet.Bool("readable", false, "output can be readable as text")
pass = flagSet.String("pass", "", "password of zankv")
)
@@ -126,19 +127,23 @@ func backupCommon(tp []byte, ch chan interface{}, file *os.File, f writeFunc) {
lenBuf := make([]byte, 4)
key := item[0].([]byte)
keyLen := len(key)
- binary.BigEndian.PutUint32(lenBuf, uint32(keyLen))
- n, err := file.Write(lenBuf)
- if err != nil {
- fmt.Printf("write key's len error.[ns=%s, table=%s, key=%s, err=%v]\n", *ns, *table, string(key), err)
- return
- }
-
- if n != 4 {
- fmt.Printf("write key's len length error. [ns=%s, table=%s, key=%s, len=%d]\n", *ns, *table, string(key), n)
- return
- }
-
- n, err = file.Write(key)
+ if *readable {
+ file.WriteString("\n")
+ } else {
+ binary.BigEndian.PutUint32(lenBuf, uint32(keyLen))
+ n, err := file.Write(lenBuf)
+ if err != nil {
+ fmt.Printf("write key's len error.[ns=%s, table=%s, key=%s, err=%v]\n", *ns, *table, string(key), err)
+ return
+ }
+
+ if n != 4 {
+ fmt.Printf("write key's len length error. [ns=%s, table=%s, key=%s, len=%d]\n", *ns, *table, string(key), n)
+ return
+ }
+ }
+
+ n, err := file.Write(key)
if err != nil {
fmt.Printf("write key error. [ns=%s, table=%s, key=%s, err=%v]\n", *ns, *table, string(key), err)
return
@@ -165,21 +170,25 @@ func backupCommon(tp []byte, ch chan interface{}, file *os.File, f writeFunc) {
func kvbackup(ch chan interface{}, file *os.File, client *sdk.ZanRedisClient) {
tp := []byte{0}
backupCommon(tp, ch, file, func(key []byte, item []interface{}, file *os.File) error {
- lenBuf := make([]byte, 4)
value := item[0].([]byte)
valLen := len(value)
- binary.BigEndian.PutUint32(lenBuf, uint32(valLen))
- n, err := file.Write(lenBuf)
- if err != nil {
- fmt.Printf("write val's len error. [ns=%s, table=%s, key=%s, val=%v, err=%v]\n", *ns, *table, string(key), value, err)
- return err
- }
- if n != 4 {
- fmt.Printf("write val's len length error. [ns=%s, table=%s, key=%s, val=%v, len=%d]\n", *ns, *table, string(key), value, n)
- return errWriteLen
+ if *readable {
+ file.WriteString("\n")
+ } else {
+ lenBuf := make([]byte, 4)
+ binary.BigEndian.PutUint32(lenBuf, uint32(valLen))
+ n, err := file.Write(lenBuf)
+ if err != nil {
+ fmt.Printf("write val's len error. [ns=%s, table=%s, key=%s, val=%v, err=%v]\n", *ns, *table, string(key), value, err)
+ return err
+ }
+ if n != 4 {
+ fmt.Printf("write val's len length error. [ns=%s, table=%s, key=%s, val=%v, len=%d]\n", *ns, *table, string(key), value, n)
+ return errWriteLen
+ }
}
- n, err = file.Write(value)
+ n, err := file.Write(value)
if err != nil {
fmt.Printf("write val error. [ns=%s, table=%s, key=%s, val=%v, err=%v]\n", *ns, *table, string(key), value, err)
return err
@@ -196,36 +205,44 @@ func hbackup(ch chan interface{}, file *os.File, client *sdk.ZanRedisClient) {
tp := []byte{1}
backupCommon(tp, ch, file, func(key []byte, item []interface{}, file *os.File) error {
lenBuf := make([]byte, 4)
- binary.BigEndian.PutUint32(lenBuf, uint32(len(item)))
- n, err := file.Write(lenBuf)
-
- if err != nil {
- fmt.Printf("write field-value count error.[ns=%s, table=%s, key=%s, err=%v]\n", *ns, *table, string(key), err)
- return err
- }
-
- if n != 4 {
- fmt.Printf("write field-value count length error. [ns=%s, table=%s, key=%s, count=%d, len=%d]\n",
- *ns, *table, string(key), len(item), n)
- return errWriteLen
- }
+ if *readable {
+ file.WriteString("\n")
+ } else {
+ binary.BigEndian.PutUint32(lenBuf, uint32(len(item)))
+ n, err := file.Write(lenBuf)
- for i := 0; i < len(item); i++ {
- fv := item[i].([]interface{})
- field := fv[0].([]byte)
- fieldLen := len(field)
- binary.BigEndian.PutUint32(lenBuf, uint32(fieldLen))
- n, err = file.Write(lenBuf)
if err != nil {
- fmt.Printf("write field's len error. [ns=%s, table=%s, key=%s, field=%v, err=%v]\n", *ns, *table, string(key), field, err)
+ fmt.Printf("write field-value count error.[ns=%s, table=%s, key=%s, err=%v]\n", *ns, *table, string(key), err)
return err
}
+
if n != 4 {
- fmt.Printf("write field's len length error. [ns=%s, table=%s, key=%s, field=%v, len=%d]\n", *ns, *table, string(key), field, n)
+ fmt.Printf("write field-value count length error. [ns=%s, table=%s, key=%s, count=%d, len=%d]\n",
+ *ns, *table, string(key), len(item), n)
return errWriteLen
}
+ }
- n, err = file.Write(field)
+ for i := 0; i < len(item); i++ {
+ fv := item[i].([]interface{})
+ field := fv[0].([]byte)
+ fieldLen := len(field)
+ if *readable {
+ file.WriteString("\n")
+ } else {
+ binary.BigEndian.PutUint32(lenBuf, uint32(fieldLen))
+ n, err := file.Write(lenBuf)
+ if err != nil {
+ fmt.Printf("write field's len error. [ns=%s, table=%s, key=%s, field=%v, err=%v]\n", *ns, *table, string(key), field, err)
+ return err
+ }
+ if n != 4 {
+ fmt.Printf("write field's len length error. [ns=%s, table=%s, key=%s, field=%v, len=%d]\n", *ns, *table, string(key), field, n)
+ return errWriteLen
+ }
+ }
+
+ n, err := file.Write(field)
if err != nil {
fmt.Printf("write field error. [ns=%s, table=%s, key=%s, field=%v, err=%v]\n", *ns, *table, string(key), field, err)
return err
@@ -237,15 +254,19 @@ func hbackup(ch chan interface{}, file *os.File, client *sdk.ZanRedisClient) {
value := fv[1].([]byte)
valLen := len(value)
- binary.BigEndian.PutUint32(lenBuf, uint32(valLen))
- n, err = file.Write(lenBuf)
- if err != nil {
- fmt.Printf("write val's len error. [ns=%s, table=%s, key=%s, val=%v, err=%v]\n", *ns, *table, string(key), value, err)
- return err
- }
- if n != 4 {
- fmt.Printf("write val's len length error. [ns=%s, table=%s, key=%s, val=%v, len=%d]\n", *ns, *table, string(key), value, n)
- return errWriteLen
+ if *readable {
+ file.WriteString("\n")
+ } else {
+ binary.BigEndian.PutUint32(lenBuf, uint32(valLen))
+ n, err = file.Write(lenBuf)
+ if err != nil {
+ fmt.Printf("write val's len error. [ns=%s, table=%s, key=%s, val=%v, err=%v]\n", *ns, *table, string(key), value, err)
+ return err
+ }
+ if n != 4 {
+ fmt.Printf("write val's len length error. [ns=%s, table=%s, key=%s, val=%v, len=%d]\n", *ns, *table, string(key), value, n)
+ return errWriteLen
+ }
}
n, err = file.Write(value)
@@ -268,35 +289,43 @@ func lbackup(ch chan interface{}, file *os.File, client *sdk.ZanRedisClient) {
tp := []byte{2}
backupCommon(tp, ch, file, func(key []byte, item []interface{}, file *os.File) error {
lenBuf := make([]byte, 4)
- binary.BigEndian.PutUint32(lenBuf, uint32(len(item)))
- n, err := file.Write(lenBuf)
-
- if err != nil {
- fmt.Printf("write list count error.[ns=%s, table=%s, key=%s, err=%v]\n", *ns, *table, string(key), err)
- return err
- }
-
- if n != 4 {
- fmt.Printf("write list count length error. [ns=%s, table=%s, key=%s, count=%d, len=%d]\n",
- *ns, *table, string(key), len(item), n)
- return errWriteLen
- }
+ if *readable {
+ file.WriteString("\n")
+ } else {
+ binary.BigEndian.PutUint32(lenBuf, uint32(len(item)))
+ n, err := file.Write(lenBuf)
- for i := 0; i < len(item); i++ {
- value := item[i].([]byte)
- valLen := len(value)
- binary.BigEndian.PutUint32(lenBuf, uint32(valLen))
- n, err = file.Write(lenBuf)
if err != nil {
- fmt.Printf("write val's len error. [ns=%s, table=%s, key=%s, val=%v, err=%v]\n", *ns, *table, string(key), value, err)
+ fmt.Printf("write list count error.[ns=%s, table=%s, key=%s, err=%v]\n", *ns, *table, string(key), err)
return err
}
+
if n != 4 {
- fmt.Printf("write val's len length error. [ns=%s, table=%s, key=%s, val=%v, len=%d]\n", *ns, *table, string(key), value, n)
+ fmt.Printf("write list count length error. [ns=%s, table=%s, key=%s, count=%d, len=%d]\n",
+ *ns, *table, string(key), len(item), n)
return errWriteLen
}
+ }
- n, err = file.Write(value)
+ for i := 0; i < len(item); i++ {
+ value := item[i].([]byte)
+ valLen := len(value)
+ if *readable {
+ file.WriteString("\n")
+ } else {
+ binary.BigEndian.PutUint32(lenBuf, uint32(valLen))
+ n, err := file.Write(lenBuf)
+ if err != nil {
+ fmt.Printf("write val's len error. [ns=%s, table=%s, key=%s, val=%v, err=%v]\n", *ns, *table, string(key), value, err)
+ return err
+ }
+ if n != 4 {
+ fmt.Printf("write val's len length error. [ns=%s, table=%s, key=%s, val=%v, len=%d]\n", *ns, *table, string(key), value, n)
+ return errWriteLen
+ }
+ }
+
+ n, err := file.Write(value)
if err != nil {
fmt.Printf("write val error. [ns=%s, table=%s, key=%s, val=%v, err=%v]\n", *ns, *table, string(key), value, err)
return err
@@ -316,34 +345,42 @@ func sbackup(ch chan interface{}, file *os.File, client *sdk.ZanRedisClient) {
tp := []byte{3}
backupCommon(tp, ch, file, func(key []byte, item []interface{}, file *os.File) error {
lenBuf := make([]byte, 4)
- binary.BigEndian.PutUint32(lenBuf, uint32(len(item)))
- n, err := file.Write(lenBuf)
-
- if err != nil {
- fmt.Printf("write member count error.[ns=%s, table=%s, key=%s, err=%v]\n", *ns, *table, string(key), err)
- return err
- }
-
- if n != 4 {
- fmt.Printf("write member count length error. [ns=%s, table=%s, key=%s, count=%d, len=%d]\n", *ns, *table, string(key), len(item), n)
- return errWriteLen
- }
+ if *readable {
+ file.WriteString("\n")
+ } else {
+ binary.BigEndian.PutUint32(lenBuf, uint32(len(item)))
+ n, err := file.Write(lenBuf)
- for i := 0; i < len(item); i++ {
- member := item[i].([]byte)
- memberLen := len(member)
- binary.BigEndian.PutUint32(lenBuf, uint32(memberLen))
- n, err = file.Write(lenBuf)
if err != nil {
- fmt.Printf("write member's len error. [ns=%s, table=%s, key=%s, member=%v, err=%v]\n", *ns, *table, string(key), member, err)
+ fmt.Printf("write member count error.[ns=%s, table=%s, key=%s, err=%v]\n", *ns, *table, string(key), err)
return err
}
+
if n != 4 {
- fmt.Printf("write member's len length error. [ns=%s, table=%s, key=%s, member=%v, len=%d]\n", *ns, *table, string(key), member, n)
+ fmt.Printf("write member count length error. [ns=%s, table=%s, key=%s, count=%d, len=%d]\n", *ns, *table, string(key), len(item), n)
return errWriteLen
}
+ }
- n, err = file.Write(member)
+ for i := 0; i < len(item); i++ {
+ member := item[i].([]byte)
+ memberLen := len(member)
+ if *readable {
+ file.WriteString("\n")
+ } else {
+ binary.BigEndian.PutUint32(lenBuf, uint32(memberLen))
+ n, err := file.Write(lenBuf)
+ if err != nil {
+ fmt.Printf("write member's len error. [ns=%s, table=%s, key=%s, member=%v, err=%v]\n", *ns, *table, string(key), member, err)
+ return err
+ }
+ if n != 4 {
+ fmt.Printf("write member's len length error. [ns=%s, table=%s, key=%s, member=%v, len=%d]\n", *ns, *table, string(key), member, n)
+ return errWriteLen
+ }
+ }
+
+ n, err := file.Write(member)
if err != nil {
fmt.Printf("write member error. [ns=%s, table=%s, key=%s, member=%v, err=%v]\n", *ns, *table, string(key), member, err)
return err
@@ -363,36 +400,44 @@ func zbackup(ch chan interface{}, file *os.File, client *sdk.ZanRedisClient) {
tp := []byte{4}
backupCommon(tp, ch, file, func(key []byte, item []interface{}, file *os.File) error {
lenBuf := make([]byte, 4)
- binary.BigEndian.PutUint32(lenBuf, uint32(len(item)))
- n, err := file.Write(lenBuf)
-
- if err != nil {
- fmt.Printf("write member-score count error.[ns=%s, table=%s, key=%s, err=%v]\n", *ns, *table, string(key), err)
- return err
- }
-
- if n != 4 {
- fmt.Printf("write member-score count length error. [ns=%s, table=%s, key=%s, count=%d, len=%d]\n",
- *ns, *table, string(key), len(item), n)
- return errWriteLen
- }
+ if *readable {
+ file.WriteString("\n")
+ } else {
+ binary.BigEndian.PutUint32(lenBuf, uint32(len(item)))
+ n, err := file.Write(lenBuf)
- for i := 0; i < len(item); i++ {
- ms := item[i].([]interface{})
- member := ms[0].([]byte)
- memberLen := len(member)
- binary.BigEndian.PutUint32(lenBuf, uint32(memberLen))
- n, err = file.Write(lenBuf)
if err != nil {
- fmt.Printf("write member's len error. [ns=%s, table=%s, key=%s, member=%v, err=%v]\n", *ns, *table, string(key), member, err)
+ fmt.Printf("write member-score count error.[ns=%s, table=%s, key=%s, err=%v]\n", *ns, *table, string(key), err)
return err
}
+
if n != 4 {
- fmt.Printf("write member's len length error. [ns=%s, table=%s, key=%s, member=%v, len=%d]\n", *ns, *table, string(key), member, n)
+ fmt.Printf("write member-score count length error. [ns=%s, table=%s, key=%s, count=%d, len=%d]\n",
+ *ns, *table, string(key), len(item), n)
return errWriteLen
}
+ }
- n, err = file.Write(member)
+ for i := 0; i < len(item); i++ {
+ ms := item[i].([]interface{})
+ member := ms[0].([]byte)
+ memberLen := len(member)
+ if *readable {
+ file.WriteString("\n")
+ } else {
+ binary.BigEndian.PutUint32(lenBuf, uint32(memberLen))
+ n, err := file.Write(lenBuf)
+ if err != nil {
+ fmt.Printf("write member's len error. [ns=%s, table=%s, key=%s, member=%v, err=%v]\n", *ns, *table, string(key), member, err)
+ return err
+ }
+ if n != 4 {
+ fmt.Printf("write member's len length error. [ns=%s, table=%s, key=%s, member=%v, len=%d]\n", *ns, *table, string(key), member, n)
+ return errWriteLen
+ }
+ }
+
+ n, err := file.Write(member)
if err != nil {
fmt.Printf("write member error. [ns=%s, table=%s, key=%s, member=%v, err=%v]\n", *ns, *table, string(key), member, err)
return err
@@ -404,15 +449,19 @@ func zbackup(ch chan interface{}, file *os.File, client *sdk.ZanRedisClient) {
score := ms[1].([]byte)
scoreLen := len(score)
- binary.BigEndian.PutUint32(lenBuf, uint32(scoreLen))
- n, err = file.Write(lenBuf)
- if err != nil {
- fmt.Printf("write score's len error. [ns=%s, table=%s, key=%s, score=%v, err=%v]\n", *ns, *table, string(key), score, err)
- return err
- }
- if n != 4 {
- fmt.Printf("write score's len length error. [ns=%s, table=%s, key=%s, score=%v, len=%d]\n", *ns, *table, string(key), score, n)
- return errWriteLen
+ if *readable {
+ file.WriteString("\n")
+ } else {
+ binary.BigEndian.PutUint32(lenBuf, uint32(scoreLen))
+ n, err = file.Write(lenBuf)
+ if err != nil {
+ fmt.Printf("write score's len error. [ns=%s, table=%s, key=%s, score=%v, err=%v]\n", *ns, *table, string(key), score, err)
+ return err
+ }
+ if n != 4 {
+ fmt.Printf("write score's len length error. [ns=%s, table=%s, key=%s, score=%v, len=%d]\n", *ns, *table, string(key), score, n)
+ return errWriteLen
+ }
}
n, err = file.Write(score)
@@ -444,7 +493,10 @@ func backup(t string) {
Namespace: *ns,
Password: *pass,
}
- client := sdk.NewZanRedisClient(conf)
+ client, err := sdk.NewZanRedisClient(conf)
+ if err != nil {
+ panic(err)
+ }
client.Start()
defer client.Stop()
@@ -453,7 +505,7 @@ func backup(t string) {
ch := client.DoFullScanChannel(t, *table, stopCh)
path := path.Join(*dataDir, fmt.Sprintf("%s:%s:%s:%s.db", t, time.Now().Format("2006-01-02"), *ns, *table))
var file *os.File
- _, err := os.Stat(path)
+ _, err = os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
file, err = os.Create(path)
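In its default (non `-readable`) mode the backup writer above emits a 4-byte big-endian length before each key, value, field or member it writes; with `-readable` those length prefixes are replaced by newline separators. The sketch below, inferred only from the KV writer hunks shown here (any type or header bytes written elsewhere in the backup format are not covered), reads one binary key/value record back; the helper names are hypothetical:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"os"
)

// readLenPrefixed reads a 4-byte big-endian length and then that many bytes.
func readLenPrefixed(r io.Reader) ([]byte, error) {
	lenBuf := make([]byte, 4)
	if _, err := io.ReadFull(r, lenBuf); err != nil {
		return nil, err
	}
	buf := make([]byte, binary.BigEndian.Uint32(lenBuf))
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, err
	}
	return buf, nil
}

// readKVRecord reads one key/value pair in the binary layout written by
// kvbackup: key length + key, then value length + value.
func readKVRecord(r io.Reader) (key, value []byte, err error) {
	if key, err = readLenPrefixed(r); err != nil {
		return nil, nil, err
	}
	if value, err = readLenPrefixed(r); err != nil {
		return nil, nil, err
	}
	return key, value, nil
}

func main() {
	// Example usage: decode records from stdin until EOF.
	for {
		key, value, err := readKVRecord(os.Stdin)
		if err == io.EOF {
			return
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s => %s\n", key, value)
	}
}
```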
diff --git a/apps/placedriver/main.go b/apps/placedriver/main.go
index 15ee541c..8a961f86 100644
--- a/apps/placedriver/main.go
+++ b/apps/placedriver/main.go
@@ -5,15 +5,14 @@ import (
"fmt"
"log"
"os"
+ "path"
"path/filepath"
"syscall"
- "time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/pdserver"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/pdserver"
"github.com/BurntSushi/toml"
- "github.com/absolute8511/glog"
"github.com/judwhite/go-svc/svc"
"github.com/mreiferson/go-options"
)
@@ -22,9 +21,11 @@ var (
flagSet = flag.NewFlagSet("placedriver", flag.ExitOnError)
config = flagSet.String("config", "", "path to config file")
+ logAge = flagSet.Int("logage", 0, "the max age in days that log files will be kept")
showVersion = flagSet.Bool("version", false, "print version string")
 httpAddress = flagSet.String("http-address", "0.0.0.0:18001", "<addr>:<port> to listen on for HTTP clients")
+ metricAddress = flagSet.String("metric-address", ":8800", "<addr>:<port> to listen on for HTTP metric clients")
broadcastAddress = flagSet.String("broadcast-address", "", "address of this lookupd node, (default to the OS hostname)")
broadcastInterface = flagSet.String("broadcast-interface", "", "address of this lookupd node, (default to the OS hostname)")
 reverseProxyPort = flagSet.String("reverse-proxy-port", "", "<port> for reverse proxy")
@@ -34,11 +35,13 @@ var (
clusterID = flagSet.String("cluster-id", "test-cluster", "the cluster id used for separating different cluster.")
autoBalance = flagSet.Bool("auto-balance-and-migrate", false, "auto balance and migrate the data while unstable")
- logLevel = flagSet.Int("log-level", 1, "log verbose level")
- logDir = flagSet.String("log-dir", "", "directory for log file")
- dataDir = flagSet.String("data-dir", "", "directory for data")
- learnerRole = flagSet.String("learner-role", "", "learner role for pd")
- balanceInterval = common.StringArray{}
+ logLevel = flagSet.Int("log-level", 1, "log verbose level")
+ logDir = flagSet.String("log-dir", "", "directory for log file")
+ dataDir = flagSet.String("data-dir", "", "directory for data")
+ learnerRole = flagSet.String("learner-role", "", "learner role for pd")
+ filterNamespaces = flagSet.String("filter-namespaces", "", "filter namespaces while in learner role for pd")
+ balanceVer = flagSet.String("balance-ver", "", "balance strategy version")
+ balanceInterval = common.StringArray{}
)
func init() {
@@ -50,7 +53,7 @@ type program struct {
}
func main() {
- defer glog.Flush()
+ defer common.FlushZapDefault()
prg := &program{}
if err := svc.Run(prg, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGINT); err != nil {
log.Fatal(err)
@@ -66,8 +69,6 @@ func (p *program) Init(env svc.Environment) error {
}
func (p *program) Start() error {
- glog.InitWithFlag(flagSet)
-
flagSet.Parse(os.Args[1:])
fmt.Println(common.VerString("placedriver"))
if *showVersion {
@@ -84,11 +85,11 @@ func (p *program) Start() error {
opts := pdserver.NewServerConfig()
options.Resolve(opts, flagSet, cfg)
- if opts.LogDir != "" {
- glog.SetGLogDir(opts.LogDir)
+ common.SetZapRotateOptions(false, true, path.Join(opts.LogDir, "placedriver.log"), 0, 0, *logAge)
+ daemon, err := pdserver.NewServer(opts)
+ if err != nil {
+ return err
}
- glog.StartWorker(time.Second * 2)
- daemon := pdserver.NewServer(opts)
daemon.Start()
p.placedriver = daemon
diff --git a/apps/placedriver/main_test.go b/apps/placedriver/main_test.go
new file mode 100644
index 00000000..23c97442
--- /dev/null
+++ b/apps/placedriver/main_test.go
@@ -0,0 +1,46 @@
+package main
+
+import (
+ "os"
+ "path"
+ "runtime"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/BurntSushi/toml"
+ "github.com/mreiferson/go-options"
+ "github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/pdserver"
+)
+
+func TestAppConfigParse(t *testing.T) {
+ flagSet.Parse([]string{})
+
+ configFile := "../../pdserver/pdconf.example.conf"
+ var cfg map[string]interface{}
+ _, err := toml.DecodeFile(configFile, &cfg)
+ if err != nil {
+ t.Fatalf("ERROR: failed to load config file %s - %s", configFile, err.Error())
+ }
+ opts := pdserver.NewServerConfig()
+ options.Resolve(opts, flagSet, cfg)
+ opts.LogDir = path.Join(os.TempDir(), strconv.Itoa(int(time.Now().UnixNano())))
+ os.MkdirAll(opts.LogDir, 0755)
+ common.SetZapRotateOptions(false, true, path.Join(opts.LogDir, "test.log"), 0, 0, 0)
+ if runtime.GOOS == "darwin" {
+ opts.BroadcastInterface = "lo0"
+ } else {
+ opts.BroadcastInterface = "lo"
+ }
+ s, err := pdserver.NewServer(opts)
+ t.Log(err)
+ assert.Equal(t, "v2", opts.BalanceVer)
+
+ s.Start()
+ t.Log(opts.LogDir)
+
+ time.Sleep(time.Second)
+ s.Stop()
+}
diff --git a/apps/restore/main.go b/apps/restore/main.go
index dfe92de4..b929a653 100644
--- a/apps/restore/main.go
+++ b/apps/restore/main.go
@@ -11,8 +11,8 @@ import (
"strings"
"time"
- sdk "github.com/absolute8511/go-zanredisdb"
"github.com/absolute8511/redigo/redis"
+ sdk "github.com/youzan/go-zanredisdb"
)
var (
@@ -350,7 +350,10 @@ func restore() {
Namespace: oriNS,
Password: *pass,
}
- client := sdk.NewZanRedisClient(conf)
+ client, err := sdk.NewZanRedisClient(conf)
+ if err != nil {
+ panic(err)
+ }
client.Start()
defer client.Stop()
diff --git a/apps/zankv/main.go b/apps/zankv/main.go
index 6969aa4a..e242d177 100644
--- a/apps/zankv/main.go
+++ b/apps/zankv/main.go
@@ -2,6 +2,7 @@ package main
import (
"encoding/json"
+ "errors"
"flag"
"fmt"
"io/ioutil"
@@ -12,15 +13,16 @@ import (
"syscall"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/node"
- "github.com/absolute8511/ZanRedisDB/server"
"github.com/judwhite/go-svc/svc"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/node"
+ "github.com/youzan/ZanRedisDB/server"
)
var (
flagSet = flag.NewFlagSet("zanredisdb", flag.ExitOnError)
configFilePath = flagSet.String("config", "", "the config file path to read")
+ logAge = flagSet.Int("logage", 0, "the max age in days that log files will be kept")
showVersion = flagSet.Bool("version", false, "print version string and exit")
)
@@ -30,6 +32,7 @@ type program struct {
func main() {
defer log.Printf("main exit")
+ defer common.FlushZapDefault()
prg := &program{}
if err := svc.Run(prg, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGINT); err != nil {
log.Panic(err)
@@ -56,49 +59,53 @@ func (p *program) Start() error {
if *configFilePath != "" {
d, err := ioutil.ReadFile(*configFilePath)
if err != nil {
- panic(err)
+ return err
}
err = json.Unmarshal(d, &configFile)
if err != nil {
- panic(err)
+ return err
}
}
if configFile.ServerConf.DataDir == "" {
tmpDir, err := ioutil.TempDir("", fmt.Sprintf("rocksdb-test-%d", time.Now().UnixNano()))
if err != nil {
- panic(err)
+ return err
}
configFile.ServerConf.DataDir = tmpDir
}
serverConf := configFile.ServerConf
+ common.SetZapRotateOptions(false, true, path.Join(serverConf.LogDir, "zankv.log"), 0, 0, *logAge)
loadConf, _ := json.MarshalIndent(configFile, "", " ")
fmt.Printf("loading with conf:%v\n", string(loadConf))
bip := server.GetIPv4ForInterfaceName(serverConf.BroadcastInterface)
if bip == "" || bip == "0.0.0.0" {
- panic("broadcast ip can not be found")
+ return errors.New("broadcast ip can not be found")
} else {
serverConf.BroadcastAddr = bip
}
fmt.Printf("broadcast ip is :%v\n", bip)
- app := server.NewServer(serverConf)
+ app, err := server.NewServer(serverConf)
+ if err != nil {
+ return err
+ }
for _, nsNodeConf := range serverConf.Namespaces {
nsFile := path.Join(configDir, nsNodeConf.Name)
d, err := ioutil.ReadFile(nsFile)
if err != nil {
- panic(err)
+ return err
}
var nsConf node.NamespaceConfig
err = json.Unmarshal(d, &nsConf)
if err != nil {
- panic(err)
+ return err
}
if nsConf.Name != nsNodeConf.Name {
- panic("namespace name not match the config file")
+ return errors.New("namespace name does not match the config file")
}
if nsConf.Replicator <= 0 {
- panic("namespace replicator should be set")
+ return errors.New("namespace replicator should be set")
}
id := nsNodeConf.LocalReplicaID
diff --git a/apps/zankv/main_test.go b/apps/zankv/main_test.go
new file mode 100644
index 00000000..fad119f8
--- /dev/null
+++ b/apps/zankv/main_test.go
@@ -0,0 +1,35 @@
+package main
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path"
+ "runtime"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/server"
+)
+
+func TestAppConfigParse(t *testing.T) {
+ flagSet.Parse([]string{})
+
+ var configFile server.ConfigFile
+ d, err := ioutil.ReadFile("../../default.conf")
+ assert.Nil(t, err)
+ err = json.Unmarshal(d, &configFile)
+ assert.Nil(t, err)
+
+ serverConf := configFile.ServerConf
+ serverConf.LogDir = path.Join(os.TempDir(), strconv.Itoa(int(time.Now().UnixNano())))
+ if runtime.GOOS == "darwin" {
+ serverConf.BroadcastInterface = "lo0"
+ } else {
+ serverConf.BroadcastInterface = "lo"
+ }
+ _, err = server.NewServer(serverConf)
+ assert.Nil(t, err)
+}
diff --git a/build-pb.sh b/build-pb.sh
index 06ae8c14..f7b58631 100755
--- a/build-pb.sh
+++ b/build-pb.sh
@@ -7,6 +7,6 @@ echo $GOROOT
echo $GOPATH
for dir in ${DIRS}; do
pushd ${dir}
- protoc --proto_path=$GOPATH:$GOGOPATH:./ --gofast_out=plugins=grpc:. *.proto
+ protoc --proto_path=$GOPATH:$GOGOPATH:./ --gogofaster_out=plugins=grpc:. *.proto
popd
done
diff --git a/cluster/common.go b/cluster/common.go
index c4beef17..cf7cfb53 100644
--- a/cluster/common.go
+++ b/cluster/common.go
@@ -4,10 +4,13 @@ import (
"bytes"
"strconv"
"strings"
+ "time"
)
const (
ErrFailedOnNotLeader = "E_FAILED_ON_NOT_LEADER"
+ APIShortTo = time.Second * 3
+ APILongTo = time.Second * 10
)
const (
@@ -165,6 +168,7 @@ var (
ErrNamespaceConfInvalid = NewCoordErrWithCode("namespace config is invalid", CoordClusterErr, RpcNoErr)
ErrNamespaceWaitingSync = NewCoordErrWithCode("namespace is still waiting sync", CoordTmpErr, RpcNoErr)
ErrRegisterServiceUnstable = NewCoordErr("the register service is unstable", CoordTmpErr)
+ ErrNoCoordRegister = NewCoordErr("pd coordinator register is not set", CoordLocalErr)
)
func GenNodeID(n *NodeInfo, extra string) string {
@@ -251,5 +255,7 @@ type Options struct {
AutoBalanceAndMigrate bool
BalanceStart int
BalanceEnd int
+ BalanceVer string
DataDir string
+ FilterNamespaces string
}
diff --git a/cluster/datanode_coord/data_learner_coord.go b/cluster/datanode_coord/data_learner_coord.go
index 3534f6af..f7d5b2ac 100644
--- a/cluster/datanode_coord/data_learner_coord.go
+++ b/cluster/datanode_coord/data_learner_coord.go
@@ -5,11 +5,51 @@ import (
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/cluster"
- "github.com/absolute8511/ZanRedisDB/common"
- node "github.com/absolute8511/ZanRedisDB/node"
+ "github.com/youzan/ZanRedisDB/cluster"
+ "github.com/youzan/ZanRedisDB/common"
+ node "github.com/youzan/ZanRedisDB/node"
)
+// GetCurrentNsWithLearners returns all current namespaces which have running learners,
+// so we can check whether any remaining learners are still running in the namespace (even if the register entry is removed).
+func (dc *DataCoordinator) GetCurrentNsWithLearners() ([]string, error) {
+ nsList := make([]string, 0)
+ tmpChecks := dc.localNSMgr.GetNamespaces()
+ for name, ns := range tmpChecks {
+ if ns.Node == nil {
+ continue
+ }
+ if ns.Node.GetLearnerRole() != "" {
+ nsList = append(nsList, name)
+ }
+ }
+ if dc.register == nil {
+ return nsList, nil
+ }
+ // check both the locally running namespaces and the register meta to make sure all learners are included
+ namespaceMap, _, err := dc.register.GetAllNamespaces()
+ if err != nil {
+ if err == cluster.ErrKeyNotFound {
+ return nsList, nil
+ }
+ return nil, err
+ }
+ for ns, parts := range namespaceMap {
+ for pid, pinfo := range parts {
+ lrns := pinfo.LearnerNodes
+ if lrns == nil {
+ continue
+ }
+ nodes, ok := lrns[dc.learnerRole]
+ if !ok || len(nodes) == 0 {
+ continue
+ }
+ nsList = append(nsList, common.GetNsDesp(ns, pid))
+ }
+ }
+ return nsList, nil
+}
+
func (dc *DataCoordinator) loadLocalNamespaceForLearners() error {
if dc.localNSMgr == nil {
cluster.CoordLog().Infof("no namespace manager")
@@ -23,6 +63,7 @@ func (dc *DataCoordinator) loadLocalNamespaceForLearners() error {
return err
}
sortedParts := make(PartitionList, 0)
+ // TODO: allow loading learners concurrently for different namespaces and partitions
for namespaceName, namespaceParts := range namespaceMap {
sortedParts = sortedParts[:0]
for _, part := range namespaceParts {
@@ -50,11 +91,6 @@ func (dc *DataCoordinator) loadLocalNamespaceForLearners() error {
if namespaceName == "" {
continue
}
- checkErr := dc.checkLocalNamespaceMagicCode(&nsInfo, true)
- if checkErr != nil {
- cluster.CoordLog().Errorf("failed to check namespace :%v, err:%v", nsInfo.GetDesp(), checkErr)
- continue
- }
localNamespace, coordErr := dc.updateLocalNamespace(&nsInfo, false)
if coordErr != nil {
@@ -62,8 +98,6 @@ func (dc *DataCoordinator) loadLocalNamespaceForLearners() error {
continue
}
- dyConf := &node.NamespaceDynamicConf{}
- localNamespace.SetDynamicInfo(*dyConf)
localErr := dc.checkAndFixLocalNamespaceData(&nsInfo, localNamespace)
if localErr != nil {
cluster.CoordLog().Errorf("check local namespace %v data need to be fixed:%v", nsInfo.GetDesp(), localErr)
@@ -89,7 +123,9 @@ func (dc *DataCoordinator) ensureJoinNamespaceGroupForLearner(nsInfo cluster.Par
localNamespace.SwitchForLearnerLeader(false)
}
- dyConf := &node.NamespaceDynamicConf{}
+ dyConf := &node.NamespaceDynamicConf{
+ nsInfo.Replica,
+ }
localNamespace.SetDynamicInfo(*dyConf)
if localNamespace.IsDataNeedFix() {
// clean local data
@@ -219,7 +255,7 @@ func (dc *DataCoordinator) checkForUnsyncedLogSyncers() {
}
if !dc.isInLearnerGroup(*nsInfo, localNamespace) {
- cluster.CoordLog().Infof("namespace %v removed since not in learner group", name, nsInfo.LearnerNodes)
+ cluster.CoordLog().Infof("namespace %v removed since not in learner group: %v", name, nsInfo.LearnerNodes)
dc.forceRemoveLocalNamespace(localNamespace)
continue
}
diff --git a/cluster/datanode_coord/data_node_coordinator.go b/cluster/datanode_coord/data_node_coordinator.go
index d6044473..1c71bb76 100644
--- a/cluster/datanode_coord/data_node_coordinator.go
+++ b/cluster/datanode_coord/data_node_coordinator.go
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/json"
"errors"
+ "fmt"
"net"
"net/http"
"path/filepath"
@@ -13,9 +14,9 @@ import (
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/cluster"
- "github.com/absolute8511/ZanRedisDB/common"
- node "github.com/absolute8511/ZanRedisDB/node"
+ "github.com/youzan/ZanRedisDB/cluster"
+ "github.com/youzan/ZanRedisDB/common"
+ node "github.com/youzan/ZanRedisDB/node"
)
var (
@@ -23,11 +24,11 @@ var (
ErrNamespaceNotReady = cluster.NewCoordErr("namespace node is not ready", cluster.CoordLocalErr)
ErrNamespaceInvalid = errors.New("namespace name is invalid")
ErrNamespaceNotFound = errors.New("namespace is not found")
- // the wait interval while check transfering leader between different partitions
+ // the wait interval while check transferring leader between different partitions
// to avoid too much partitions do the leader transfer in short time.
TransferLeaderWait = time.Second * 20
CheckUnsyncedInterval = time.Minute * 5
- EnsureJoinCheckWait = time.Second * 30
+ EnsureJoinCheckWait = time.Second * 20
// the wait interval allowed while changing leader in the same raft group
// to avoid change the leader in the same raft too much
ChangeLeaderInRaftWait = time.Minute
@@ -44,7 +45,8 @@ func ChangeIntervalForTest() {
}
const (
- MaxRaftJoinRunning = 2
+ // allow more concurrent joins since rsync already limits the network traffic
+ MaxRaftJoinRunning = 20
)
func GetNamespacePartitionFileName(namespace string, partition int, suffix string) string {
@@ -121,10 +123,12 @@ func (dc *DataCoordinator) SetRegister(l cluster.DataNodeRegister) error {
cluster.CoordLog().Errorf("failed to init node register id: %v", err)
return err
}
- err = dc.localNSMgr.SaveMachineRegID(dc.myNode.RegID)
- if err != nil {
- cluster.CoordLog().Errorf("failed to save register id: %v", err)
- return err
+ if dc.localNSMgr != nil {
+ err = dc.localNSMgr.SaveMachineRegID(dc.myNode.RegID)
+ if err != nil {
+ cluster.CoordLog().Errorf("failed to save register id: %v", err)
+ return err
+ }
}
}
if dc.learnerRole == "" {
@@ -137,6 +141,53 @@ func (dc *DataCoordinator) SetRegister(l cluster.DataNodeRegister) error {
return nil
}
+func (dc *DataCoordinator) UpdateSyncerWriteOnly(v bool) error {
+ return dc.updateRegisterKV("syncer_write_only", v)
+}
+
+func (dc *DataCoordinator) GetSyncerWriteOnly() (bool, error) {
+ return dc.getRegisterKV("syncer_write_only")
+}
+
+func (dc *DataCoordinator) UpdateSyncerNormalInit(v bool) error {
+ return dc.updateRegisterKV("syncer_normal_init", v)
+}
+
+func (dc *DataCoordinator) GetSyncerNormalInit() (bool, error) {
+ return dc.getRegisterKV("syncer_normal_init")
+}
+
+func (dc *DataCoordinator) updateRegisterKV(k string, v bool) error {
+ if dc.register == nil {
+ return errors.New("missing register")
+ }
+ key := dc.myNode.GetID() + "_kv:" + k
+ vstr := "false"
+ if v {
+ vstr = "true"
+ }
+ return dc.register.SaveKV(key, vstr)
+}
+
+func (dc *DataCoordinator) getRegisterKV(k string) (bool, error) {
+ if dc.register == nil {
+ return false, errors.New("missing register")
+ }
+ key := dc.myNode.GetID() + "_kv:" + k
+ v, err := dc.register.GetKV(key)
+ if err != nil {
+ return false, err
+ }
+
+ if v == "true" {
+ return true, nil
+ }
+ if v == "false" {
+ return false, nil
+ }
+ return false, fmt.Errorf("invalid value: %v", v)
+}
+
func (dc *DataCoordinator) Start() error {
atomic.StoreInt32(&dc.stopping, 0)
dc.stopChan = make(chan struct{})
@@ -154,6 +205,30 @@ func (dc *DataCoordinator) Start() error {
}
if dc.localNSMgr != nil {
dc.localNSMgr.Start()
+ localNsMagics := dc.localNSMgr.CheckLocalNamespaces()
+ checkFailed := false
+ for ns, localMagic := range localNsMagics {
+ namespace, _ := common.GetNamespaceAndPartition(ns)
+ if namespace == "" {
+ continue
+ }
+ // check whether the magic code mismatches or the namespace has already been removed by the cluster
+ nsMeta, err := dc.register.GetNamespaceMetaInfo(namespace)
+ if err != nil && err != cluster.ErrKeyNotFound {
+ cluster.CoordLog().Warningf("failed to get namespace meta %s from register : %v", ns, err.Error())
+ return err
+ }
+ if err == cluster.ErrKeyNotFound {
+ dc.localNSMgr.CleanSharedNsFiles(namespace)
+ } else if nsMeta.MagicCode > 0 && localMagic > 0 && localMagic != nsMeta.MagicCode {
+ cluster.CoordLog().Errorf("clean left namespace %v data, since magic code not match : %v, %v", ns, localMagic, nsMeta.MagicCode)
+ // we can not clean the shared namespace data here, since only parts of the namespace may be mismatched
+ checkFailed = true
+ }
+ }
+ if checkFailed {
+ return errors.New("start failed since local data check failed")
+ }
}
dc.wg.Add(1)
go dc.watchPD()
@@ -168,7 +243,7 @@ func (dc *DataCoordinator) Start() error {
dc.wg.Add(1)
go dc.checkForUnsyncedNamespaces()
- } else if dc.learnerRole == common.LearnerRoleLogSyncer {
+ } else if common.IsRoleLogSyncer(dc.learnerRole) {
dc.loadLocalNamespaceForLearners()
dc.wg.Add(1)
go dc.checkForUnsyncedLogSyncers()
@@ -215,18 +290,6 @@ func (dc *DataCoordinator) watchPD() {
}
}
-func (dc *DataCoordinator) checkLocalNamespaceMagicCode(nsInfo *cluster.PartitionMetaInfo, tryFix bool) error {
- if nsInfo.MagicCode <= 0 {
- return nil
- }
- err := dc.localNSMgr.CheckMagicCode(nsInfo.GetDesp(), nsInfo.MagicCode, tryFix)
- if err != nil {
- cluster.CoordLog().Infof("namespace %v check magic code error: %v", nsInfo.GetDesp(), err)
- return err
- }
- return nil
-}
-
func (dc *DataCoordinator) loadLocalNamespaceData() error {
if dc.localNSMgr == nil {
cluster.CoordLog().Infof("no namespace manager")
@@ -253,7 +316,7 @@ func (dc *DataCoordinator) loadLocalNamespaceData() error {
shouldLoad := dc.isNamespaceShouldStart(nsInfo, localNamespace)
if !shouldLoad {
if len(nsInfo.GetISR()) >= nsInfo.Replica && localNamespace != nil {
- dc.removeLocalNamespaceFromRaft(localNamespace, false)
+ dc.forceRemoveLocalNamespace(localNamespace)
}
if localNamespace != nil {
cluster.CoordLog().Infof("%v namespace %v ignore to load ", dc.GetMyID(), nsInfo.GetDesp())
@@ -270,15 +333,10 @@ func (dc *DataCoordinator) loadLocalNamespaceData() error {
cluster.CoordLog().Debugf("%v namespace %v already loaded", dc.GetMyID(), nsInfo.GetDesp())
continue
}
- cluster.CoordLog().Infof("loading namespace: %v", nsInfo.GetDesp())
+ cluster.CoordLog().Infof("mynode %v loading namespace: %v, %v", dc.GetMyID(), nsInfo.GetDesp(), nsInfo)
if namespaceName == "" {
continue
}
- checkErr := dc.checkLocalNamespaceMagicCode(&nsInfo, true)
- if checkErr != nil {
- cluster.CoordLog().Errorf("failed to check namespace :%v, err:%v", nsInfo.GetDesp(), checkErr)
- continue
- }
localNamespace, coordErr := dc.updateLocalNamespace(&nsInfo, false)
if coordErr != nil {
@@ -286,8 +344,6 @@ func (dc *DataCoordinator) loadLocalNamespaceData() error {
continue
}
- dyConf := &node.NamespaceDynamicConf{}
- localNamespace.SetDynamicInfo(*dyConf)
localErr := dc.checkAndFixLocalNamespaceData(&nsInfo, localNamespace)
if localErr != nil {
cluster.CoordLog().Errorf("check local namespace %v data need to be fixed:%v", nsInfo.GetDesp(), localErr)
@@ -315,7 +371,7 @@ func (dc *DataCoordinator) isLocalRaftInRaftGroup(nsInfo *cluster.PartitionMetaI
var rsp []*common.MemberInfo
code, err := common.APIRequest("GET",
"http://"+destAddress+common.APIGetMembers+"/"+nsInfo.GetDesp(),
- nil, time.Second*3, &rsp)
+ nil, cluster.APIShortTo, &rsp)
if err != nil {
cluster.CoordLog().Infof("failed to get members from %v for namespace: %v, %v", destAddress, nsInfo.GetDesp(), err)
if code == http.StatusNotFound {
@@ -486,7 +542,7 @@ func (dc *DataCoordinator) getNamespaceRaftLeader(nsInfo *cluster.PartitionMetaI
return m.NodeID
}
-func (dc *DataCoordinator) transferMyNamespaceLeader(nsInfo *cluster.PartitionMetaInfo, nid string, force bool, checkAll bool) bool {
+func (dc *DataCoordinator) TransferMyNamespaceLeader(nsInfo *cluster.PartitionMetaInfo, nid string, force bool, checkAll bool) bool {
nsNode := dc.localNSMgr.GetNamespaceNode(nsInfo.GetDesp())
if nsNode == nil {
return false
@@ -523,7 +579,7 @@ func (dc *DataCoordinator) isReplicaReadyForRaft(nsNode *node.NamespaceNode, toR
nip, _, _, httpPort := cluster.ExtractNodeInfoFromID(nodeID)
code, err := common.APIRequest("GET",
"http://"+net.JoinHostPort(nip, httpPort)+common.APINodeAllReady,
- nil, time.Second, nil)
+ nil, cluster.APILongTo, nil)
if err != nil {
cluster.CoordLog().Infof("not ready from %v for transfer leader: %v, %v", nip, code, err.Error())
return false
@@ -618,6 +674,9 @@ func (dc *DataCoordinator) checkForUnsyncedNamespaces() {
}
// check local namespaces with cluster to remove the unneed data
+ if dc.localNSMgr == nil {
+ return
+ }
tmpChecks := dc.localNSMgr.GetNamespaces()
allIndexSchemas := make(map[string]map[string]*common.IndexSchema)
allNamespaces, _, err := dc.register.GetAllNamespaces()
@@ -662,6 +721,7 @@ func (dc *DataCoordinator) checkForUnsyncedNamespaces() {
_, err = dc.register.GetNamespaceMetaInfo(namespace)
if err == cluster.ErrKeyNotFound {
dc.forceRemoveLocalNamespace(localNamespace)
+ dc.localNSMgr.CleanSharedNsFiles(namespace)
}
} else {
dc.tryCheckNamespacesIn(time.Second * 5)
@@ -688,7 +748,14 @@ func (dc *DataCoordinator) checkForUnsyncedNamespaces() {
dc.GetMyID(), localRID, namespaceMeta.GetDesp())
inRaft = true
}
- dc.removeLocalNamespaceFromRaft(localNamespace, inRaft)
+ if inRaft {
+ dc.removeLocalNamespaceFromRaft(localNamespace)
+ } else {
+ // since this node will rejoin with another raft id, we can just stop without cleaning the old data
+ if localNamespace != nil {
+ localNamespace.Close()
+ }
+ }
}
continue
}
@@ -713,7 +780,7 @@ func (dc *DataCoordinator) checkForUnsyncedNamespaces() {
// removing node should transfer leader immediately since others partitions may wait for ready ,
// so it may never all ready for transfer. we transfer only check local partition.
_, removed := namespaceMeta.Removings[dc.GetMyID()]
- done = dc.transferMyNamespaceLeader(namespaceMeta, isrList[0], false, !removed)
+ done = dc.TransferMyNamespaceLeader(namespaceMeta, isrList[0], false, !removed)
lastTransferCheckedTime = time.Now()
}
if !done {
@@ -731,7 +798,7 @@ func (dc *DataCoordinator) checkForUnsyncedNamespaces() {
// also we should avoid transfer leader while some node is catchuping while recover from restart
done := false
if time.Since(lastTransferCheckedTime) >= TransferLeaderWait {
- done = dc.transferMyNamespaceLeader(namespaceMeta, isrList[0], false, true)
+ done = dc.TransferMyNamespaceLeader(namespaceMeta, isrList[0], false, true)
lastTransferCheckedTime = time.Now()
}
if !done {
@@ -849,31 +916,29 @@ func (dc *DataCoordinator) checkForUnsyncedNamespaces() {
}
func (dc *DataCoordinator) forceRemoveLocalNamespace(localNamespace *node.NamespaceNode) {
+ if localNamespace == nil {
+ return
+ }
+ cluster.CoordLog().Infof("force remove local data: %v", localNamespace.FullName())
err := localNamespace.Destroy()
if err != nil {
cluster.CoordLog().Infof("failed to force remove local data: %v", err)
}
}
-func (dc *DataCoordinator) removeLocalNamespaceFromRaft(localNamespace *node.NamespaceNode, removeFromRaft bool) *cluster.CoordErr {
- if removeFromRaft {
- if !localNamespace.IsReady() {
- return ErrNamespaceNotReady
- }
- m := localNamespace.Node.GetLocalMemberInfo()
- cluster.CoordLog().Infof("propose remove %v from namespace : %v", m.ID, m.GroupName)
+func (dc *DataCoordinator) removeLocalNamespaceFromRaft(localNamespace *node.NamespaceNode) *cluster.CoordErr {
+ if !localNamespace.IsReady() {
+ return ErrNamespaceNotReady
+ }
+ m := localNamespace.Node.GetLocalMemberInfo()
+ cluster.CoordLog().Infof("propose remove %v from namespace : %v", m.ID, m.GroupName)
- localErr := localNamespace.Node.ProposeRemoveMember(*m)
- if localErr != nil {
- cluster.CoordLog().Infof("propose remove dc %v failed : %v", m, localErr)
- return &cluster.CoordErr{ErrMsg: localErr.Error(), ErrCode: cluster.RpcCommonErr, ErrType: cluster.CoordLocalErr}
- }
- } else {
- if localNamespace == nil {
- return cluster.ErrNamespaceNotCreated
- }
- localNamespace.Close()
+ localErr := localNamespace.Node.ProposeRemoveMember(*m)
+ if localErr != nil {
+ cluster.CoordLog().Infof("propose remove dc %v failed : %v", m, localErr)
+ return &cluster.CoordErr{ErrMsg: localErr.Error(), ErrCode: cluster.RpcCommonErr, ErrType: cluster.CoordLocalErr}
}
+
return nil
}
@@ -885,7 +950,8 @@ func (dc *DataCoordinator) getRaftAddrForNode(nid string) (string, *cluster.Coor
return node.RaftTransportAddr, nil
}
-func (dc *DataCoordinator) prepareNamespaceConf(nsInfo *cluster.PartitionMetaInfo, raftID uint64, join bool) (*node.NamespaceConfig, *cluster.CoordErr) {
+func (dc *DataCoordinator) prepareNamespaceConf(nsInfo *cluster.PartitionMetaInfo, raftID uint64,
+ join bool, forceStandaloneCluster bool) (*node.NamespaceConfig, *cluster.CoordErr) {
var err *cluster.CoordErr
nsConf := node.NewNSConfig()
nsConf.BaseName = nsInfo.Name
@@ -897,6 +963,9 @@ func (dc *DataCoordinator) prepareNamespaceConf(nsInfo *cluster.PartitionMetaInf
if nsInfo.ExpirationPolicy != "" {
nsConf.ExpirationPolicy = nsInfo.ExpirationPolicy
}
+ if nsInfo.DataVersion != "" {
+ nsConf.DataVersion = nsInfo.DataVersion
+ }
if nsInfo.SnapCount > 100 {
nsConf.SnapCount = nsInfo.SnapCount
nsConf.SnapCatchup = nsInfo.SnapCount / 4
@@ -931,7 +1000,12 @@ func (dc *DataCoordinator) prepareNamespaceConf(nsInfo *cluster.PartitionMetaInf
}
if !join && len(nsConf.RaftGroupConf.SeedNodes) <= nsInfo.Replica/2 {
cluster.CoordLog().Warningf("seed nodes for namespace %v not enough: %v", nsInfo.GetDesp(), nsConf.RaftGroupConf)
- return nil, cluster.ErrNamespaceConfInvalid
+ // we should allow a single node as a new raft cluster while in disaster re-init mode.
+ // In this case, we only have one seed node at init. (Note that writes may not be allowed during init,
+ // since the raft node count is smaller than half of the replica count.)
+ if !forceStandaloneCluster {
+ return nil, cluster.ErrNamespaceConfInvalid
+ }
}
return nsConf, nil
}
@@ -956,7 +1030,7 @@ func (dc *DataCoordinator) requestJoinNamespaceGroup(raftID uint64, nsInfo *clus
}
_, err := common.APIRequest("POST",
uri,
- bytes.NewReader(d), time.Second*3, nil)
+ bytes.NewReader(d), cluster.APIShortTo, nil)
if err != nil {
cluster.CoordLog().Infof("failed to request join namespace: %v", err)
return err
@@ -1006,7 +1080,9 @@ func (dc *DataCoordinator) ensureJoinNamespaceGroup(nsInfo cluster.PartitionMeta
return cluster.ErrCatchupRunningBusy
}
- dyConf := &node.NamespaceDynamicConf{}
+ dyConf := &node.NamespaceDynamicConf{
+ nsInfo.Replica,
+ }
localNamespace.SetDynamicInfo(*dyConf)
if localNamespace.IsDataNeedFix() {
// clean local data
@@ -1102,7 +1178,7 @@ func (dc *DataCoordinator) updateLocalNamespace(nsInfo *cluster.PartitionMetaInf
// check namespace exist and prepare on local.
raftID, ok := nsInfo.RaftIDs[dc.GetMyID()]
if !ok {
- cluster.CoordLog().Warningf("namespace %v has no raft id for local", nsInfo.GetDesp(), nsInfo.RaftIDs)
+ cluster.CoordLog().Warningf("namespace %v has no raft id for local: %v", nsInfo.GetDesp(), nsInfo.RaftIDs)
return nil, cluster.ErrNamespaceConfInvalid
}
@@ -1128,7 +1204,7 @@ func (dc *DataCoordinator) updateLocalNamespace(nsInfo *cluster.PartitionMetaInf
if forceStandaloneCluster {
join = false
}
- nsConf, err := dc.prepareNamespaceConf(nsInfo, raftID, join)
+ nsConf, err := dc.prepareNamespaceConf(nsInfo, raftID, join, forceStandaloneCluster)
if err != nil {
go dc.tryCheckNamespaces()
cluster.CoordLog().Warningf("prepare join namespace %v failed: %v", nsInfo.GetDesp(), err)
@@ -1144,15 +1220,25 @@ func (dc *DataCoordinator) updateLocalNamespace(nsInfo *cluster.PartitionMetaInf
}
if localNode == nil || localErr != nil {
cluster.CoordLog().Warningf("local namespace %v init failed: %v", nsInfo.GetDesp(), localErr)
- return nil, cluster.ErrLocalInitNamespaceFailed
+ if localNode == nil {
+ return nil, cluster.ErrLocalInitNamespaceFailed
+ }
+ if localErr != node.ErrNamespaceAlreadyExist {
+ return nil, cluster.ErrLocalInitNamespaceFailed
+ }
}
localErr = localNode.SetMagicCode(nsInfo.MagicCode)
if localErr != nil {
cluster.CoordLog().Warningf("local namespace %v init magic code failed: %v", nsInfo.GetDesp(), localErr)
+ if localErr == node.ErrLocalMagicCodeConflict {
+ dc.forceRemoveLocalNamespace(localNode)
+ }
return localNode, cluster.ErrLocalInitNamespaceFailed
}
- dyConf := &node.NamespaceDynamicConf{}
+ dyConf := &node.NamespaceDynamicConf{
+ nsConf.Replicator,
+ }
localNode.SetDynamicInfo(*dyConf)
if err := localNode.Start(forceStandaloneCluster); err != nil {
return nil, cluster.ErrLocalInitNamespaceFailed
@@ -1284,15 +1370,29 @@ func (dc *DataCoordinator) prepareLeavingCluster() {
if leader != dc.GetMyRegID() {
continue
}
+			// try a graceful (non-forced) transfer first; if that fails, retry with force
+ transferSuccess := false
for _, newLeader := range nsInfo.GetISR() {
if newLeader == dc.GetMyID() {
continue
}
- done := dc.transferMyNamespaceLeader(nsInfo.GetCopy(), newLeader, true, false)
+ done := dc.TransferMyNamespaceLeader(nsInfo.GetCopy(), newLeader, false, true)
if done {
+ transferSuccess = true
break
}
}
+ if !transferSuccess {
+ for _, newLeader := range nsInfo.GetISR() {
+ if newLeader == dc.GetMyID() {
+ continue
+ }
+ done := dc.TransferMyNamespaceLeader(nsInfo.GetCopy(), newLeader, true, false)
+ if done {
+ break
+ }
+ }
+ }
}
}
if dc.register != nil {
@@ -1301,7 +1401,9 @@ func (dc *DataCoordinator) prepareLeavingCluster() {
}
cluster.CoordLog().Infof("prepare leaving finished.")
- dc.localNSMgr.Stop()
+ if dc.localNSMgr != nil {
+ dc.localNSMgr.Stop()
+ }
}
func (dc *DataCoordinator) Stats(namespace string, part int) *cluster.CoordStats {
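Note on the leader-transfer change in prepareLeavingCluster above: the ISR is now walked twice, first attempting a graceful transfer and only falling back to a forced one if no candidate succeeded. A minimal, self-contained sketch of that fallback pattern follows; transferWithFallback and try are illustrative names, not part of the project API.

package main

import "fmt"

// transferWithFallback mirrors the two-pass loop above: try a graceful
// (non-forced) transfer to every candidate first, and only if none succeeds
// retry the candidates with force enabled.
func transferWithFallback(candidates []string, self string,
	try func(target string, force bool) bool) bool {
	for _, force := range []bool{false, true} {
		for _, target := range candidates {
			if target == self {
				continue
			}
			if try(target, force) {
				return true
			}
		}
	}
	return false
}

func main() {
	// pretend only a forced transfer to n2 can succeed
	ok := transferWithFallback([]string{"n1", "n2"}, "n1",
		func(target string, force bool) bool { return force && target == "n2" })
	fmt.Println("transferred:", ok)
}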
diff --git a/cluster/datanode_coord/data_node_coordinator_test.go b/cluster/datanode_coord/data_node_coordinator_test.go
new file mode 100644
index 00000000..4f5ac6f1
--- /dev/null
+++ b/cluster/datanode_coord/data_node_coordinator_test.go
@@ -0,0 +1,70 @@
+package datanode_coord
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/cluster"
+)
+
+var testEtcdServers = "http://127.0.0.1:2379"
+
+func TestDataCoordKV(t *testing.T) {
+ ChangeIntervalForTest()
+ cluster.SetLogger(0, nil)
+ ninfo := &cluster.NodeInfo{
+ NodeIP: "127.0.0.1",
+ Hostname: "localhost",
+ RedisPort: "1234",
+ HttpPort: "1235",
+ RpcPort: "1236",
+ RaftTransportAddr: "127.0.0.1:2379",
+ }
+ r, err := cluster.NewDNEtcdRegister(testEtcdServers)
+ assert.Nil(t, err)
+
+ dc := NewDataCoordinator("unit-test-cluster", ninfo, nil)
+
+ err = dc.SetRegister(r)
+ assert.Nil(t, err)
+ dc.Start()
+ defer dc.Stop()
+
+ v, err := dc.GetSyncerWriteOnly()
+ assert.Equal(t, cluster.ErrKeyNotFound, err)
+ err = dc.UpdateSyncerWriteOnly(true)
+ assert.Nil(t, err)
+ v, err = dc.GetSyncerWriteOnly()
+ assert.Nil(t, err)
+ assert.Equal(t, true, v)
+
+ ninfo2 := &cluster.NodeInfo{
+ NodeIP: "127.0.0.1",
+ Hostname: "localhost",
+ RedisPort: "2234",
+ HttpPort: "2235",
+ RpcPort: "2236",
+ RaftTransportAddr: "127.0.0.1:2379",
+ }
+ dc2 := NewDataCoordinator("unit-test-cluster", ninfo2, nil)
+
+ r2, err := cluster.NewDNEtcdRegister(testEtcdServers)
+ assert.Nil(t, err)
+ err = dc2.SetRegister(r2)
+ assert.Nil(t, err)
+ dc2.Start()
+ defer dc2.Stop()
+ _, err = dc2.GetSyncerWriteOnly()
+ assert.Equal(t, cluster.ErrKeyNotFound, err)
+
+ err = dc2.UpdateSyncerWriteOnly(false)
+ assert.Nil(t, err)
+ v2, err := dc2.GetSyncerWriteOnly()
+ assert.Nil(t, err)
+ assert.Equal(t, false, v2)
+
+ // should not affect the node1
+ v, err = dc.GetSyncerWriteOnly()
+ assert.Nil(t, err)
+ assert.Equal(t, true, v)
+}
diff --git a/cluster/datanode_coord/dn_schema_coord.go b/cluster/datanode_coord/dn_schema_coord.go
index 3b9cc9dc..6459a1e8 100644
--- a/cluster/datanode_coord/dn_schema_coord.go
+++ b/cluster/datanode_coord/dn_schema_coord.go
@@ -3,9 +3,9 @@ package datanode_coord
import (
"encoding/json"
- "github.com/absolute8511/ZanRedisDB/cluster"
- "github.com/absolute8511/ZanRedisDB/common"
- node "github.com/absolute8511/ZanRedisDB/node"
+ "github.com/youzan/ZanRedisDB/cluster"
+ "github.com/youzan/ZanRedisDB/common"
+ node "github.com/youzan/ZanRedisDB/node"
)
// this will only be handled on leader of raft group
diff --git a/cluster/etcd_client.go b/cluster/etcd_client.go
new file mode 100644
index 00000000..3d789732
--- /dev/null
+++ b/cluster/etcd_client.go
@@ -0,0 +1,168 @@
+package cluster
+
+import (
+ "net"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/coreos/etcd/client"
+ "golang.org/x/net/context"
+)
+
+var etcdTransport client.CancelableTransport = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 10 * time.Second,
+ KeepAlive: 10 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ WriteBufferSize: 1024,
+ ReadBufferSize: 1024,
+}
+
+type EtcdClient struct {
+ client client.Client
+ kapi client.KeysAPI
+ timeout time.Duration
+}
+
+func NewEClient(host string) (*EtcdClient, error) {
+ machines := strings.Split(host, ",")
+ initEtcdPeers(machines)
+ cfg := client.Config{
+ Endpoints: machines,
+ Transport: etcdTransport,
+ HeaderTimeoutPerRequest: time.Second * 5,
+ }
+
+ c, err := client.New(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ return &EtcdClient{
+ client: c,
+ kapi: client.NewKeysAPI(c),
+ timeout: time.Second * 10,
+ }, nil
+}
+
+func (self *EtcdClient) GetNewest(key string, sort, recursive bool) (*client.Response, error) {
+ getOptions := &client.GetOptions{
+ Recursive: recursive,
+ Sort: sort,
+ Quorum: true,
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), self.timeout)
+ defer cancel()
+ return self.kapi.Get(ctx, key, getOptions)
+}
+
+func (self *EtcdClient) Get(key string, sort, recursive bool) (*client.Response, error) {
+ getOptions := &client.GetOptions{
+ Recursive: recursive,
+ Sort: sort,
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), self.timeout)
+ defer cancel()
+ return self.kapi.Get(ctx, key, getOptions)
+}
+
+func (self *EtcdClient) Create(key string, value string, ttl uint64) (*client.Response, error) {
+ setOptions := &client.SetOptions{
+ TTL: time.Duration(ttl) * time.Second,
+ PrevExist: client.PrevNoExist,
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), self.timeout)
+ defer cancel()
+ return self.kapi.Set(ctx, key, value, setOptions)
+}
+
+func (self *EtcdClient) Delete(key string, recursive bool) (*client.Response, error) {
+ delOptions := &client.DeleteOptions{
+ Recursive: recursive,
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), self.timeout)
+ defer cancel()
+ return self.kapi.Delete(ctx, key, delOptions)
+}
+
+func (self *EtcdClient) CreateDir(key string, ttl uint64) (*client.Response, error) {
+ setOptions := &client.SetOptions{
+ TTL: time.Duration(ttl) * time.Second,
+ Dir: true,
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), self.timeout)
+ defer cancel()
+ return self.kapi.Set(ctx, key, "", setOptions)
+}
+
+func (self *EtcdClient) CreateInOrder(dir string, value string, ttl uint64) (*client.Response, error) {
+ cirOptions := &client.CreateInOrderOptions{
+ TTL: time.Duration(ttl) * time.Second,
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), self.timeout)
+ defer cancel()
+ return self.kapi.CreateInOrder(ctx, dir, value, cirOptions)
+}
+
+func (self *EtcdClient) Set(key string, value string, ttl uint64) (*client.Response, error) {
+ setOptions := &client.SetOptions{
+ TTL: time.Duration(ttl) * time.Second,
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), self.timeout)
+ defer cancel()
+ return self.kapi.Set(ctx, key, value, setOptions)
+}
+
+func (self *EtcdClient) SetWithTTL(key string, ttl uint64) (*client.Response, error) {
+ setOptions := &client.SetOptions{
+ TTL: time.Duration(ttl) * time.Second,
+ Refresh: true,
+ PrevExist: client.PrevExist,
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), self.timeout)
+ defer cancel()
+ return self.kapi.Set(ctx, key, "", setOptions)
+}
+
+func (self *EtcdClient) Update(key string, value string, ttl uint64) (*client.Response, error) {
+ setOptions := &client.SetOptions{
+ TTL: time.Duration(ttl) * time.Second,
+ Refresh: true,
+ PrevExist: client.PrevExist,
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), self.timeout)
+ defer cancel()
+ return self.kapi.Set(ctx, key, value, setOptions)
+}
+
+func (self *EtcdClient) CompareAndSwap(key string, value string, ttl uint64, prevValue string, prevIndex uint64) (*client.Response, error) {
+ setOptions := &client.SetOptions{
+ PrevValue: prevValue,
+ PrevIndex: prevIndex,
+ TTL: time.Duration(ttl) * time.Second,
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), self.timeout)
+ defer cancel()
+ return self.kapi.Set(ctx, key, value, setOptions)
+}
+
+func (self *EtcdClient) CompareAndDelete(key string, prevValue string, prevIndex uint64) (*client.Response, error) {
+ delOptions := &client.DeleteOptions{
+ PrevValue: prevValue,
+ PrevIndex: prevIndex,
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), self.timeout)
+ defer cancel()
+ return self.kapi.Delete(ctx, key, delOptions)
+}
+
+func (self *EtcdClient) Watch(key string, waitIndex uint64, recursive bool) client.Watcher {
+ watchOptions := &client.WatcherOptions{
+ AfterIndex: waitIndex,
+ Recursive: recursive,
+ }
+ return self.kapi.Watcher(key, watchOptions)
+}
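For orientation, a minimal usage sketch of the EtcdClient wrapper added above. It assumes an etcd v2 server reachable at the given endpoint; the key and value are placeholders and error handling is abbreviated.

package main

import (
	"fmt"

	"github.com/youzan/ZanRedisDB/cluster"
)

func main() {
	ec, err := cluster.NewEClient("http://127.0.0.1:2379")
	if err != nil {
		panic(err)
	}
	// create the key only if it does not exist yet, with a 30 second TTL
	if _, err := ec.Create("/zanredisdb-demo/key", "node-1", 30); err != nil {
		fmt.Println("create failed (may already exist):", err)
	}
	// quorum read of the newest value
	rsp, err := ec.GetNewest("/zanredisdb-demo/key", false, false)
	if err != nil {
		panic(err)
	}
	fmt.Println("value:", rsp.Node.Value)
	// watch for changes after the observed index; call Next(ctx) in a loop to receive them
	_ = ec.Watch("/zanredisdb-demo/key", rsp.Node.ModifiedIndex, false)
}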
diff --git a/cluster/etcd_master_lock.go b/cluster/etcd_master_lock.go
new file mode 100644
index 00000000..beec8bf6
--- /dev/null
+++ b/cluster/etcd_master_lock.go
@@ -0,0 +1,267 @@
+// description: Utility to perform master election/failover using etcd.
+package cluster
+
+import (
+ "errors"
+ "fmt"
+ "runtime"
+ "sync"
+ "time"
+
+ "github.com/coreos/etcd/client"
+ "golang.org/x/net/context"
+)
+
+type EVENT_TYPE int
+
+const (
+ MASTER_ADD EVENT_TYPE = iota
+ MASTER_DELETE
+ MASTER_MODIFY
+ MASTER_ERROR
+)
+
+const (
+ RETRY_SLEEP = 200
+)
+
+type MasterEvent struct {
+ Type EVENT_TYPE
+ Master string
+ ModifiedIndex uint64
+}
+
+type Master interface {
+ Start()
+ Stop()
+ GetEventsChan() <-chan *MasterEvent
+ GetKey() string
+ GetMaster() string
+}
+
+type EtcdLock struct {
+ sync.Mutex
+
+ client *EtcdClient
+ name string
+ id string
+ ttl uint64
+ enable bool
+ master string
+ watchStopChan chan bool
+ eventsChan chan *MasterEvent
+ stoppedChan chan bool
+ refreshStoppedChan chan bool
+ ifHolding bool
+ modifiedIndex uint64
+}
+
+func NewMaster(etcdClient *EtcdClient, name, value string, ttl uint64) Master {
+ return &EtcdLock{
+ client: etcdClient,
+ name: name,
+ id: value,
+ ttl: ttl,
+ enable: false,
+ master: "",
+ watchStopChan: make(chan bool, 1),
+ eventsChan: make(chan *MasterEvent, 1),
+ stoppedChan: make(chan bool, 1),
+ refreshStoppedChan: make(chan bool, 1),
+ ifHolding: false,
+ modifiedIndex: 0,
+ }
+}
+
+func (self *EtcdLock) Start() {
+ coordLog.Infof("[EtcdLock][Start] start to acquire lock[%s] value[%s].", self.name, self.id)
+ self.Lock()
+ if self.enable {
+ self.Unlock()
+ return
+ }
+ self.enable = true
+ self.Unlock()
+
+ go func() {
+ for {
+ err := self.acquire()
+ if err == nil {
+ break
+ }
+ }
+ }()
+}
+
+func (self *EtcdLock) Stop() {
+ coordLog.Infof("[EtcdLock][Stop] stop acquire lock[%s].", self.name)
+ self.Lock()
+ if !self.enable {
+ self.Unlock()
+ return
+ }
+ self.enable = false
+ self.Unlock()
+
+ self.watchStopChan <- true
+ // wait for acquire to finish
+ <-self.stoppedChan
+}
+
+func (self *EtcdLock) GetEventsChan() <-chan *MasterEvent {
+ return self.eventsChan
+}
+
+func (self *EtcdLock) GetKey() string {
+ return self.name
+}
+
+func (self *EtcdLock) GetMaster() string {
+ self.Lock()
+ defer self.Unlock()
+ return self.master
+}
+
+func (self *EtcdLock) acquire() (ret error) {
+ defer func() {
+ if r := recover(); r != nil {
+ callers := ""
+ for i := 0; true; i++ {
+ _, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ callers = callers + fmt.Sprintf("%v:%v\n", file, line)
+ }
+ errMsg := fmt.Sprintf("[EtcdLock][acquire] Recovered from panic: %#v (%v)\n%v", r, r, callers)
+ coordLog.Errorf(errMsg)
+ ret = errors.New(errMsg)
+ }
+ }()
+
+ var rsp *client.Response
+ err := fmt.Errorf("Dummy error.")
+
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ select {
+ case <-self.watchStopChan:
+ cancel()
+ }
+ }()
+
+ for {
+ if !self.enable {
+ self.stopAcquire()
+ break
+ }
+
+ if err != nil || rsp == nil || rsp.Node == nil || rsp.Node.Value == "" {
+ rsp, err = self.client.Get(self.name, false, false)
+ if err != nil {
+ if client.IsKeyNotFound(err) {
+ coordLog.Infof("[EtcdLock][acquire] try to acquire lock[%s]", self.name)
+ rsp, err = self.client.Create(self.name, self.id, self.ttl)
+ if err != nil {
+ coordLog.Errorf("[EtcdLock][acquire] etcd create lock[%s] error: %s", self.name, err.Error())
+ continue
+ }
+ } else {
+ coordLog.Errorf("[EtcdLock][acquire] etcd get lock[%s] error: %s", self.name, err.Error())
+ time.Sleep(RETRY_SLEEP * time.Millisecond)
+ continue
+ }
+ }
+ }
+
+ self.processEtcdRsp(rsp)
+
+ self.Lock()
+ self.master = rsp.Node.Value
+ self.modifiedIndex = rsp.Node.ModifiedIndex
+ self.Unlock()
+
+		// normally we should watch from modifiedIndex, but if that index is outdated and cleared
+		// we should use the cluster index instead (in any case, use the larger of the two).
+		// Note that rsp.Index in a watch is the cluster index at the time the watch began, so it may be
+		// less than modifiedIndex, since the cluster index keeps increasing after the watch begins.
+ wi := rsp.Node.ModifiedIndex
+ if rsp.Index > wi {
+ wi = rsp.Index
+ coordLog.Infof("[EtcdLock] watch lock[%s] at cluster index: %v, modify index: %v", self.name, rsp.Index, rsp.Node.ModifiedIndex)
+ }
+ watcher := self.client.Watch(self.name, wi, false)
+ rsp, err = watcher.Next(ctx)
+ if err != nil {
+ if err == context.Canceled {
+ coordLog.Infof("[EtcdLock][acquire] watch lock[%s] stop by user.", self.name)
+ } else if err == context.DeadlineExceeded {
+ coordLog.Infof("[EtcdLock][acquire] watch lock[%s] timeout.", self.name)
+ } else {
+ coordLog.Infof("[EtcdLock][acquire] failed to watch lock[%s] error: %s", self.name, err.Error())
+ }
+ }
+ }
+
+ return nil
+}
+
+func (self *EtcdLock) processEtcdRsp(rsp *client.Response) {
+ if rsp.Node.Value == self.id {
+ if !self.ifHolding {
+ coordLog.Infof("[EtcdLock][processEtcdRsp] acquire lock: %s", self.name)
+ self.ifHolding = true
+ self.eventsChan <- &MasterEvent{Type: MASTER_ADD, Master: self.id, ModifiedIndex: rsp.Node.ModifiedIndex}
+ go self.refresh()
+ }
+ } else {
+ if self.ifHolding {
+ coordLog.Errorf("[EtcdLock][processEtcdRsp] lost lock: %s", self.name)
+ self.ifHolding = false
+ self.refreshStoppedChan <- true
+ self.eventsChan <- &MasterEvent{Type: MASTER_DELETE}
+ }
+ if self.master != rsp.Node.Value {
+ coordLog.Infof("[EtcdLock][processEtcdRsp] modify lock[%s] to master[%s]", self.name, rsp.Node.Value)
+ self.eventsChan <- &MasterEvent{Type: MASTER_MODIFY, Master: rsp.Node.Value, ModifiedIndex: rsp.Node.ModifiedIndex}
+ }
+ }
+}
+
+func (self *EtcdLock) stopAcquire() {
+ if self.ifHolding {
+ coordLog.Infof("[EtcdLock][stopAcquire] delete lock: %s", self.name)
+ _, err := self.client.Delete(self.name, false)
+ if err != nil {
+ coordLog.Errorf("[EtcdLock][stopAcquire] failed to delete lock: %s error: %s", self.name, err.Error())
+ }
+ self.ifHolding = false
+ self.refreshStoppedChan <- true
+ }
+ self.Lock()
+ self.master = ""
+ self.Unlock()
+ self.stoppedChan <- true
+}
+
+func (self *EtcdLock) refresh() {
+ for {
+ select {
+ case <-self.refreshStoppedChan:
+ coordLog.Infof("[EtcdLock][refresh] Stopping refresh for lock %s", self.name)
+ return
+ case <-time.After(time.Second * time.Duration(self.ttl*4/10)):
+ self.Lock()
+ modify := self.modifiedIndex
+ self.Unlock()
+ rsp, err := self.client.CompareAndSwap(self.name, self.id, self.ttl, self.id, modify)
+ if err != nil {
+ coordLog.Errorf("[EtcdLock][refresh] Failed to set ttl for lock[%s] error:%s", self.name, err.Error())
+ } else {
+ self.Lock()
+ self.modifiedIndex = rsp.Node.ModifiedIndex
+ self.Unlock()
+ }
+ }
+ }
+}
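A sketch of how the election helper above might be driven. The lock key, node id, and endpoint are placeholders; the TTL is in seconds and, per refresh() above, is renewed at roughly 40% of its value while the lock is held.

package main

import (
	"fmt"

	"github.com/youzan/ZanRedisDB/cluster"
)

func main() {
	// set a default logger as the coordinator tests above do
	cluster.SetLogger(0, nil)
	ec, err := cluster.NewEClient("http://127.0.0.1:2379")
	if err != nil {
		panic(err)
	}
	m := cluster.NewMaster(ec, "/zanredisdb-demo/pd-master", "node-1:18001", 15)
	m.Start()
	defer m.Stop()

	// the events channel reports leadership changes; this loop blocks for the process lifetime
	for ev := range m.GetEventsChan() {
		switch ev.Type {
		case cluster.MASTER_ADD:
			fmt.Println("acquired leadership as", ev.Master)
		case cluster.MASTER_DELETE:
			fmt.Println("lost leadership")
		case cluster.MASTER_MODIFY:
			fmt.Println("leader changed to", ev.Master)
		}
	}
}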
diff --git a/cluster/etcd_utils.go b/cluster/etcd_utils.go
new file mode 100644
index 00000000..450e869d
--- /dev/null
+++ b/cluster/etcd_utils.go
@@ -0,0 +1,37 @@
+package cluster
+
+import (
+ "net/url"
+
+ "github.com/coreos/etcd/client"
+)
+
+const (
+ ErrCodeEtcdNotReachable = 501
+ ErrCodeUnhandledHTTPStatus = 502
+)
+
+func initEtcdPeers(machines []string) error {
+ for i, ep := range machines {
+ u, err := url.Parse(ep)
+ if err != nil {
+ return err
+ }
+ if u.Scheme == "" {
+ u.Scheme = "http"
+ }
+ machines[i] = u.String()
+ }
+ return nil
+}
+
+func IsEtcdNotReachable(err error) bool {
+ if cErr, ok := err.(client.Error); ok {
+ return cErr.Code == ErrCodeEtcdNotReachable
+ }
+ return false
+}
+
+func IsEtcdWatchExpired(err error) bool {
+ return isEtcdErrorNum(err, client.ErrorCodeEventIndexCleared)
+}
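As a usage note for the helpers above, a watch loop typically pairs IsEtcdWatchExpired with a fresh quorum read and restarts the watch from the latest cluster index (EtcdLock above applies a similar pick-the-larger-index idea). A hedged sketch; the key is a placeholder and a reachable etcd v2 endpoint is assumed.

package main

import (
	"fmt"

	"github.com/youzan/ZanRedisDB/cluster"
	"golang.org/x/net/context"
)

func main() {
	ec, err := cluster.NewEClient("http://127.0.0.1:2379")
	if err != nil {
		panic(err)
	}
	w := ec.Watch("/zanredisdb-demo/key", 0, true)
	for {
		rsp, err := w.Next(context.Background())
		if err != nil {
			if cluster.IsEtcdWatchExpired(err) {
				// the watched index was compacted away; re-read and restart the watch
				latest, gerr := ec.GetNewest("/zanredisdb-demo/key", false, true)
				if gerr != nil {
					return
				}
				w = ec.Watch("/zanredisdb-demo/key", latest.Index, true)
				continue
			}
			return
		}
		fmt.Println(rsp.Action, rsp.Node.Key)
	}
}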
diff --git a/cluster/pdnode_coord/pd_api.go b/cluster/pdnode_coord/pd_api.go
index 56f0e64c..e60e6889 100644
--- a/cluster/pdnode_coord/pd_api.go
+++ b/cluster/pdnode_coord/pd_api.go
@@ -6,8 +6,8 @@ import (
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/cluster"
- "github.com/absolute8511/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/cluster"
+ "github.com/youzan/ZanRedisDB/common"
)
// some API for outside
@@ -78,6 +78,26 @@ func (pdCoord *PDCoordinator) SetClusterUpgradeState(upgrading bool) error {
return nil
}
+func (pdCoord *PDCoordinator) RemoveNamespaceFromNode(ns string, pidStr string, nid string) error {
+ if pdCoord.leaderNode.GetID() != pdCoord.myNode.GetID() {
+ cluster.CoordLog().Infof("not leader while delete namespace")
+ return ErrNotLeader
+ }
+ pid, err := strconv.Atoi(pidStr)
+ if err != nil {
+ return err
+ }
+ nsinfo, err := pdCoord.register.GetNamespacePartInfo(ns, pid)
+ if err != nil {
+ return err
+ }
+ coordErr := pdCoord.removeNamespaceFromNode(nsinfo, nid)
+ if coordErr != nil {
+ return coordErr.ToErrorType()
+ }
+ return nil
+}
+
func (pdCoord *PDCoordinator) MarkNodeAsRemoving(nid string) error {
if pdCoord.leaderNode.GetID() != pdCoord.myNode.GetID() {
cluster.CoordLog().Infof("not leader while delete namespace")
@@ -167,7 +187,7 @@ func (pdCoord *PDCoordinator) ChangeNamespaceMetaParam(namespace string, newRepl
var meta cluster.NamespaceMetaInfo
if ok, _ := pdCoord.register.IsExistNamespace(namespace); !ok {
- cluster.CoordLog().Infof("namespace not exist %v :%v", namespace)
+ cluster.CoordLog().Infof("namespace not exist %v ", namespace)
return cluster.ErrNamespaceNotCreated.ToErrorType()
} else {
oldMeta, err := pdCoord.register.GetNamespaceMetaInfo(namespace)
@@ -282,7 +302,7 @@ func (pdCoord *PDCoordinator) checkAndUpdateNamespacePartitions(currentNodes map
tmpReplicaInfo := partReplicaList[i]
if len(tmpReplicaInfo.GetISR()) <= meta.Replica/2 {
- cluster.CoordLog().Infof("failed update info for namespace : %v-%v since not quorum", namespace, i, tmpReplicaInfo)
+ cluster.CoordLog().Infof("failed update info for namespace : %v-%v since not quorum: %v", namespace, i, tmpReplicaInfo)
continue
}
commonErr := pdCoord.register.UpdateNamespacePartReplicaInfo(namespace, i, &tmpReplicaInfo, tmpReplicaInfo.Epoch())
@@ -312,7 +332,7 @@ func (pdCoord *PDCoordinator) RemoveLearnerFromNs(ns string, pidStr string, nid
return err
}
for i := 0; i < oldMeta.PartitionNum; i++ {
- err = pdCoord.removeNsLearnerFromNode(ns, i, nid)
+ err = pdCoord.removeNsLearnerFromNode(ns, i, nid, true)
if err != nil {
cluster.CoordLog().Infof("namespace %v-%v remove learner %v failed :%v", ns, i, nid, err)
return err
@@ -327,5 +347,5 @@ func (pdCoord *PDCoordinator) RemoveLearnerFromNs(ns string, pidStr string, nid
if err != nil {
return err
}
- return pdCoord.removeNsLearnerFromNode(ns, pid, nid)
+ return pdCoord.removeNsLearnerFromNode(ns, pid, nid, false)
}
diff --git a/cluster/pdnode_coord/pd_coordinator.go b/cluster/pdnode_coord/pd_coordinator.go
index a5bd2fcd..1aac7818 100644
--- a/cluster/pdnode_coord/pd_coordinator.go
+++ b/cluster/pdnode_coord/pd_coordinator.go
@@ -7,12 +7,13 @@ import (
"path"
"runtime"
"strconv"
+ "strings"
"sync"
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/cluster"
- "github.com/absolute8511/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/cluster"
+ "github.com/youzan/ZanRedisDB/common"
)
var (
@@ -33,19 +34,19 @@ var (
var (
waitMigrateInterval = time.Minute * 16
- waitEmergencyMigrateInterval = time.Second * 60
waitRemoveRemovingNodeInterval = time.Minute * 5
nsCheckInterval = time.Minute
nsCheckLearnerInterval = time.Second * 10
balanceCheckInterval = time.Minute * 10
+ checkRemovingNodeInterval = time.Minute
)
func ChangeIntervalForTest() {
- waitMigrateInterval = time.Second * 3
- waitEmergencyMigrateInterval = time.Second
- waitRemoveRemovingNodeInterval = time.Second * 3
+ waitMigrateInterval = time.Second * 10
+ waitRemoveRemovingNodeInterval = time.Second * 5
nsCheckInterval = time.Second
balanceCheckInterval = time.Second * 5
+ checkRemovingNodeInterval = time.Second * 5
}
type PDCoordinator struct {
@@ -71,6 +72,7 @@ type PDCoordinator struct {
stableNodeNum int32
dataDir string
learnerRole string
+ filterNamespaces map[string]bool
}
func NewPDCoordinator(clusterID string, n *cluster.NodeInfo, opts *cluster.Options) *PDCoordinator {
@@ -90,6 +92,7 @@ func NewPDCoordinator(clusterID string, n *cluster.NodeInfo, opts *cluster.Optio
stopChan: make(chan struct{}),
monitorChan: make(chan struct{}),
learnerRole: n.LearnerRole,
+ filterNamespaces: make(map[string]bool),
}
coord.dpm = NewDataPlacement(coord)
if opts != nil {
@@ -97,7 +100,17 @@ func NewPDCoordinator(clusterID string, n *cluster.NodeInfo, opts *cluster.Optio
if opts.AutoBalanceAndMigrate {
coord.autoBalance = 1
}
+ coord.dpm.balanceVer = opts.BalanceVer
+ if coord.dpm.balanceVer == "" {
+ coord.dpm.balanceVer = BalanceV2Str
+ }
coord.dataDir = opts.DataDir
+ nss := strings.Split(opts.FilterNamespaces, ",")
+ for _, ns := range nss {
+ if len(ns) > 0 {
+ coord.filterNamespaces[ns] = true
+ }
+ }
}
return coord
}
@@ -258,7 +271,10 @@ func (pdCoord *PDCoordinator) notifyLeaderChanged(monitorChan chan struct{}) {
if pdCoord.register != nil {
newNamespaces, _, err := pdCoord.register.GetAllNamespaces()
if err != nil {
- cluster.CoordLog().Errorf("load namespace info failed: %v", err)
+ // may not init any yet.
+ if err != cluster.ErrKeyNotFound {
+ cluster.CoordLog().Infof("load namespace info failed: %v", err)
+ }
} else {
cluster.CoordLog().Infof("namespace loaded : %v", len(newNamespaces))
// save to file in case of etcd data disaster
@@ -455,19 +471,146 @@ func (pdCoord *PDCoordinator) handleDataNodes(monitorChan chan struct{}, isMaste
}
}
+func (pdCoord *PDCoordinator) checkIfAnyPending(removingNodes map[string]string, allNamespaces map[string]map[int]cluster.PartitionMetaInfo) bool {
+ for nid := range removingNodes {
+ for _, namespacePartList := range allNamespaces {
+ for _, tmpNsInfo := range namespacePartList {
+ namespaceInfo := *(tmpNsInfo.GetCopy())
+ if _, ok := namespaceInfo.Removings[nid]; ok {
+ cluster.CoordLog().Infof("namespace %v data on node %v is in removing, waiting", namespaceInfo.GetDesp(), nid)
+ removingNodes[nid] = "pending"
+ return true
+ }
+ ok, err := IsAllISRFullReady(&namespaceInfo)
+ if err != nil || !ok {
+ cluster.CoordLog().Infof("namespace %v isr is not full ready: %v", namespaceInfo.GetDesp(), err)
+ removingNodes[nid] = "pending"
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func (pdCoord *PDCoordinator) processRemovingNodes(monitorChan chan struct{}, removingNodes map[string]string) {
+ if !atomic.CompareAndSwapInt32(&pdCoord.balanceWaiting, 0, 1) {
+ cluster.CoordLog().Infof("another balance is running, should wait")
+ return
+ }
+ defer atomic.StoreInt32(&pdCoord.balanceWaiting, 0)
+
+ anyStateChanged := false
+ currentNodes := pdCoord.getCurrentNodes(nil)
+ nodeNameList := getNodeNameList(currentNodes)
+
+ allNamespaces, _, err := pdCoord.register.GetAllNamespaces()
+ if err != nil {
+ return
+ }
+ if pdCoord.checkIfAnyPending(removingNodes, allNamespaces) {
+ pdCoord.nodesMutex.Lock()
+ pdCoord.removingNodes = removingNodes
+ pdCoord.nodesMutex.Unlock()
+ return
+ }
+ anyPending := false
+ for nid := range removingNodes {
+ cluster.CoordLog().Infof("handle removing node %v ", nid)
+ // only check the namespace with one replica left
+ // because the doCheckNamespaces will check the others
+ // we add a new replica for the removing node
+
+		// to avoid too much migration, we break early if any pending migration is found
+ for _, namespacePartList := range allNamespaces {
+ for _, tmpNsInfo := range namespacePartList {
+ namespaceInfo := *(tmpNsInfo.GetCopy())
+ if _, ok := namespaceInfo.Removings[nid]; ok {
+ cluster.CoordLog().Infof("namespace %v data on node %v is in removing, waiting", namespaceInfo.GetDesp(), nid)
+ anyPending = true
+ if removingNodes[nid] != "pending" {
+ removingNodes[nid] = "pending"
+ anyStateChanged = true
+ }
+ break
+ }
+ if cluster.FindSlice(namespaceInfo.RaftNodes, nid) == -1 {
+ continue
+ }
+ if anyPending {
+ // waiting other pending
+ if removingNodes[nid] != "pending" {
+ removingNodes[nid] = "pending"
+ anyStateChanged = true
+ }
+ break
+ }
+ anyPending = true
+ if removingNodes[nid] != "pending" {
+ removingNodes[nid] = "pending"
+ anyStateChanged = true
+ }
+ if len(namespaceInfo.GetISR()) <= namespaceInfo.Replica {
+ newInfo, err := pdCoord.dpm.addNodeToNamespaceAndWaitReady(monitorChan, &namespaceInfo,
+ nodeNameList)
+ if err != nil {
+ cluster.CoordLog().Infof("namespace %v data on node %v transferred failed, waiting next time: %v, %v",
+ namespaceInfo.GetDesp(), nid, err.Error(), namespaceInfo)
+ break
+ } else if newInfo != nil {
+ namespaceInfo = *newInfo
+ }
+ cluster.CoordLog().Infof("namespace %v data on node %v transferred success", namespaceInfo.GetDesp(), nid)
+ }
+ ok, err := IsAllISRFullReady(&namespaceInfo)
+ if err != nil || !ok {
+ cluster.CoordLog().Infof("namespace %v isr is not full ready: %v", namespaceInfo.GetDesp(), err)
+ break
+ }
+ cluster.CoordLog().Infof("namespace %v data on node %v removing", namespaceInfo.GetDesp(), nid)
+ pdCoord.removeNamespaceFromNode(&namespaceInfo, nid)
+ }
+ }
+ if !anyPending {
+ anyStateChanged = true
+ cluster.CoordLog().Infof("node %v data has been transferred, it can be removed from cluster: state: %v", nid, removingNodes[nid])
+ if removingNodes[nid] != "data_transferred" && removingNodes[nid] != "done" {
+ removingNodes[nid] = "data_transferred"
+ } else {
+ if removingNodes[nid] == "data_transferred" {
+ removingNodes[nid] = "done"
+ } else if removingNodes[nid] == "done" {
+ pdCoord.nodesMutex.Lock()
+ _, ok := pdCoord.dataNodes[nid]
+ if !ok {
+ delete(removingNodes, nid)
+ cluster.CoordLog().Infof("the node %v is removed finally since not alive in cluster", nid)
+ }
+ pdCoord.nodesMutex.Unlock()
+ }
+ }
+ }
+ }
+
+ if anyStateChanged {
+ pdCoord.nodesMutex.Lock()
+ pdCoord.removingNodes = removingNodes
+ pdCoord.nodesMutex.Unlock()
+ }
+}
+
func (pdCoord *PDCoordinator) handleRemovingNodes(monitorChan chan struct{}) {
cluster.CoordLog().Debugf("start handle the removing nodes.")
defer func() {
cluster.CoordLog().Infof("stop handle the removing nodes.")
}()
- ticker := time.NewTicker(time.Minute)
+ ticker := time.NewTicker(checkRemovingNodeInterval)
defer ticker.Stop()
for {
select {
case <-monitorChan:
return
case <-ticker.C:
- anyStateChanged := false
pdCoord.nodesMutex.RLock()
removingNodes := make(map[string]string)
for nid, removeState := range pdCoord.removingNodes {
@@ -478,96 +621,9 @@ func (pdCoord *PDCoordinator) handleRemovingNodes(monitorChan chan struct{}) {
if len(removingNodes) == 0 {
continue
}
- currentNodes := pdCoord.getCurrentNodes(nil)
- nodeNameList := getNodeNameList(currentNodes)
-
- allNamespaces, _, err := pdCoord.register.GetAllNamespaces()
- if err != nil {
- continue
- }
- for nid := range removingNodes {
- anyPending := false
- cluster.CoordLog().Infof("handle removing node %v ", nid)
- // only check the namespace with one replica left
- // because the doCheckNamespaces will check the others
- // we add a new replica for the removing node
-
- // to avoid too much migration, we break early if any pending migration found
- for _, namespacePartList := range allNamespaces {
- for _, tmpNsInfo := range namespacePartList {
- namespaceInfo := *(tmpNsInfo.GetCopy())
- if cluster.FindSlice(namespaceInfo.RaftNodes, nid) == -1 {
- continue
- }
- if _, ok := namespaceInfo.Removings[nid]; ok {
- cluster.CoordLog().Infof("namespace %v data on node %v is in removing, waiting", namespaceInfo.GetDesp(), nid)
- anyPending = true
- break
- }
- if anyPending {
- // waiting other pending
- break
- }
- if len(namespaceInfo.GetISR()) <= namespaceInfo.Replica {
- anyPending = true
- // find new catchup and wait isr ready
- removingNodes[nid] = "pending"
- newInfo, err := pdCoord.dpm.addNodeToNamespaceAndWaitReady(monitorChan, &namespaceInfo,
- nodeNameList)
- if err != nil {
- cluster.CoordLog().Infof("namespace %v data on node %v transferred failed, waiting next time", namespaceInfo.GetDesp(), nid)
- break
- } else if newInfo != nil {
- namespaceInfo = *newInfo
- }
- cluster.CoordLog().Infof("namespace %v data on node %v transferred success", namespaceInfo.GetDesp(), nid)
- anyStateChanged = true
- }
- ok, err := IsAllISRFullReady(&namespaceInfo)
- if err != nil || !ok {
- cluster.CoordLog().Infof("namespace %v isr is not full ready: %v", namespaceInfo.GetDesp(), err)
- anyPending = true
- if removingNodes[nid] != "pending" {
- removingNodes[nid] = "pending"
- anyStateChanged = true
- }
- break
- }
- coordErr := pdCoord.removeNamespaceFromNode(&namespaceInfo, nid)
- if coordErr != nil {
- anyPending = true
- } else if _, waitingRemove := namespaceInfo.Removings[nid]; waitingRemove {
- anyPending = true
- }
- }
- if !anyPending {
- anyStateChanged = true
- cluster.CoordLog().Infof("node %v data has been transferred, it can be removed from cluster: state: %v", nid, removingNodes[nid])
- if removingNodes[nid] != "data_transferred" && removingNodes[nid] != "done" {
- removingNodes[nid] = "data_transferred"
- } else {
- if removingNodes[nid] == "data_transferred" {
- removingNodes[nid] = "done"
- } else if removingNodes[nid] == "done" {
- pdCoord.nodesMutex.Lock()
- _, ok := pdCoord.dataNodes[nid]
- if !ok {
- delete(removingNodes, nid)
- cluster.CoordLog().Infof("the node %v is removed finally since not alive in cluster", nid)
- }
- pdCoord.nodesMutex.Unlock()
- }
- }
- }
- }
- }
-
- if anyStateChanged {
- pdCoord.nodesMutex.Lock()
- pdCoord.removingNodes = removingNodes
- pdCoord.nodesMutex.Unlock()
- }
+ pdCoord.processRemovingNodes(monitorChan, removingNodes)
}
+
}
}
@@ -630,8 +686,10 @@ func (pdCoord *PDCoordinator) doCheckNamespaces(monitorChan chan struct{}, faile
if failedInfo == nil || failedInfo.NamespaceName == "" || failedInfo.NamespacePartition < 0 {
allNamespaces, _, commonErr := pdCoord.register.GetAllNamespaces()
if commonErr != nil {
- cluster.CoordLog().Infof("scan namespaces failed. %v", commonErr)
- atomic.StoreInt32(&pdCoord.isClusterUnstable, 1)
+ if commonErr != cluster.ErrKeyNotFound {
+ cluster.CoordLog().Infof("scan namespaces failed. %v", commonErr)
+ atomic.StoreInt32(&pdCoord.isClusterUnstable, 1)
+ }
return
}
for n, parts := range allNamespaces {
@@ -746,9 +804,7 @@ func (pdCoord *PDCoordinator) doCheckNamespaces(monitorChan chan struct{}, faile
continue
}
failedTime := partitions[nsInfo.Partition]
- emergency := (aliveCount <= nsInfo.Replica/2) && failedTime.Before(time.Now().Add(-1*waitEmergencyMigrateInterval))
- if emergency ||
- failedTime.Before(time.Now().Add(-1*waitMigrateInterval)) {
+ if failedTime.Before(time.Now().Add(-1 * waitMigrateInterval)) {
aliveNodes, aliveEpoch := pdCoord.getCurrentNodesWithEpoch(nsInfo.Tags)
if aliveEpoch != currentNodesEpoch {
go pdCoord.triggerCheckNamespaces(nsInfo.Name, nsInfo.Partition, time.Second)
@@ -757,9 +813,6 @@ func (pdCoord *PDCoordinator) doCheckNamespaces(monitorChan chan struct{}, faile
cluster.CoordLog().Infof("begin migrate the namespace :%v", nsInfo.GetDesp())
if coordErr := pdCoord.handleNamespaceMigrate(&nsInfo, aliveNodes, aliveEpoch); coordErr != nil {
atomic.StoreInt32(&pdCoord.isClusterUnstable, 1)
- if emergency {
- go pdCoord.triggerCheckNamespaces(nsInfo.Name, nsInfo.Partition, time.Second*3)
- }
continue
} else {
delete(partitions, nsInfo.Partition)
@@ -822,6 +875,7 @@ func (pdCoord *PDCoordinator) handleNamespaceMigrate(origNSInfo *cluster.Partiti
return cluster.ErrClusterChanged
}
if len(origNSInfo.Removings) > 0 {
+ cluster.CoordLog().Infof("namespace: %v still waiting removing node: %v", origNSInfo.GetDesp(), origNSInfo.Removings)
return ErrNamespaceMigrateWaiting
}
isrChanged := false
@@ -830,7 +884,16 @@ func (pdCoord *PDCoordinator) handleNamespaceMigrate(origNSInfo *cluster.Partiti
for _, replica := range nsInfo.RaftNodes {
if _, ok := currentNodes[replica]; ok {
aliveReplicas++
+			// if another alive replica is not synced, the raft group may be unstable,
+			// so we should not remove any node until the other replicas become stable, to avoid breaking the raft group.
+ synced, err := IsRaftNodeSynced(nsInfo, replica)
+ if err != nil || !synced {
+ cluster.CoordLog().Infof("namespace: %v replica %v is not synced while removing node, need wait", nsInfo.GetDesp(), replica)
+ return ErrNamespaceMigrateWaiting
+ }
} else {
+ cluster.CoordLog().Infof("failed raft node %v for namespace: %v",
+ replica, nsInfo.GetDesp())
if nsInfo.Removings == nil {
nsInfo.Removings = make(map[string]cluster.RemovingInfo)
}
@@ -846,6 +909,12 @@ func (pdCoord *PDCoordinator) handleNamespaceMigrate(origNSInfo *cluster.Partiti
}
}
+ // avoid removing any node if current alive replicas is not enough
+ if len(nsInfo.Removings) > 0 && aliveReplicas <= nsInfo.Replica/2 {
+ cluster.CoordLog().Infof("namespace: %v alive replica %v is not enough while removing node", nsInfo.GetDesp(), aliveReplicas)
+ return ErrNamespaceMigrateWaiting
+ }
+
if len(currentNodes) < nsInfo.Replica && len(nsInfo.Removings) > 0 {
cluster.CoordLog().Warningf("no enough alive nodes %v for namespace %v replica: %v",
len(currentNodes), nsInfo.GetDesp(), nsInfo.Replica)
@@ -877,6 +946,10 @@ func (pdCoord *PDCoordinator) handleNamespaceMigrate(origNSInfo *cluster.Partiti
// So we should add new replica before we continue remove replica.
// However, if there is a failed node in (1, 2) replicas, we can not add new because we can not have quorum voters in raft.
// In this way, we need wait failed node restart or we manual force recovery with standalone node.
+
+	// TODO: Consider the case where we force-init a standalone raft group with only 1 replica, but the
+	// desired replica count is 4 or more. We will add one node and the ISR will be only 2 (not a quorum), and
+	// we should allow the standalone raft group to grow as needed.
if isrChanged && nsInfo.IsISRQuorum() {
if len(nsInfo.Removings) > 1 {
cluster.CoordLog().Infof("namespace should not have two removing nodes: %v", nsInfo)
@@ -891,6 +964,7 @@ func (pdCoord *PDCoordinator) handleNamespaceMigrate(origNSInfo *cluster.Partiti
cluster.CoordLog().Infof("namespace %v migrate to replicas : %v", nsInfo.GetDesp(), nsInfo.RaftNodes)
*origNSInfo = *nsInfo
} else {
+ cluster.CoordLog().Infof("namespace %v waiting migrate : %v", nsInfo.GetDesp(), nsInfo)
return ErrNamespaceMigrateWaiting
}
return nil
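The removal handling above (processRemovingNodes) advances each node through a small set of states: it stays "pending" while any of its namespaces still has data to migrate, then moves to "data_transferred", then "done", and is finally dropped from removingNodes once it is no longer alive in the cluster. A condensed, dependency-free sketch of just those transitions (the real code interleaves the migration and ISR checks); advance is an illustrative name.

package main

import "fmt"

// advance models the per-node removal states used above: a node stays "pending"
// while any of its namespaces still needs migration, then moves through
// "data_transferred" and "done" before it is finally dropped from the map.
func advance(state string, anyPending bool, stillAlive bool) (next string, removed bool) {
	if anyPending {
		return "pending", false
	}
	switch state {
	case "data_transferred":
		return "done", false
	case "done":
		if !stillAlive {
			return "done", true // delete the node from removingNodes
		}
		return "done", false
	default: // "" or "pending"
		return "data_transferred", false
	}
}

func main() {
	s := ""
	for _, step := range []struct{ pending, alive bool }{
		{true, true}, {false, true}, {false, true}, {false, false},
	} {
		var removed bool
		s, removed = advance(s, step.pending, step.alive)
		fmt.Println(s, removed)
	}
}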
diff --git a/cluster/pdnode_coord/pd_learner_coord.go b/cluster/pdnode_coord/pd_learner_coord.go
index b51772bf..bb33d17a 100644
--- a/cluster/pdnode_coord/pd_learner_coord.go
+++ b/cluster/pdnode_coord/pd_learner_coord.go
@@ -5,7 +5,11 @@ import (
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/cluster"
+ "github.com/youzan/ZanRedisDB/cluster"
+)
+
+const (
+ pdRegisterKVLearnerStartPrefix = "placedriver:learner:need_start_learner:"
)
func (pdCoord *PDCoordinator) checkNamespacesForLearner(monitorChan chan struct{}) {
@@ -28,13 +32,74 @@ func (pdCoord *PDCoordinator) checkNamespacesForLearner(monitorChan chan struct{
}
}
+func (pdCoord *PDCoordinator) cleanAllLearners(ns string) error {
+ oldMeta, err := pdCoord.register.GetNamespaceMetaInfo(ns)
+ if err != nil {
+ cluster.CoordLog().Infof("get namespace key %v failed :%v", ns, err)
+ return err
+ }
+ for i := 0; i < oldMeta.PartitionNum; i++ {
+ origNSInfo, err := pdCoord.register.GetNamespacePartInfo(ns, i)
+ if err != nil {
+ cluster.CoordLog().Infof("get namespace %v info failed :%v", ns, err)
+ continue
+ }
+ err = pdCoord.removeNsAllLearners(origNSInfo)
+ if err != nil {
+ cluster.CoordLog().Infof("namespace %v-%v remove learner failed :%v", ns, i, err.Error())
+ return err
+ }
+ }
+ return nil
+}
+
+// use this to start or stop the learner without restarting the node
+func (pdCoord *PDCoordinator) SwitchStartLearner(enable bool) error {
+ if pdCoord.learnerRole == "" {
+ return nil
+ }
+ value := "false"
+ if enable {
+ value = "true"
+ }
+ return pdCoord.register.SaveKV(pdRegisterKVLearnerStartPrefix+pdCoord.learnerRole, value)
+}
+
+func (pdCoord *PDCoordinator) GetLearnerRunningState() (bool, error) {
+ v, err := pdCoord.register.GetKV(pdRegisterKVLearnerStartPrefix + pdCoord.learnerRole)
+	// treat any value other than "false" as running, so a stopped state is always an explicit "false"
+ return v != "false", err
+}
+
func (pdCoord *PDCoordinator) doCheckNamespacesForLearner(monitorChan chan struct{}) {
+ needLearner, err := pdCoord.register.GetKV(pdRegisterKVLearnerStartPrefix + pdCoord.learnerRole)
+ if err != nil {
+ cluster.CoordLog().Infof("learner start state (should be one of true/false) read error: %v", err.Error())
+ return
+ }
namespaces := []cluster.PartitionMetaInfo{}
allNamespaces, _, commonErr := pdCoord.register.GetAllNamespaces()
if commonErr != nil {
cluster.CoordLog().Infof("scan namespaces failed. %v", commonErr)
return
}
+ if needLearner == "false" {
+ // remove learners from meta info
+ // to make sure the learner node is stopped, you should
+ // query the api on the learner node.
+		for ns := range allNamespaces {
+ err = pdCoord.cleanAllLearners(ns)
+ if err != nil {
+ cluster.CoordLog().Infof("clean namespace %s learner failed. %v", ns, err.Error())
+ return
+ }
+ }
+ return
+ }
+ if needLearner != "true" {
+ cluster.CoordLog().Infof("unexpected learner start state (should be one of true/false) : %v", needLearner)
+ return
+ }
for _, parts := range allNamespaces {
for _, p := range parts {
namespaces = append(namespaces, *(p.GetCopy()))
@@ -68,6 +133,13 @@ func (pdCoord *PDCoordinator) doCheckNamespacesForLearner(monitorChan chan struc
// wait enough isr for leader election before we add learner
continue
}
+			// skip filtered namespaces
+ if _, ok := pdCoord.filterNamespaces[nsInfo.Name]; ok {
+				// clean up learners left over from an old configuration (the new configuration may filter a
+				// namespace that already had learners added)
+ pdCoord.removeNsAllLearners(&nsInfo)
+ continue
+ }
// check current learner node alive
newMaster := ""
learnerIDs := nsInfo.LearnerNodes[pdCoord.learnerRole]
@@ -161,22 +233,27 @@ func (pdCoord *PDCoordinator) addNsLearnerToNode(origNSInfo *cluster.PartitionMe
}
// remove learner should be manual since learner is not expected to change too often
-func (pdCoord *PDCoordinator) removeNsLearnerFromNode(ns string, pid int, nid string) error {
+func (pdCoord *PDCoordinator) removeNsLearnerFromNode(ns string, pid int, nid string, checkNode bool) error {
origNSInfo, err := pdCoord.register.GetNamespacePartInfo(ns, pid)
if err != nil {
return err
}
nsInfo := origNSInfo.GetCopy()
- currentNodes, _ := pdCoord.getCurrentLearnerNodes()
- if _, ok := currentNodes[nid]; ok {
- cluster.CoordLog().Infof("namespace %v: mark learner node %v removing before stopped", nsInfo.GetDesp(), nid)
- return errors.New("removing learner node should be stopped first")
+ if checkNode {
+ currentNodes, _ := pdCoord.getCurrentLearnerNodes()
+ if _, ok := currentNodes[nid]; ok {
+ cluster.CoordLog().Infof("namespace %v: mark learner node %v removing before stopped", nsInfo.GetDesp(), nid)
+ return errors.New("removing learner node should be stopped first")
+ }
}
role := pdCoord.learnerRole
cluster.CoordLog().Infof("namespace %v: mark learner role %v node %v removing , current : %v", nsInfo.GetDesp(), role, nid,
nsInfo.LearnerNodes)
+ if nsInfo.LearnerNodes == nil {
+ nsInfo.LearnerNodes = make(map[string][]string)
+ }
old := nsInfo.LearnerNodes[role]
newLrns := make([]string, 0, len(old))
for _, oid := range old {
@@ -188,9 +265,6 @@ func (pdCoord *PDCoordinator) removeNsLearnerFromNode(ns string, pid int, nid st
if len(old) == len(newLrns) {
return errors.New("remove node id is not in learners")
}
- if nsInfo.LearnerNodes == nil {
- nsInfo.LearnerNodes = make(map[string][]string)
- }
nsInfo.LearnerNodes[role] = newLrns
delete(nsInfo.RaftIDs, nid)
@@ -206,3 +280,33 @@ func (pdCoord *PDCoordinator) removeNsLearnerFromNode(ns string, pid int, nid st
}
return nil
}
+
+func (pdCoord *PDCoordinator) removeNsAllLearners(origNSInfo *cluster.PartitionMetaInfo) error {
+ nsInfo := origNSInfo.GetCopy()
+ role := pdCoord.learnerRole
+ if nsInfo.LearnerNodes == nil {
+ nsInfo.LearnerNodes = make(map[string][]string)
+ }
+ old := nsInfo.LearnerNodes[role]
+ if len(old) == 0 {
+ return nil
+ }
+ cluster.CoordLog().Infof("namespace %v: removing all learner role %v nodes %v",
+ nsInfo.GetDesp(), role, old)
+ for _, nid := range old {
+ delete(nsInfo.RaftIDs, nid)
+ }
+ nsInfo.LearnerNodes[role] = make([]string, 0)
+
+ err := pdCoord.register.UpdateNamespacePartReplicaInfo(nsInfo.Name, nsInfo.Partition,
+ &nsInfo.PartitionReplicaInfo, nsInfo.PartitionReplicaInfo.Epoch())
+ if err != nil {
+ cluster.CoordLog().Infof("update namespace %v replica info failed: %v", nsInfo.GetDesp(), err.Error())
+ return err
+ } else {
+ cluster.CoordLog().Infof("namespace %v: removed all learner role %v node:%v", nsInfo.GetDesp(),
+ role, old)
+ *origNSInfo = *nsInfo
+ }
+ return nil
+}
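The switch added above stores the literal strings "true"/"false" under a per-role key; GetLearnerRunningState deliberately treats anything other than "false" as running, while doCheckNamespacesForLearner only proceeds on an exact "true". A tiny sketch of the reported-state rule; learnerRunning is an illustrative name mirroring GetLearnerRunningState.

package main

import "fmt"

// learnerRunning mirrors GetLearnerRunningState above: only the explicit
// literal "false" reports a stopped learner, so a missing or garbled value
// never silently reports the learner as stopped.
func learnerRunning(v string) bool { return v != "false" }

func main() {
	for _, v := range []string{"true", "false", "", "garbage"} {
		fmt.Printf("%q -> running=%v\n", v, learnerRunning(v))
	}
}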
diff --git a/cluster/pdnode_coord/pd_schema_coord.go b/cluster/pdnode_coord/pd_schema_coord.go
index 4bd84331..d232a042 100644
--- a/cluster/pdnode_coord/pd_schema_coord.go
+++ b/cluster/pdnode_coord/pd_schema_coord.go
@@ -6,8 +6,8 @@ import (
"net"
"time"
- "github.com/absolute8511/ZanRedisDB/cluster"
- "github.com/absolute8511/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/cluster"
+ "github.com/youzan/ZanRedisDB/common"
)
var (
@@ -19,7 +19,7 @@ func getIndexSchemasFromDataNode(remoteNode string, ns string) (map[string]*comm
rsp := make(map[string]*common.IndexSchema)
_, err := common.APIRequest("GET",
"http://"+net.JoinHostPort(nip, httpPort)+common.APIGetIndexes+"/"+ns,
- nil, time.Second*3, &rsp)
+ nil, cluster.APIShortTo, &rsp)
if err != nil {
cluster.CoordLog().Infof("failed (%v) to get indexes for namespace %v : %v",
nip, ns, err)
diff --git a/cluster/pdnode_coord/place_driver.go b/cluster/pdnode_coord/place_driver.go
index 64db1bd2..a4735e5f 100644
--- a/cluster/pdnode_coord/place_driver.go
+++ b/cluster/pdnode_coord/place_driver.go
@@ -10,9 +10,15 @@ import (
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/cluster"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/spaolacci/murmur3"
+ "github.com/emirpasic/gods/maps/treemap"
+ "github.com/emirpasic/gods/utils"
+ "github.com/twmb/murmur3"
+ "github.com/youzan/ZanRedisDB/cluster"
+ "github.com/youzan/ZanRedisDB/common"
+)
+
+const (
+ BalanceV2Str = "v2"
)
var (
@@ -90,7 +96,7 @@ func IsRaftNodeJoined(nsInfo *cluster.PartitionMetaInfo, nid string) (bool, erro
var rsp []*common.MemberInfo
_, err := common.APIRequest("GET",
"http://"+net.JoinHostPort(nip, httpPort)+common.APIGetMembers+"/"+nsInfo.GetDesp(),
- nil, time.Second*3, &rsp)
+ nil, cluster.APIShortTo, &rsp)
if err != nil {
cluster.CoordLog().Infof("failed (%v) to get members for namespace %v: %v", nip, nsInfo.GetDesp(), err)
lastErr = err
@@ -107,7 +113,7 @@ func IsRaftNodeJoined(nsInfo *cluster.PartitionMetaInfo, nid string) (bool, erro
return false, lastErr
}
-// query the raft peers if the nid already in the raft group for the namespace and all logs synced in all peers
+// query the raft peers to check whether all the ISR nodes are already in the raft group for the namespace and all logs are synced on all peers
func IsAllISRFullReady(nsInfo *cluster.PartitionMetaInfo) (bool, error) {
for _, nid := range nsInfo.GetISR() {
ok, err := IsRaftNodeFullReady(nsInfo, nid)
@@ -128,7 +134,7 @@ func IsRaftNodeFullReady(nsInfo *cluster.PartitionMetaInfo, nid string) (bool, e
var rsp []*common.MemberInfo
_, err := common.APIRequest("GET",
"http://"+net.JoinHostPort(nip, httpPort)+common.APIGetMembers+"/"+nsInfo.GetDesp(),
- nil, time.Second*3, &rsp)
+ nil, cluster.APIShortTo, &rsp)
if err != nil {
cluster.CoordLog().Infof("failed (%v) to get members for namespace %v: %v", nip, nsInfo.GetDesp(), err)
return false, err
@@ -147,7 +153,7 @@ func IsRaftNodeFullReady(nsInfo *cluster.PartitionMetaInfo, nid string) (bool, e
}
_, err = common.APIRequest("GET",
"http://"+net.JoinHostPort(nip, httpPort)+common.APIIsRaftSynced+"/"+nsInfo.GetDesp(),
- nil, time.Second*5, nil)
+ nil, cluster.APILongTo, nil)
if err != nil {
cluster.CoordLog().Infof("failed (%v) to check sync state for namespace %v: %v", nip, nsInfo.GetDesp(), err)
return false, err
@@ -156,8 +162,22 @@ func IsRaftNodeFullReady(nsInfo *cluster.PartitionMetaInfo, nid string) (bool, e
return true, nil
}
+// check if the given node is raft synced for the namespace-partition
+func IsRaftNodeSynced(nsInfo *cluster.PartitionMetaInfo, nid string) (bool, error) {
+ nip, _, _, httpPort := cluster.ExtractNodeInfoFromID(nid)
+ _, err := common.APIRequest("GET",
+ "http://"+net.JoinHostPort(nip, httpPort)+common.APIIsRaftSynced+"/"+nsInfo.GetDesp(),
+ nil, cluster.APILongTo, nil)
+ if err != nil {
+ cluster.CoordLog().Infof("failed (%v) to check sync state for namespace %v: %v", nip, nsInfo.GetDesp(), err)
+ return false, err
+ }
+ return true, nil
+}
+
type DataPlacement struct {
balanceInterval [2]int32
+ balanceVer string
pdCoord *PDCoordinator
}
@@ -221,16 +241,20 @@ func (dp *DataPlacement) addNodeToNamespaceAndWaitReady(monitorChan chan struct{
currentSelect := 0
namespaceName := namespaceInfo.Name
partitionID := namespaceInfo.Partition
- // since we need add new catchup, we make the replica as replica+1
+ oldParts, coordErr := dp.getCurrentPartitionNodes(namespaceInfo.Name)
+ if coordErr != nil {
+ return namespaceInfo, coordErr.ToErrorType()
+ }
partitionNodes, coordErr := getRebalancedPartitionsFromNameList(
namespaceInfo.Name,
namespaceInfo.PartitionNum,
- namespaceInfo.Replica+1, nodeNameList)
+ namespaceInfo.Replica, oldParts, nodeNameList, dp.balanceVer)
if coordErr != nil {
return namespaceInfo, coordErr.ToErrorType()
}
fullName := namespaceInfo.GetDesp()
selectedCatchup := make([]string, 0)
+	// we need to add a new catchup node, chosen from the diff between the new ISR and the old ISR
for _, nid := range partitionNodes[namespaceInfo.Partition] {
if cluster.FindSlice(namespaceInfo.RaftNodes, nid) != -1 {
// already isr, ignore add catchup
@@ -306,10 +330,14 @@ func (dp *DataPlacement) allocNodeForNamespace(namespaceInfo *cluster.PartitionM
return nil, cluster.ErrRegisterServiceUnstable
}
+ oldParts, coordErr := dp.getCurrentPartitionNodes(namespaceInfo.Name)
+ if coordErr != nil {
+ return nil, coordErr
+ }
partitionNodes, err := getRebalancedNamespacePartitions(
namespaceInfo.Name,
namespaceInfo.PartitionNum,
- namespaceInfo.Replica, currentNodes)
+ namespaceInfo.Replica, oldParts, currentNodes, dp.balanceVer)
if err != nil {
return nil, err
}
@@ -344,10 +372,15 @@ func (dp *DataPlacement) checkNamespaceNodeConflict(namespaceInfo *cluster.Parti
func (dp *DataPlacement) allocNamespaceRaftNodes(ns string, currentNodes map[string]cluster.NodeInfo,
replica int, partitionNum int, existPart map[int]*cluster.PartitionMetaInfo) ([]cluster.PartitionReplicaInfo, *cluster.CoordErr) {
replicaList := make([]cluster.PartitionReplicaInfo, partitionNum)
+
+ oldParts, coordErr := dp.getCurrentPartitionNodes(ns)
+ if coordErr != nil {
+ return nil, coordErr
+ }
partitionNodes, err := getRebalancedNamespacePartitions(
ns,
partitionNum,
- replica, currentNodes)
+ replica, oldParts, currentNodes, dp.balanceVer)
if err != nil {
return nil, err
}
@@ -371,6 +404,26 @@ func (dp *DataPlacement) allocNamespaceRaftNodes(ns string, currentNodes map[str
return replicaList, nil
}
+func (dp *DataPlacement) getCurrentPartitionNodes(ns string) ([][]string, *cluster.CoordErr) {
+ if dp.pdCoord == nil || dp.pdCoord.register == nil {
+ return nil, cluster.ErrNoCoordRegister
+ }
+ allNamespaces, _, err := dp.pdCoord.register.GetAllNamespaces()
+ if err != nil {
+ cluster.CoordLog().Infof("scan namespaces error: %v", err)
+ return nil, cluster.NewCoordErr(err.Error(), cluster.CoordTmpErr)
+ }
+ nsInfos, _ := allNamespaces[ns]
+ partNodes := make([][]string, 0)
+ for pid, part := range nsInfos {
+ if pid >= len(partNodes) {
+ partNodes = append(partNodes, make([][]string, pid-len(partNodes)+1)...)
+ }
+ partNodes[pid] = part.GetISR()
+ }
+ return partNodes, nil
+}
+
func (dp *DataPlacement) rebalanceNamespace(monitorChan chan struct{}) (bool, bool) {
moved := false
isAllBalanced := false
@@ -399,6 +452,11 @@ func (dp *DataPlacement) rebalanceNamespace(monitorChan chan struct{}) (bool, bo
return moved, false
default:
}
+		// only balance within the given interval; check again here since the last move may take a long time
+ if time.Now().Hour() > int(atomic.LoadInt32(&dp.balanceInterval[1])) ||
+ time.Now().Hour() < int(atomic.LoadInt32(&dp.balanceInterval[0])) {
+ return moved, false
+ }
if !dp.pdCoord.IsClusterStable() {
return moved, false
}
@@ -419,6 +477,11 @@ func (dp *DataPlacement) rebalanceNamespace(monitorChan chan struct{}) (bool, bo
cluster.CoordLog().Infof("namespace %v isr is not full ready while balancing", namespaceInfo.GetDesp())
continue
}
+
+ oldParts, coordErr := dp.getCurrentPartitionNodes(namespaceInfo.Name)
+ if coordErr != nil {
+ continue
+ }
currentNodes := dp.pdCoord.getCurrentNodes(namespaceInfo.Tags)
nodeNameList := getNodeNameList(currentNodes)
cluster.CoordLog().Debugf("node name list: %v", nodeNameList)
@@ -426,7 +489,7 @@ func (dp *DataPlacement) rebalanceNamespace(monitorChan chan struct{}) (bool, bo
partitionNodes, err := getRebalancedNamespacePartitions(
namespaceInfo.Name,
namespaceInfo.PartitionNum,
- namespaceInfo.Replica, currentNodes)
+ namespaceInfo.Replica, oldParts, currentNodes, dp.balanceVer)
if err != nil {
isAllBalanced = false
continue
@@ -464,8 +527,17 @@ func (dp *DataPlacement) rebalanceNamespace(monitorChan chan struct{}) (bool, bo
coordErr := dp.pdCoord.removeNamespaceFromNode(&namespaceInfo, nid)
moved = true
if coordErr != nil {
+ cluster.CoordLog().Infof("namespace %v removing node: %v failed while balance",
+ namespaceInfo.GetDesp(), nid)
return moved, false
}
+			// Move only one node per loop; we need to wait for the removing node to be removed from the raft group.
+ break
+ }
+ if len(namespaceInfo.Removings) > 0 {
+		// need to wait for the moved node to be removed from the raft group
+ isAllBalanced = false
+ continue
}
expectLeader := partitionNodes[namespaceInfo.Partition][0]
if _, ok := namespaceInfo.Removings[expectLeader]; ok {
@@ -521,7 +593,8 @@ func (s SortableStrings) Swap(l, r int) {
func getRebalancedNamespacePartitions(ns string,
partitionNum int, replica int,
- currentNodes map[string]cluster.NodeInfo) ([][]string, *cluster.CoordErr) {
+ oldPartitionNodes [][]string,
+ currentNodes map[string]cluster.NodeInfo, balanceVer string) ([][]string, *cluster.CoordErr) {
if len(currentNodes) < replica {
return nil, ErrNodeUnavailable
}
@@ -535,6 +608,7 @@ func getRebalancedNamespacePartitions(ns string,
// start from the index of the current node array
// 4. for next partition, start from the next index of node array.
// l -> leader, f-> follower
+	// 5. In this way, some nodes may end up with more replicas than average; we consider that acceptable when there are many partitions
// nodeA nodeB nodeC nodeD
// p1 l f f
// p2 l f f
@@ -560,16 +634,47 @@ func getRebalancedNamespacePartitions(ns string,
// p9 l x f x-f
// p10 x-f x f-l f
+	// if using the V2 balance, we choose as below:
+	// for each partition choose the leader and replicas one by one, selecting the node with the fewest leaders/replicas so far
+ // nodeA nodeB nodeC nodeD
+ // p1 l f f
+ // p2 f f l
+ // p3 f l f
+ // p4 l f f
+ // p5 l f f
+ // p6 f f l
+ // p7 f l f
+ // p8 l f f
+ // p9 l f f
+ // p10 f f l
+
+	// after nodeB is down, the migration is as below:
+	// we keep the old placement unchanged, and if a replica is on the failed node, we move the leader or replica to the node with the fewest leaders/replicas,
+	// and if this makes the leaders/replicas unbalanced, we try to move them again by selecting the least-loaded node.
+ // nodeA xxxx nodeC nodeD
+ // p1 l-f x f-l x-f
+ // p2 f x x-f l
+ // p3 f l f
+ // p4 x-l x f f
+ // p5 l x f x-f
+ // p6 f x x-f l
+ // p7 f l f
+ // p8 x-l x f f
+ // p9 l x f x-f
+ // p10 f x x-f l
+	// still unbalanced, so we continue balancing leaders
+
// if there are several data centers, we sort them one by one as below
// nodeA1@dc1 nodeA2@dc2 nodeA3@dc3 nodeB1@dc1 nodeB2@dc2 nodeB3@dc3
nodeNameList := getNodeNameList(currentNodes)
- return getRebalancedPartitionsFromNameList(ns, partitionNum, replica, nodeNameList)
+ return getRebalancedPartitionsFromNameList(ns, partitionNum, replica, oldPartitionNodes, nodeNameList, balanceVer)
}
func getRebalancedPartitionsFromNameList(ns string,
partitionNum int, replica int,
- nodeNameList []SortableStrings) ([][]string, *cluster.CoordErr) {
+ oldPartitionNodes [][]string,
+ nodeNameList []SortableStrings, balanceVer string) ([][]string, *cluster.CoordErr) {
var combined SortableStrings
sortedNodeNameList := make([]SortableStrings, 0, len(nodeNameList))
@@ -596,27 +701,320 @@ func getRebalancedPartitionsFromNameList(ns string,
sortedNodeNameList[idx%len(sortedNodeNameList)] = nList[1:]
idx++
}
+
+ if balanceVer == BalanceV2Str {
+ return fillPartitionMapV2(ns, partitionNum, replica, oldPartitionNodes, combined), nil
+ }
+ return fillPartitionMapV1(ns, partitionNum, replica, combined), nil
+}
+
+func fillPartitionMapV1(ns string,
+ partitionNum int, replica int,
+ sortedNodes SortableStrings) [][]string {
+
partitionNodes := make([][]string, partitionNum)
selectIndex := int(murmur3.Sum32([]byte(ns)))
for i := 0; i < partitionNum; i++ {
nlist := make([]string, replica)
partitionNodes[i] = nlist
for j := 0; j < replica; j++ {
- nlist[j] = combined[(selectIndex+j)%len(combined)]
+ nlist[j] = sortedNodes[(selectIndex+j)%len(sortedNodes)]
}
selectIndex++
}
+ return partitionNodes
+	// we could check whether any node has more replicas than average, but
+	// to avoid moving data we do not balance here (use the v2 balance instead if a more even placement is needed)
+}
- return partitionNodes, nil
+type loadItem struct {
+ name string
+ nameIndex int
+ leaderPids []int
+ replicaPids []int
+}
+
+func loadItemLeaderCmp(l interface{}, r interface{}) int {
+ li := l.(loadItem)
+ ri := r.(loadItem)
+ if len(li.leaderPids) == len(ri.leaderPids) && len(li.replicaPids) == len(ri.replicaPids) {
+ return utils.IntComparator(li.nameIndex, ri.nameIndex)
+ }
+ if len(li.leaderPids) == len(ri.leaderPids) {
+ return utils.IntComparator(len(li.replicaPids), len(ri.replicaPids))
+ }
+ return utils.IntComparator(len(li.leaderPids), len(ri.leaderPids))
+}
+
+func loadItemReplicaCmp(l interface{}, r interface{}) int {
+ li := l.(loadItem)
+ ri := r.(loadItem)
+ if len(li.replicaPids) == len(ri.replicaPids) {
+ return utils.IntComparator(li.nameIndex, ri.nameIndex)
+ }
+ return utils.IntComparator(len(li.replicaPids), len(ri.replicaPids))
+}
+
+func getMinMaxLoadForLeader(leaders map[string][]int, replicas map[string][]int, exclude []string, nameIndexMap map[string]int) (loadItem, loadItem) {
+ m := treemap.NewWith(loadItemLeaderCmp)
+ for name, lpids := range leaders {
+ ignore := false
+ for _, ex := range exclude {
+ if name == ex {
+ ignore = true
+ break
+ }
+ }
+ if ignore {
+ continue
+ }
+ rpids, _ := replicas[name]
+ m.Put(loadItem{
+ name: name,
+ nameIndex: nameIndexMap[name],
+ leaderPids: lpids,
+ replicaPids: rpids,
+ }, name)
+ }
+	// Do we need to copy the pid lists for leader and replica?
+ mm, _ := m.Min()
+ min := mm.(loadItem)
+ mmax, _ := m.Max()
+ max := mmax.(loadItem)
+ return min, max
+}
+
+func getMinMaxLoadForReplica(replicas map[string][]int, exclude []string, nameIndexMap map[string]int) (loadItem, loadItem) {
+ m := treemap.NewWith(loadItemReplicaCmp)
+ for name, rpids := range replicas {
+ ignore := false
+ for _, ex := range exclude {
+ if name == ex {
+ ignore = true
+ break
+ }
+ }
+ if ignore {
+ continue
+ }
+ m.Put(loadItem{
+ name: name,
+ nameIndex: nameIndexMap[name],
+ replicaPids: rpids,
+ }, name)
+ }
+ min, _ := m.Min()
+ max, _ := m.Max()
+ return min.(loadItem), max.(loadItem)
+}
+
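+// fillPartitionMapV2 prefers to keep each partition on its old nodes to minimize data movement,
+// fills any missing replica from the currently least-loaded node, and then moves one partition at a
+// time until leader and replica counts are balanced.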
+func fillPartitionMapV2(ns string,
+ partitionNum int, replica int,
+ oldPartitionNodes [][]string,
+ sortedNodes SortableStrings) [][]string {
+
+ newNodesReplicaMap := make(map[string][]int)
+ newNodesLeaderMap := make(map[string][]int)
+ nameIndexMap := make(map[string]int, len(sortedNodes))
+ // hash the namespace name to randomize the initial chosen index, so that different namespaces do not all start from the same index into the sorted nodes
+ selectIndex := int(murmur3.Sum32([]byte(ns)))
+ for i, n := range sortedNodes {
+ nameIndexMap[n] = (i + selectIndex) % len(sortedNodes)
+ newNodesReplicaMap[n] = make([]int, 0)
+ newNodesLeaderMap[n] = make([]int, 0)
+ }
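+ // rebuild the current leader/replica load from the old placement, counting only nodes that are still alive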
+ for pid, olist := range oldPartitionNodes {
+ for i, name := range olist {
+ if i == 0 {
+ pidlist, ok := newNodesLeaderMap[name]
+ if ok {
+ pidlist = append(pidlist, pid)
+ newNodesLeaderMap[name] = pidlist
+ }
+ }
+ pidlist, ok := newNodesReplicaMap[name]
+ if ok {
+ pidlist = append(pidlist, pid)
+ newNodesReplicaMap[name] = pidlist
+ }
+ }
+ }
+
+ partitionNodes := make([][]string, partitionNum)
+ // reuse the old placement first; if no old node is available, choose the currently least-loaded node and update its load
+ for pid := 0; pid < partitionNum; pid++ {
+ var oldlist []string
+ if pid < len(oldPartitionNodes) {
+ oldlist = oldPartitionNodes[pid]
+ }
+ nlist := make([]string, replica)
+ partitionNodes[pid] = nlist
+ exclude := make([]string, 0)
+ exclude = append(exclude, oldlist...)
+ for j := 0; j < replica; j++ {
+ var old string
+ if len(oldlist) > j {
+ old = oldlist[j]
+ }
+ if j == 0 {
+ // keep the old leader if it is still alive
+ _, ok := newNodesLeaderMap[old]
+ if ok {
+ nlist[j] = old
+ continue
+ }
+ nleader, _ := getMinMaxLoadForLeader(newNodesLeaderMap, newNodesReplicaMap, exclude, nameIndexMap)
+ newNodesLeaderMap[nleader.name] = append(nleader.leaderPids, pid)
+ newNodesReplicaMap[nleader.name] = append(nleader.replicaPids, pid)
+ nlist[j] = nleader.name
+ exclude = append(exclude, nlist[j])
+ continue
+ }
+ _, ok := newNodesReplicaMap[old]
+ if ok {
+ nlist[j] = old
+ continue
+ }
+ nreplica, _ := getMinMaxLoadForReplica(newNodesReplicaMap, exclude, nameIndexMap)
+ newNodesReplicaMap[nreplica.name] = append(nreplica.replicaPids, pid)
+ nlist[j] = nreplica.name
+ exclude = append(exclude, nlist[j])
+ }
+ }
+ // move if unbalanced
+ balanced := false
+ maxMoved := replica * partitionNum
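+ // bound the number of move rounds so we never loop forever if a fully balanced placement is unreachable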
+ for !balanced {
+ partitionNodes, balanced = moveIfUnbalanced(nameIndexMap, newNodesLeaderMap,
+ newNodesReplicaMap, partitionNodes)
+ maxMoved--
+ if maxMoved < 0 {
+ cluster.CoordLog().Warningf("balance moved too many times: %v", partitionNodes)
+ break
+ }
+ }
+ return partitionNodes
+}
+
+func findPidInList(pid int, l []int) bool {
+ for _, p := range l {
+ if p == pid {
+ return true
+ }
+ }
+ return false
+}
+
+func removePidFromList(pid int, l []int) []int {
+ nl := make([]int, 0, len(l)-1)
+ for _, p := range l {
+ if p == pid {
+ continue
+ }
+ nl = append(nl, p)
+ }
+ return nl
+}
+
+func replaceReplicaWith(replicas []string, oldR string, newR string) {
+ for i := 0; i < len(replicas); i++ {
+ if replicas[i] == oldR {
+ // move to min
+ replicas[i] = newR
+ break
+ }
+ }
+}
+
+// moveIfUnbalanced moves at most one partition per call and returns the changed partition map and whether the placement is already balanced.
+func moveIfUnbalanced(
+ nameIndexMap map[string]int,
+ newNodesLeaderMap map[string][]int,
+ newNodesReplicaMap map[string][]int,
+ partitionNodes [][]string) ([][]string, bool) {
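+ // balance leader counts first; replica counts are only considered once leaders are balanced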
+ min, max := getMinMaxLoadForLeader(newNodesLeaderMap, newNodesReplicaMap, nil, nameIndexMap)
+ balanced := true
+ if len(max.leaderPids)-len(min.leaderPids) <= 1 {
+ // leader is balanced
+ } else {
+ balanced = false
+ cluster.CoordLog().Infof("balance since too much leaders(max %v-min %v): %v, before move, replicas: %v", len(max.leaderPids),
+ len(min.leaderPids), newNodesLeaderMap, partitionNodes)
+ for _, pid := range max.leaderPids {
+ if findPidInList(pid, min.leaderPids) {
+ continue
+ }
+ if findPidInList(pid, min.replicaPids) {
+ // if the min node already holds a non-leader replica of this partition, we can just swap the leader
+ cluster.CoordLog().Debugf("balance pid %v leaders, just exchange: %v %v", pid, max.name, min.name)
+ nlist := partitionNodes[pid]
+ for index, n := range nlist {
+ if n == min.name {
+ tmp := nlist[0]
+ nlist[0] = min.name
+ nlist[index] = tmp
+ break
+ }
+ }
+ } else {
+ replaceReplicaWith(partitionNodes[pid], max.name, min.name)
+ min.replicaPids = append(min.replicaPids, pid)
+ newNodesReplicaMap[min.name] = min.replicaPids
+ newNodesReplicaMap[max.name] = removePidFromList(pid, max.replicaPids)
+ cluster.CoordLog().Debugf("balance pid %v leaders, move: %v %v", pid, max.name, min.name)
+ }
+ min.leaderPids = append(min.leaderPids, pid)
+ newNodesLeaderMap[min.name] = min.leaderPids
+ newNodesLeaderMap[max.name] = removePidFromList(pid, max.leaderPids)
+ break
+ }
+ cluster.CoordLog().Infof("after moved(max %v-min %v), replicas: %v", len(max.leaderPids),
+ len(min.leaderPids), partitionNodes)
+ return partitionNodes, balanced
+ }
+
+ min, max = getMinMaxLoadForReplica(newNodesReplicaMap, nil, nameIndexMap)
+ // note: leaderPids is empty here since we do not care about leader load while moving replicas
+ if len(max.replicaPids)-len(min.replicaPids) <= 1 {
+ // replica is balanced
+ } else {
+ balanced = false
+ cluster.CoordLog().Infof("balance since too much replicas(max %v-min %v): %v, before move, replicas: %v",
+ len(max.replicaPids), len(min.replicaPids), newNodesReplicaMap, partitionNodes)
+ for _, pid := range max.replicaPids {
+ if findPidInList(pid, min.replicaPids) {
+ continue
+ }
+ // do not move a leader replica here; leader balance is handled above
+ if findPidInList(pid, newNodesLeaderMap[max.name]) {
+ continue
+ }
+ cluster.CoordLog().Debugf("balance pid %v replicas, move: %v %v", pid, max.name, min.name)
+ replaceReplicaWith(partitionNodes[pid], max.name, min.name)
+ min.replicaPids = append(min.replicaPids, pid)
+ newNodesReplicaMap[min.name] = min.replicaPids
+ newNodesReplicaMap[max.name] = removePidFromList(pid, max.replicaPids)
+ break
+ }
+ cluster.CoordLog().Infof("after moved(max %v- min %v), replicas: %v", len(max.replicaPids),
+ len(min.replicaPids), partitionNodes)
+ return partitionNodes, balanced
+ }
+ return partitionNodes, balanced
}
func (dp *DataPlacement) decideUnwantedRaftNode(namespaceInfo *cluster.PartitionMetaInfo, currentNodes map[string]cluster.NodeInfo) string {
unwantedNode := ""
+ oldParts, coordErr := dp.getCurrentPartitionNodes(namespaceInfo.Name)
+ if coordErr != nil {
+ return unwantedNode
+ }
//remove the unwanted node in isr
partitionNodes, err := getRebalancedNamespacePartitions(
namespaceInfo.Name,
namespaceInfo.PartitionNum,
- namespaceInfo.Replica, currentNodes)
+ namespaceInfo.Replica, oldParts, currentNodes, dp.balanceVer)
if err != nil {
return unwantedNode
}
diff --git a/cluster/pdnode_coord/place_driver_test.go b/cluster/pdnode_coord/place_driver_test.go
index edc946c8..81b3ae2f 100644
--- a/cluster/pdnode_coord/place_driver_test.go
+++ b/cluster/pdnode_coord/place_driver_test.go
@@ -1,13 +1,103 @@
package pdnode_coord
import (
+ "math"
"testing"
+ "time"
- "github.com/absolute8511/ZanRedisDB/cluster"
"github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/cluster"
)
-func TestClusterNodesPlacementAcrossDC(t *testing.T) {
+type testLogger struct {
+ t *testing.T
+}
+
+func newTestLogger(t *testing.T) *testLogger {
+ return &testLogger{t: t}
+}
+
+func (l *testLogger) Output(maxdepth int, s string) error {
+ l.t.Logf("%v:%v", time.Now().UnixNano(), s)
+ return nil
+}
+
+func (l *testLogger) OutputErr(maxdepth int, s string) error {
+ l.t.Logf("%v:%v", time.Now().UnixNano(), s)
+ return nil
+}
+
+func (l *testLogger) OutputWarning(maxdepth int, s string) error {
+ l.t.Logf("%v:%v", time.Now().UnixNano(), s)
+ return nil
+}
+
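+// checkPartitionNodesBalance asserts every partition has distinct replicas and that leader and replica counts differ by at most one across nodes (the default v1 balance is allowed a looser replica spread).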
+func checkPartitionNodesBalance(t *testing.T, balanceVer string, partitionNodes [][]string) {
+ replicaNodesMap := make(map[string]int)
+ leaderNodesMap := make(map[string]int)
+ for _, nlist := range partitionNodes {
+ l := nlist[0]
+ cnt, ok := leaderNodesMap[l]
+ if !ok {
+ cnt = 0
+ }
+ cnt++
+ leaderNodesMap[l] = cnt
+ nameMap := make(map[string]bool)
+ for _, n := range nlist {
+ nameMap[n] = true
+ cnt, ok = replicaNodesMap[n]
+ if !ok {
+ cnt = 0
+ }
+ cnt++
+ replicaNodesMap[n] = cnt
+ }
+ assert.Equal(t, len(nlist), len(nameMap), nlist)
+ }
+
+ maxL := 0
+ minL := math.MaxInt32
+ for _, cnt := range leaderNodesMap {
+ if cnt > maxL {
+ maxL = cnt
+ }
+ if cnt < minL {
+ minL = cnt
+ }
+ }
+ t.Logf("leader max vs min: %v, %v", maxL, minL)
+ assert.True(t, maxL-minL <= 1, partitionNodes)
+
+ maxL = 0
+ minL = math.MaxInt32
+ for _, cnt := range replicaNodesMap {
+ if cnt > maxL {
+ maxL = cnt
+ }
+ if cnt < minL {
+ minL = cnt
+ }
+ }
+ t.Logf("replica max vs min: %v, %v", maxL, minL)
+ if balanceVer == "" {
+ // the default (v1) balance may not produce evenly balanced replicas
+ assert.True(t, maxL-minL <= 3, partitionNodes)
+ return
+ }
+ assert.True(t, maxL-minL <= 1, partitionNodes)
+}
+
+func TestClusterNodesPlacementAcrossDCV1(t *testing.T) {
+ testClusterNodesPlacementAcrossDC(t, "")
+}
+
+func TestClusterNodesPlacementAcrossDCV2(t *testing.T) {
+ testClusterNodesPlacementAcrossDC(t, "v2")
+}
+
+func testClusterNodesPlacementAcrossDC(t *testing.T, balanceVer string) {
+ cluster.SetLogger(2, newTestLogger(t))
nodes := make(map[string]cluster.NodeInfo)
dc1Nodes := SortableStrings{"11", "12", "13", "14", "15", "16"}
@@ -26,24 +116,29 @@ func TestClusterNodesPlacementAcrossDC(t *testing.T) {
n.Tags[cluster.DCInfoTag] = "2"
nodes[nid] = n
}
+ partitionNum := 8
+ replicator := 3
nodeNameList := getNodeNameList(nodes)
assert.Equal(t, nodeNameList, getNodeNameList(nodes))
t.Log(nodeNameList)
placementNodes, err := getRebalancedPartitionsFromNameList("test",
- 8, 3, nodeNameList)
+ partitionNum, replicator, nil, nodeNameList, balanceVer)
assert.Nil(t, err)
- assert.Equal(t, 8, len(placementNodes))
t.Log(placementNodes)
t.Log(nodeNameList)
+ assert.Equal(t, partitionNum, len(placementNodes))
+ checkPartitionNodesBalance(t, balanceVer, placementNodes)
placementNodes2, err := getRebalancedPartitionsFromNameList("test",
- 8, 3, nodeNameList)
+ partitionNum, replicator, placementNodes, nodeNameList, balanceVer)
assert.Nil(t, err)
+ t.Log(placementNodes2)
assert.Equal(t, placementNodes, placementNodes2)
t.Log(nodeNameList)
+ checkPartitionNodesBalance(t, balanceVer, placementNodes2)
for _, v := range placementNodes {
- assert.Equal(t, 3, len(v))
+ assert.Equal(t, replicator, len(v))
dc1Cnt := 0
dc2Cnt := 0
for _, n := range v {
@@ -54,13 +149,13 @@ func TestClusterNodesPlacementAcrossDC(t *testing.T) {
dc2Cnt++
}
}
- assert.Equal(t, true, dc1Cnt > 0)
- assert.Equal(t, true, dc2Cnt > 0)
+ assert.Equal(t, true, dc1Cnt > 0, v)
+ assert.Equal(t, true, dc2Cnt > 0, v)
diff := dc2Cnt - dc1Cnt
if diff < 0 {
diff = -1 * diff
}
- assert.Equal(t, 1, diff)
+ assert.Equal(t, 1, diff, v)
}
dc1Nodes = SortableStrings{"11", "12", "13", "14", "15", "16"}
dc2Nodes = SortableStrings{"21", "22", "23", "24", "25", "26"}
@@ -69,12 +164,13 @@ func TestClusterNodesPlacementAcrossDC(t *testing.T) {
nodeNameList = make([]SortableStrings, 0)
nodeNameList = append(nodeNameList, dc1Nodes, dc2Nodes, dc3Nodes)
placementNodes, err = getRebalancedPartitionsFromNameList("test",
- 8, 3, nodeNameList)
+ partitionNum, replicator, nil, nodeNameList, balanceVer)
assert.Nil(t, err)
- assert.Equal(t, 8, len(placementNodes))
t.Log(placementNodes)
+ assert.Equal(t, partitionNum, len(placementNodes))
+ checkPartitionNodesBalance(t, balanceVer, placementNodes)
for _, v := range placementNodes {
- assert.Equal(t, 3, len(v))
+ assert.Equal(t, replicator, len(v))
dc1Cnt := 0
dc2Cnt := 0
dc3Cnt := 0
@@ -101,13 +197,14 @@ func TestClusterNodesPlacementAcrossDC(t *testing.T) {
nodeNameList = make([]SortableStrings, 0)
nodeNameList = append(nodeNameList, dc1Nodes, dc2Nodes)
placementNodes, err = getRebalancedPartitionsFromNameList("test",
- 8, 3, nodeNameList)
+ partitionNum, replicator, nil, nodeNameList, balanceVer)
assert.Nil(t, err)
- assert.Equal(t, 8, len(placementNodes))
t.Log(placementNodes)
+ assert.Equal(t, partitionNum, len(placementNodes))
+ checkPartitionNodesBalance(t, balanceVer, placementNodes)
dc2NoReplicas := 0
for _, v := range placementNodes {
- assert.Equal(t, 3, len(v))
+ assert.Equal(t, replicator, len(v))
dc1Cnt := 0
dc2Cnt := 0
for _, n := range v {
@@ -119,14 +216,15 @@ func TestClusterNodesPlacementAcrossDC(t *testing.T) {
}
}
assert.Equal(t, true, dc1Cnt > 0)
- if dc2NoReplicas >= 2 {
+ if dc2NoReplicas >= 3 {
assert.Equal(t, true, dc2Cnt > 0)
}
if dc2Cnt == 0 {
dc2NoReplicas++
}
}
- assert.Equal(t, 2, dc2NoReplicas)
+ assert.True(t, dc2NoReplicas >= 2)
+ assert.True(t, dc2NoReplicas <= 3)
dc1Nodes = SortableStrings{"11", "12", "13", "14", "15", "16"}
dc2Nodes = SortableStrings{}
@@ -134,24 +232,233 @@ func TestClusterNodesPlacementAcrossDC(t *testing.T) {
nodeNameList = make([]SortableStrings, 0)
nodeNameList = append(nodeNameList, dc1Nodes, dc2Nodes)
placementNodes, err = getRebalancedPartitionsFromNameList("test",
- 8, 3, nodeNameList)
+ partitionNum, replicator, nil, nodeNameList, balanceVer)
assert.Nil(t, err)
- assert.Equal(t, 8, len(placementNodes))
t.Log(placementNodes)
+ assert.Equal(t, partitionNum, len(placementNodes))
+ checkPartitionNodesBalance(t, balanceVer, placementNodes)
for _, v := range placementNodes {
- assert.Equal(t, 3, len(v))
+ assert.Equal(t, replicator, len(v))
}
dc1Nodes = SortableStrings{"11", "12", "13", "14", "15", "16"}
nodeNameList = make([]SortableStrings, 0)
nodeNameList = append(nodeNameList, dc1Nodes)
placementNodes2, err = getRebalancedPartitionsFromNameList("test",
- 8, 3, nodeNameList)
+ partitionNum, replicator, nil, nodeNameList, balanceVer)
t.Log(placementNodes2)
assert.Nil(t, err)
- assert.Equal(t, 8, len(placementNodes2))
+ assert.Equal(t, partitionNum, len(placementNodes2))
+ checkPartitionNodesBalance(t, balanceVer, placementNodes2)
for _, v := range placementNodes2 {
- assert.Equal(t, 3, len(v))
+ assert.Equal(t, replicator, len(v))
}
assert.Equal(t, placementNodes, placementNodes2)
+ checkPartitionNodesBalance(t, balanceVer, placementNodes2)
+}
+
+func TestClusterNodesPlacementWithMigrateV1(t *testing.T) {
+ testClusterNodesPlacementWithMigrateIn1DC(t, "")
+ testClusterNodesPlacementWithMigrateIn2DC(t, "")
+}
+func TestClusterNodesPlacementWithMigrateV2(t *testing.T) {
+ testClusterNodesPlacementWithMigrateIn1DC(t, "v2")
+ testClusterNodesPlacementWithMigrateIn2DC(t, "v2")
+}
+
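+// computeTheMigrateCost counts the leaders that changed and the replicas added and removed between two placements, approximating the data-migration cost.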
+func computeTheMigrateCost(oldParts [][]string, newParts [][]string) (int, int, int) {
+ leaderChanged := 0
+ replicaAdded := 0
+ replicaDeleted := 0
+ for pid := 0; pid < len(oldParts); pid++ {
+ olist := oldParts[pid]
+ nlist := newParts[pid]
+ if olist[0] != nlist[0] {
+ leaderChanged++
+ }
+ // the replica count may grow or shrink when the replicator setting changes
+ omap := make(map[string]bool)
+ for _, o := range olist {
+ omap[o] = true
+ }
+ // drop replicas that stayed the same
+ unchanged := 0
+ for _, n := range nlist {
+ _, ok := omap[n]
+ if ok {
+ delete(omap, n)
+ unchanged++
+ }
+ }
+ replicaDeleted += len(omap)
+ replicaAdded += len(nlist) - unchanged
+ }
+ return leaderChanged, replicaAdded, replicaDeleted
+}
+
+func genClusterNodes(names SortableStrings, dc string, cnodes map[string]cluster.NodeInfo) map[string]cluster.NodeInfo {
+ if cnodes == nil {
+ cnodes = make(map[string]cluster.NodeInfo)
+ }
+ for _, nid := range names {
+ var n cluster.NodeInfo
+ n.ID = nid
+ n.Tags = make(map[string]interface{})
+ n.Tags[cluster.DCInfoTag] = dc
+ cnodes[nid] = n
+ }
+ return cnodes
+}
+
+func testClusterNodesPlacementWithMigrateIn2DC(t *testing.T, balanceVer string) {
+ dc1Nodes := SortableStrings{"110", "130", "150", "170", "190"}
+ dc2Nodes := SortableStrings{"210", "230", "250", "270", "290"}
+ addedList1 := SortableStrings{"100", "120", "140", "160", "180", "199"}
+ addedList2 := SortableStrings{"200", "220", "240", "260", "280", "299"}
+ testClusterNodesPlacementWithMigrate(t, balanceVer, dc1Nodes, dc2Nodes, addedList1, addedList2)
+}
+
+func testClusterNodesPlacementWithMigrateIn1DC(t *testing.T, balanceVer string) {
+ dc1Nodes := SortableStrings{"110", "120", "130", "140", "150"}
+ dc2Nodes := SortableStrings{}
+ addedList1 := SortableStrings{"100", "115", "125", "135", "145", "155"}
+ addedList2 := SortableStrings{}
+ testClusterNodesPlacementWithMigrate(t, balanceVer, dc1Nodes, dc2Nodes, addedList1, addedList2)
+}
+
+func testClusterNodesPlacementWithMigrate(t *testing.T, balanceVer string,
+ dc1Nodes SortableStrings, dc2Nodes SortableStrings,
+ addedList1 SortableStrings, addedList2 SortableStrings) {
+
+ cluster.SetLogger(2, newTestLogger(t))
+ nodes := make(map[string]cluster.NodeInfo)
+ nodes = genClusterNodes(dc1Nodes, "1", nodes)
+ nodes = genClusterNodes(dc2Nodes, "2", nodes)
+
+ partitionNum := 32
+ replicator := 3
+ nodeNameList := getNodeNameList(nodes)
+ placementNodes, err := getRebalancedPartitionsFromNameList("test",
+ partitionNum, replicator, nil, nodeNameList, balanceVer)
+ t.Log(placementNodes)
+ assert.Nil(t, err)
+ assert.Equal(t, partitionNum, len(placementNodes))
+ checkPartitionNodesBalance(t, balanceVer, placementNodes)
+
+ allNodes := make([]cluster.NodeInfo, 0)
+ for _, nlist := range nodeNameList {
+ for _, n := range nlist {
+ allNodes = append(allNodes, nodes[n])
+ }
+ }
+ // try removing each node in turn, and check the migrated partitions
+ for removeIndex := 0; removeIndex < len(allNodes); removeIndex++ {
+ newNodes := make(map[string]cluster.NodeInfo)
+ for i, v := range allNodes {
+ if i == removeIndex {
+ continue
+ }
+ newNodes[v.ID] = v
+ }
+ nodeNameList := getNodeNameList(newNodes)
+ placementNodes2, err := getRebalancedPartitionsFromNameList("test",
+ partitionNum, replicator, placementNodes, nodeNameList, balanceVer)
+ t.Log(placementNodes2)
+ assert.Nil(t, err)
+ assert.Equal(t, partitionNum, len(placementNodes2))
+ checkPartitionNodesBalance(t, balanceVer, placementNodes2)
+ lcost, fadded, deleted := computeTheMigrateCost(placementNodes, placementNodes2)
+ t.Logf("%v remove, leader changed: %v, replica added: %v, deleted: %v", removeIndex, lcost, fadded, deleted)
+ }
+ // try adding a node at each sorted position, and check the migrated partitions
+ for _, added := range addedList1 {
+ newNodes := make(map[string]cluster.NodeInfo)
+ for _, v := range allNodes {
+ newNodes[v.ID] = v
+ }
+ var n cluster.NodeInfo
+ n.ID = added
+ n.Tags = make(map[string]interface{})
+ n.Tags[cluster.DCInfoTag] = "1"
+ newNodes[added] = n
+ nodeNameList := getNodeNameList(newNodes)
+ placementNodes2, err := getRebalancedPartitionsFromNameList("test",
+ partitionNum, replicator, placementNodes, nodeNameList, balanceVer)
+ t.Log(placementNodes2)
+ assert.Nil(t, err)
+ assert.Equal(t, partitionNum, len(placementNodes2))
+ checkPartitionNodesBalance(t, balanceVer, placementNodes2)
+ lcost, fadded, deleted := computeTheMigrateCost(placementNodes, placementNodes2)
+ t.Logf("%v added, leader changed: %v, replica added: %v, deleted: %v", added, lcost, fadded, deleted)
+ }
+ for _, added := range addedList2 {
+ newNodes := make(map[string]cluster.NodeInfo)
+ for _, v := range allNodes {
+ newNodes[v.ID] = v
+ }
+ var n cluster.NodeInfo
+ n.ID = added
+ n.Tags = make(map[string]interface{})
+ n.Tags[cluster.DCInfoTag] = "2"
+ newNodes[added] = n
+ nodeNameList := getNodeNameList(newNodes)
+ placementNodes2, err := getRebalancedPartitionsFromNameList("test",
+ partitionNum, replicator, placementNodes, nodeNameList, balanceVer)
+ t.Log(placementNodes2)
+ assert.Nil(t, err)
+ assert.Equal(t, partitionNum, len(placementNodes2))
+ checkPartitionNodesBalance(t, balanceVer, placementNodes2)
+ lcost, fadded, deleted := computeTheMigrateCost(placementNodes, placementNodes2)
+ t.Logf("%v added, leader changed: %v, replica added: %v, deleted: %v", added, lcost, fadded, deleted)
+ }
+
+ // try increasing the replica count, and check the migrated partitions
+ placementNodes2, err := getRebalancedPartitionsFromNameList("test",
+ partitionNum, replicator+1, placementNodes, nodeNameList, balanceVer)
+ t.Log(placementNodes2)
+ assert.Nil(t, err)
+ assert.Equal(t, partitionNum, len(placementNodes2))
+ checkPartitionNodesBalance(t, balanceVer, placementNodes2)
+ lcost, fadded, deleted := computeTheMigrateCost(placementNodes, placementNodes2)
+ t.Logf("replica increased, leader changed: %v, replica added: %v, deleted: %v", lcost, fadded, deleted)
+ // try decreasing the replica count, and check the migrated partitions
+ placementNodes2, err = getRebalancedPartitionsFromNameList("test",
+ partitionNum, replicator-1, placementNodes, nodeNameList, balanceVer)
+ t.Log(placementNodes2)
+ assert.Nil(t, err)
+ assert.Equal(t, partitionNum, len(placementNodes2))
+ checkPartitionNodesBalance(t, balanceVer, placementNodes2)
+ lcost, fadded, deleted = computeTheMigrateCost(placementNodes, placementNodes2)
+ t.Logf("replica decreased, leader changed: %v, replica added: %v, deleted: %v", lcost, fadded, deleted)
+}
+
+func TestClusterMigrateWhileBalanceChanged(t *testing.T) {
+ // check migrating from the default balance to v2
+ cluster.SetLogger(2, newTestLogger(t))
+ nodes := make(map[string]cluster.NodeInfo)
+ dc1Nodes := SortableStrings{"110", "120", "130", "140", "150"}
+ nodes = genClusterNodes(dc1Nodes, "1", nodes)
+
+ partitionNum := 8
+ replicator := 3
+ nodeNameList := getNodeNameList(nodes)
+ placementNodes, err := getRebalancedPartitionsFromNameList("test",
+ partitionNum, replicator, nil, nodeNameList, "")
+ t.Log(placementNodes)
+ assert.Nil(t, err)
+ assert.Equal(t, partitionNum, len(placementNodes))
+ checkPartitionNodesBalance(t, "", placementNodes)
+
+ placementNodes2, err := getRebalancedPartitionsFromNameList("test",
+ partitionNum, replicator, placementNodes, nodeNameList, "v2")
+ t.Log(placementNodes2)
+ assert.Nil(t, err)
+ assert.Equal(t, partitionNum, len(placementNodes2))
+ checkPartitionNodesBalance(t, "v2", placementNodes2)
+ lcost, fadded, deleted := computeTheMigrateCost(placementNodes, placementNodes2)
+ t.Logf("leader changed: %v, replica added: %v, deleted: %v", lcost, fadded, deleted)
+ assert.True(t, lcost <= 0, lcost)
+ assert.True(t, fadded <= 1, fadded)
+ assert.True(t, deleted <= 1, deleted)
+ assert.True(t, fadded+deleted > 0, fadded, deleted)
}
diff --git a/cluster/register.go b/cluster/register.go
index 3231be02..b5258c39 100644
--- a/cluster/register.go
+++ b/cluster/register.go
@@ -3,7 +3,7 @@ package cluster
import (
"errors"
- "github.com/absolute8511/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/common"
)
var (
@@ -62,12 +62,22 @@ type NamespaceMetaInfo struct {
SnapCount int
Tags map[string]interface{}
ExpirationPolicy string
+ DataVersion string
}
func (self *NamespaceMetaInfo) MetaEpoch() EpochType {
return self.metaEpoch
}
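+// DeepClone returns a copy of the meta info with its own Tags map so the copy can be modified independently.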
+func (self *NamespaceMetaInfo) DeepClone() NamespaceMetaInfo {
+ nm := *self
+ nm.Tags = make(map[string]interface{})
+ for k, v := range self.Tags {
+ nm.Tags[k] = v
+ }
+ return nm
+}
+
type RemovingInfo struct {
RemoveTime int64
RemoveReplicaID uint64
@@ -158,22 +168,10 @@ func (self *PartitionMetaInfo) GetRealLeader() string {
func (self *PartitionMetaInfo) GetCopy() *PartitionMetaInfo {
newp := *self
- newp.RaftNodes = make([]string, len(self.RaftNodes))
- copy(newp.RaftNodes, self.RaftNodes)
- newp.RaftIDs = make(map[string]uint64, len(self.RaftIDs))
- for k, v := range self.RaftIDs {
- newp.RaftIDs[k] = v
- }
- newp.Removings = make(map[string]RemovingInfo, len(self.Removings))
- for k, v := range self.Removings {
- newp.Removings[k] = v
- }
- newp.LearnerNodes = make(map[string][]string)
- for k, v := range self.LearnerNodes {
- ln := make([]string, len(v))
- copy(ln, v)
- newp.LearnerNodes[k] = ln
- }
+
+ newp.PartitionReplicaInfo = self.PartitionReplicaInfo.DeepClone()
+ newp.NamespaceMetaInfo = self.NamespaceMetaInfo.DeepClone()
+
return &newp
}
@@ -209,6 +207,11 @@ type Register interface {
GetNamespacesNotifyChan() chan struct{}
GetNamespaceSchemas(ns string) (map[string]SchemaInfo, error)
GetNamespaceTableSchema(ns string, table string) (*SchemaInfo, error)
+ // the saved key should carry the node info prefix to avoid conflicts between data nodes;
+ // if a key is meant to be shared between data nodes, use it carefully under concurrent modification
+ // note: the data key will be stored under the cluster root data path
+ SaveKV(key string, value string) error
+ GetKV(key string) (string, error)
Stop()
}
diff --git a/cluster/register_etcd.go b/cluster/register_etcd.go
index 4e0e5750..237a7e30 100644
--- a/cluster/register_etcd.go
+++ b/cluster/register_etcd.go
@@ -10,9 +10,8 @@ import (
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- etcdlock "github.com/absolute8511/xlock2"
"github.com/coreos/etcd/client"
+ "github.com/youzan/ZanRedisDB/common"
"golang.org/x/net/context"
)
@@ -21,8 +20,8 @@ const (
EVENT_WATCH_L_DELETE
)
-const (
- ETCD_TTL = 60
+var (
+ EtcdTTL = 30
)
const (
@@ -37,6 +36,7 @@ const (
PD_ROOT_DIR = "PDInfo"
PD_NODE_DIR = "PDNodes"
PD_LEADER_SESSION = "PDLeaderSession"
+ DataKVDir = "DataKV"
)
const (
@@ -66,7 +66,7 @@ func isEtcdErrorNum(err error, errorCode int) bool {
return false
}
-func exchangeNodeValue(c *etcdlock.EtcdClient, nodePath string, initValue string,
+func exchangeNodeValue(c *EtcdClient, nodePath string, initValue string,
valueChangeFn func(bool, string) (string, error)) error {
rsp, err := c.Get(nodePath, false, false)
isNew := false
@@ -106,25 +106,113 @@ func exchangeNodeValue(c *etcdlock.EtcdClient, nodePath string, initValue string
return err
}
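+// getMaxIndexFromNode returns the largest ModifiedIndex found in the subtree rooted at n.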
+func getMaxIndexFromNode(n *client.Node) uint64 {
+ if n == nil {
+ return 0
+ }
+ maxI := n.ModifiedIndex
+ if n.Dir {
+ for _, child := range n.Nodes {
+ index := getMaxIndexFromNode(child)
+ if index > maxI {
+ maxI = index
+ }
+ }
+ }
+ return maxI
+}
+
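+// getMaxIndexFromWatchRsp returns the larger of the response cluster index and the max modified index of the returned node tree.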
+func getMaxIndexFromWatchRsp(resp *client.Response) uint64 {
+ if resp == nil {
+ return 0
+ }
+ index := resp.Index
+ if resp.Node != nil {
+ if nmi := getMaxIndexFromNode(resp.Node); nmi > index {
+ index = nmi
+ }
+ }
+ return index
+}
+
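+// watchWaitAndDo reads the key once, invokes callback with the result, and then watches it forever,
+// re-reading and re-watching on expired-watch errors and backing off on other errors until the context is cancelled.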
+func watchWaitAndDo(ctx context.Context, client *EtcdClient,
+ key string, recursive bool, callback func(rsp *client.Response),
+ watchExpiredCb func()) {
+ initIndex := uint64(0)
+ rsp, err := client.Get(key, false, recursive)
+ if err != nil {
+ coordLog.Errorf("get watched key[%s] error: %s", key, err.Error())
+ } else {
+ if rsp.Index > 0 {
+ initIndex = rsp.Index - 1
+ }
+ callback(rsp)
+ }
+ watcher := client.Watch(key, initIndex, recursive)
+ for {
+ // to avoid dead-connection issues, we add a timeout to the watch connection so the watch
+ // wakes up if no event arrives for too long
+ start := time.Now()
+ rsp, err := watcher.Next(ctx)
+ if err != nil {
+ if err == context.Canceled {
+ coordLog.Infof("watch key[%s] cancelled.", key)
+ return
+ } else if err == context.DeadlineExceeded {
+ coordLog.Debugf("watcher key[%s] timeout: %s, cost: %s", key, err.Error(), time.Since(start))
+ // after a timeout the cluster index may have moved a lot, so we need to watch from a new index to avoid an expired index
+
+ continue
+ } else {
+ // rewatch
+ if IsEtcdWatchExpired(err) {
+ coordLog.Debugf("watcher key[%s] error: %s", key, err.Error())
+ if watchExpiredCb != nil {
+ watchExpiredCb()
+ }
+ rsp, err = client.Get(key, false, false)
+ if err != nil {
+ coordLog.Errorf("rewatch and get key[%s] error: %s", key, err.Error())
+ time.Sleep(time.Second)
+ continue
+ }
+ // for the etcd v2 client we should not +1 the index here, since Watch takes an after-index and adds 1 itself
+ watcher = client.Watch(key, rsp.Index, recursive)
+ // an expired watch should be treated as a node change
+ } else {
+ coordLog.Errorf("watcher key[%s] error: %s", key, err.Error())
+ time.Sleep(5 * time.Second)
+ continue
+ }
+ }
+ }
+ callback(rsp)
+ }
+}
+
type EtcdRegister struct {
nsMutex sync.Mutex
- client *etcdlock.EtcdClient
- clusterID string
- namespaceRoot string
- clusterPath string
- pdNodeRootPath string
- allNamespaceInfos map[string]map[int]PartitionMetaInfo
- nsEpoch EpochType
- ifNamespaceChanged int32
- watchNamespaceStopCh chan struct{}
- nsChangedChan chan struct{}
- triggerScanCh chan struct{}
- wg sync.WaitGroup
-}
-
-func NewEtcdRegister(host string) *EtcdRegister {
- client := etcdlock.NewEClient(host)
+ client *EtcdClient
+ clusterID string
+ namespaceRoot string
+ clusterPath string
+ pdNodeRootPath string
+ allNamespaceInfos map[string]map[int]PartitionMetaInfo
+ nsEpoch EpochType
+ ifNamespaceChanged int32
+ watchNamespaceStopCh chan struct{}
+ nsChangedChan chan struct{}
+ triggerScanCh chan struct{}
+ wg sync.WaitGroup
+ watchedNsClusterIndex uint64
+}
+
+func NewEtcdRegister(host string) (*EtcdRegister, error) {
+ client, err := NewEClient(host)
+ if err != nil {
+ return nil, err
+ }
r := &EtcdRegister{
allNamespaceInfos: make(map[string]map[int]PartitionMetaInfo),
watchNamespaceStopCh: make(chan struct{}),
@@ -133,7 +221,7 @@ func NewEtcdRegister(host string) *EtcdRegister {
nsChangedChan: make(chan struct{}, 3),
triggerScanCh: make(chan struct{}, 3),
}
- return r
+ return r, nil
}
func (etcdReg *EtcdRegister) InitClusterID(id string) {
@@ -145,6 +233,11 @@ func (etcdReg *EtcdRegister) InitClusterID(id string) {
func (etcdReg *EtcdRegister) Start() {
etcdReg.watchNamespaceStopCh = make(chan struct{})
+ etcdReg.nsMutex.Lock()
+ etcdReg.ifNamespaceChanged = 1
+ etcdReg.allNamespaceInfos = make(map[string]map[int]PartitionMetaInfo)
+ etcdReg.nsEpoch = 0
+ etcdReg.nsMutex.Unlock()
etcdReg.wg.Add(2)
go func() {
defer etcdReg.wg.Done()
@@ -237,6 +330,7 @@ func (etcdReg *EtcdRegister) refreshNamespaces(stopC <-chan struct{}) {
case <-etcdReg.triggerScanCh:
if atomic.LoadInt32(&etcdReg.ifNamespaceChanged) == 1 {
etcdReg.scanNamespaces()
+ time.Sleep(time.Millisecond * 100)
}
case <-ticker.C:
if atomic.LoadInt32(&etcdReg.ifNamespaceChanged) == 1 {
@@ -247,7 +341,7 @@ func (etcdReg *EtcdRegister) refreshNamespaces(stopC <-chan struct{}) {
}
func (etcdReg *EtcdRegister) watchNamespaces(stopC <-chan struct{}) {
- watcher := etcdReg.client.Watch(etcdReg.namespaceRoot, 0, true)
+ key := etcdReg.namespaceRoot
ctx, cancel := context.WithCancel(context.Background())
go func() {
select {
@@ -255,31 +349,25 @@ func (etcdReg *EtcdRegister) watchNamespaces(stopC <-chan struct{}) {
cancel()
}
}()
- for {
- _, err := watcher.Next(ctx)
- if err != nil {
- if err == context.Canceled {
- coordLog.Infof("watch key[%s] canceled.", etcdReg.namespaceRoot)
- return
- }
- atomic.StoreInt32(&etcdReg.ifNamespaceChanged, 1)
- coordLog.Errorf("watcher key[%s] error: %s", etcdReg.namespaceRoot, err.Error())
- if etcdlock.IsEtcdWatchExpired(err) {
- rsp, err := etcdReg.client.Get(etcdReg.namespaceRoot, false, true)
- if err != nil {
- coordLog.Errorf("rewatch and get key[%s] error: %s", etcdReg.namespaceRoot, err.Error())
- time.Sleep(time.Second)
- continue
- }
- watcher = etcdReg.client.Watch(etcdReg.namespaceRoot, rsp.Index+1, true)
- // watch expired should be treated as changed of node
- } else {
- time.Sleep(5 * time.Second)
- continue
- }
+ watchWaitAndDo(ctx, etcdReg.client, key, true, func(rsp *client.Response) {
+ // it seems rsp.Index may not change when the watch is triggered even though the keys have changed;
+ // note that rsp.Index in a watch response is the cluster index at the time the watch began, so it may be
+ // less than the modified index, which keeps increasing after the watch begins.
+ mi := getMaxIndexFromWatchRsp(rsp)
+ old := atomic.LoadUint64(&etcdReg.watchedNsClusterIndex)
+ if mi == old {
+ coordLog.Infof("namespace changed but index not changed: %v", rsp)
+ }
+ if mi > 0 {
+ atomic.StoreUint64(&etcdReg.watchedNsClusterIndex, mi)
}
- coordLog.Debugf("namespace changed.")
atomic.StoreInt32(&etcdReg.ifNamespaceChanged, 1)
+ if rsp.Node != nil {
+ coordLog.Infof("namespace changed at max cluster index %v (%v, modified index: %v), old: %v",
+ mi, rsp.Index, rsp.Node.ModifiedIndex, old)
+ } else {
+ coordLog.Infof("namespace changed at cluster index %v, %v, old: %v", mi, rsp.Index, old)
+ }
select {
case etcdReg.triggerScanCh <- struct{}{}:
default:
@@ -288,14 +376,16 @@ func (etcdReg *EtcdRegister) watchNamespaces(stopC <-chan struct{}) {
case etcdReg.nsChangedChan <- struct{}{}:
default:
}
- }
+ }, nil)
}
func (etcdReg *EtcdRegister) scanNamespaces() (map[string]map[int]PartitionMetaInfo, EpochType, error) {
coordLog.Infof("refreshing namespaces")
atomic.StoreInt32(&etcdReg.ifNamespaceChanged, 0)
- rsp, err := etcdReg.client.Get(etcdReg.namespaceRoot, true, true)
+ // since the scan is triggered by the watch, we need to read the newest data from quorum,
+ // otherwise we might read stale data from a follower and never see another update event from the leader.
+ rsp, err := etcdReg.client.Get(etcdReg.namespaceRoot, false, true)
if err != nil {
atomic.StoreInt32(&etcdReg.ifNamespaceChanged, 1)
if client.IsKeyNotFound(err) {
@@ -312,12 +402,18 @@ func (etcdReg *EtcdRegister) scanNamespaces() (map[string]map[int]PartitionMetaI
metaMap := make(map[string]NamespaceMetaInfo)
replicasMap := make(map[string]map[string]PartitionReplicaInfo)
leaderMap := make(map[string]map[string]RealLeader)
- maxEpoch := etcdReg.processNamespaceNode(rsp.Node.Nodes, metaMap, replicasMap, leaderMap)
+ err = etcdReg.processNamespaceNode(rsp.Node.Nodes, metaMap, replicasMap, leaderMap)
+ if err != nil {
+ atomic.StoreInt32(&etcdReg.ifNamespaceChanged, 1)
+ etcdReg.nsMutex.Lock()
+ nsInfos := etcdReg.allNamespaceInfos
+ nsEpoch := etcdReg.nsEpoch
+ etcdReg.nsMutex.Unlock()
+ coordLog.Infof("refreshing namespaces failed: %v, use old info instead", err)
+ return nsInfos, nsEpoch, err
+ }
nsInfos := make(map[string]map[int]PartitionMetaInfo)
- if EpochType(rsp.Node.ModifiedIndex) > maxEpoch {
- maxEpoch = EpochType(rsp.Node.ModifiedIndex)
- }
for k, v := range replicasMap {
meta, ok := metaMap[k]
if !ok {
@@ -350,11 +446,22 @@ func (etcdReg *EtcdRegister) scanNamespaces() (map[string]map[int]PartitionMetaI
}
etcdReg.nsMutex.Lock()
- etcdReg.allNamespaceInfos = nsInfos
- if maxEpoch != etcdReg.nsEpoch {
- coordLog.Infof("ns epoch changed from %v to : %v ", etcdReg.nsEpoch, maxEpoch)
+ // here we must use the cluster index for the whole tree, since deleting a node may decrease the max modified index of the tree
+ maxEpoch := EpochType(rsp.Index)
+ if maxEpoch > etcdReg.nsEpoch {
+ etcdReg.allNamespaceInfos = nsInfos
+ coordLog.Infof("ns epoch changed from %v to : %v , watched: %v", etcdReg.nsEpoch, maxEpoch, atomic.LoadUint64(&etcdReg.watchedNsClusterIndex))
+ etcdReg.nsEpoch = maxEpoch
+ } else {
+ coordLog.Infof("ns epoch changed %v not newer than current: %v , watched: %v", maxEpoch, etcdReg.nsEpoch, atomic.LoadUint64(&etcdReg.watchedNsClusterIndex))
+ nsInfos = etcdReg.allNamespaceInfos
+ maxEpoch = etcdReg.nsEpoch
+ }
+ if rsp.Index < atomic.LoadUint64(&etcdReg.watchedNsClusterIndex) {
+ // need to scan again next time since this cluster index is older than the newest watched index;
+ // note the response index may also be larger than the watched index, because changes outside the namespace root increase the cluster index too
+ atomic.StoreInt32(&etcdReg.ifNamespaceChanged, 1)
}
- etcdReg.nsEpoch = maxEpoch
etcdReg.nsMutex.Unlock()
return nsInfos, maxEpoch, nil
@@ -363,19 +470,14 @@ func (etcdReg *EtcdRegister) scanNamespaces() (map[string]map[int]PartitionMetaI
func (etcdReg *EtcdRegister) processNamespaceNode(nodes client.Nodes,
metaMap map[string]NamespaceMetaInfo,
replicasMap map[string]map[string]PartitionReplicaInfo,
- leaderMap map[string]map[string]RealLeader) EpochType {
- maxEpoch := EpochType(0)
+ leaderMap map[string]map[string]RealLeader) error {
for _, node := range nodes {
if node.Nodes != nil {
- newEpoch := etcdReg.processNamespaceNode(node.Nodes, metaMap, replicasMap, leaderMap)
- if newEpoch > maxEpoch {
- maxEpoch = newEpoch
+ err := etcdReg.processNamespaceNode(node.Nodes, metaMap, replicasMap, leaderMap)
+ if err != nil {
+ return err
}
}
- if EpochType(node.ModifiedIndex) > maxEpoch {
- maxEpoch = EpochType(node.ModifiedIndex)
- }
-
if node.Dir {
continue
}
@@ -438,7 +540,7 @@ func (etcdReg *EtcdRegister) processNamespaceNode(nodes client.Nodes,
}
}
}
- return maxEpoch
+ return nil
}
func (etcdReg *EtcdRegister) GetNamespacePartInfo(ns string, partition int) (*PartitionMetaInfo, error) {
@@ -526,10 +628,38 @@ func (etcdReg *EtcdRegister) GetNamespaceMetaInfo(ns string) (NamespaceMetaInfo,
return parts[0].NamespaceMetaInfo, nil
}
+func (etcdReg *EtcdRegister) GetKV(key string) (string, error) {
+ if key == "" {
+ return "", errors.New("data key can not be empty")
+ }
+ rk := path.Join(etcdReg.getClusterDataKVPath(), key)
+ rsp, err := etcdReg.client.Get(rk, false, false)
+ if err != nil {
+ if client.IsKeyNotFound(err) {
+ return "", ErrKeyNotFound
+ }
+ return "", err
+ }
+ return string(rsp.Node.Value), nil
+}
+
+func (etcdReg *EtcdRegister) SaveKV(key string, value string) error {
+ if key == "" || value == "" {
+ return errors.New("data key value can not be empty")
+ }
+ rk := path.Join(etcdReg.getClusterDataKVPath(), key)
+ _, err := etcdReg.client.Set(rk, value, 0)
+ return err
+}
+
func (etcdReg *EtcdRegister) getClusterPath() string {
return path.Join("/", ROOT_DIR, etcdReg.clusterID)
}
+func (etcdReg *EtcdRegister) getClusterDataKVPath() string {
+ return path.Join(etcdReg.getClusterPath(), DataKVDir)
+}
+
func (etcdReg *EtcdRegister) getClusterMetaPath() string {
return path.Join(etcdReg.getClusterPath(), CLUSTER_META_INFO)
}
@@ -591,11 +721,15 @@ type PDEtcdRegister struct {
refreshStopCh chan bool
}
-func NewPDEtcdRegister(host string) *PDEtcdRegister {
+func NewPDEtcdRegister(host string) (*PDEtcdRegister, error) {
+ reg, err := NewEtcdRegister(host)
+ if err != nil {
+ return nil, err
+ }
return &PDEtcdRegister{
- EtcdRegister: NewEtcdRegister(host),
+ EtcdRegister: reg,
refreshStopCh: make(chan bool, 1),
- }
+ }, nil
}
func (etcdReg *PDEtcdRegister) Register(value *NodeInfo) error {
@@ -612,7 +746,7 @@ func (etcdReg *PDEtcdRegister) Register(value *NodeInfo) error {
etcdReg.leaderStr = string(valueB)
etcdReg.nodeKey = etcdReg.getPDNodePath(value)
etcdReg.nodeValue = string(valueB)
- _, err = etcdReg.client.Set(etcdReg.nodeKey, etcdReg.nodeValue, ETCD_TTL)
+ _, err = etcdReg.client.Set(etcdReg.nodeKey, etcdReg.nodeValue, uint64(EtcdTTL))
if err != nil {
return err
}
@@ -628,11 +762,11 @@ func (etcdReg *PDEtcdRegister) refresh(stopC <-chan bool) {
select {
case <-stopC:
return
- case <-time.After(time.Second * time.Duration(ETCD_TTL/10)):
- _, err := etcdReg.client.SetWithTTL(etcdReg.nodeKey, ETCD_TTL)
+ case <-time.After(time.Second * time.Duration(EtcdTTL/10)):
+ _, err := etcdReg.client.SetWithTTL(etcdReg.nodeKey, uint64(EtcdTTL))
if err != nil {
coordLog.Errorf("update error: %s", err.Error())
- _, err := etcdReg.client.Set(etcdReg.nodeKey, etcdReg.nodeValue, ETCD_TTL)
+ _, err := etcdReg.client.Set(etcdReg.nodeKey, etcdReg.nodeValue, uint64(EtcdTTL))
if err != nil {
coordLog.Errorf("set key error: %s", err.Error())
}
@@ -650,7 +784,7 @@ func (etcdReg *PDEtcdRegister) Unregister(value *NodeInfo) error {
_, err := etcdReg.client.Delete(etcdReg.getPDNodePath(value), false)
if err != nil {
- coordLog.Warningf("cluser[%s] node[%s] unregister failed: %v", etcdReg.clusterID, value, err)
+ coordLog.Warningf("cluser[%s] node[%v] unregister failed: %v", etcdReg.clusterID, value, err)
return err
}
@@ -707,16 +841,16 @@ func (etcdReg *PDEtcdRegister) GetClusterEpoch() (EpochType, error) {
}
func (etcdReg *PDEtcdRegister) AcquireAndWatchLeader(leader chan *NodeInfo, stop chan struct{}) {
- master := etcdlock.NewMaster(etcdReg.client, etcdReg.leaderSessionPath, etcdReg.leaderStr, ETCD_TTL)
+ master := NewMaster(etcdReg.client, etcdReg.leaderSessionPath, etcdReg.leaderStr, uint64(EtcdTTL))
go etcdReg.processMasterEvents(master, leader, stop)
master.Start()
}
-func (etcdReg *PDEtcdRegister) processMasterEvents(master etcdlock.Master, leader chan *NodeInfo, stop chan struct{}) {
+func (etcdReg *PDEtcdRegister) processMasterEvents(master Master, leader chan *NodeInfo, stop chan struct{}) {
for {
select {
case e := <-master.GetEventsChan():
- if e.Type == etcdlock.MASTER_ADD || e.Type == etcdlock.MASTER_MODIFY {
+ if e.Type == MASTER_ADD || e.Type == MASTER_MODIFY {
// Acquired the lock || lock change.
var node NodeInfo
if err := json.Unmarshal([]byte(e.Master), &node); err != nil {
@@ -725,13 +859,12 @@ func (etcdReg *PDEtcdRegister) processMasterEvents(master etcdlock.Master, leade
}
coordLog.Infof("master event type[%d] Node[%v].", e.Type, node)
leader <- &node
- } else if e.Type == etcdlock.MASTER_DELETE {
+ } else if e.Type == MASTER_DELETE {
coordLog.Infof("master event delete.")
// Lost the lock.
var node NodeInfo
leader <- &node
} else {
- // TODO: lock error.
coordLog.Infof("unexpected event: %v", e)
}
case <-stop:
@@ -754,22 +887,22 @@ func (etcdReg *PDEtcdRegister) CheckIfLeader() bool {
}
func (etcdReg *PDEtcdRegister) GetDataNodes() ([]NodeInfo, error) {
- return etcdReg.getDataNodes()
+ n, _, err := etcdReg.getDataNodes(false)
+ return n, err
}
func (etcdReg *PDEtcdRegister) WatchDataNodes(dataNodesChan chan []NodeInfo, stop chan struct{}) {
- dataNodes, err := etcdReg.getDataNodes()
+ defer close(dataNodesChan)
+ dataNodes, _, err := etcdReg.getDataNodes(false)
if err == nil {
select {
case dataNodesChan <- dataNodes:
case <-stop:
- close(dataNodesChan)
return
}
}
key := etcdReg.getDataNodeRootPath()
- watcher := etcdReg.client.Watch(key, 0, true)
ctx, cancel := context.WithCancel(context.Background())
go func() {
select {
@@ -777,52 +910,35 @@ func (etcdReg *PDEtcdRegister) WatchDataNodes(dataNodesChan chan []NodeInfo, sto
cancel()
}
}()
- for {
- rsp, err := watcher.Next(ctx)
- if err != nil {
- if err == context.Canceled {
- coordLog.Infof("watch key[%s] canceled.", key)
- close(dataNodesChan)
- return
- } else {
- coordLog.Errorf("watcher key[%s] error: %s", key, err.Error())
- //rewatch
- if etcdlock.IsEtcdWatchExpired(err) {
- rsp, err = etcdReg.client.Get(key, false, true)
- if err != nil {
- coordLog.Errorf("rewatch and get key[%s] error: %s", key, err.Error())
- time.Sleep(time.Second)
- continue
- }
- watcher = etcdReg.client.Watch(key, rsp.Index+1, true)
- // should get the nodes to notify watcher since last watch is expired
- } else {
- time.Sleep(5 * time.Second)
- continue
- }
- }
- }
- dataNodes, err := etcdReg.getDataNodes()
+ watchWaitAndDo(ctx, etcdReg.client, key, true, func(rsp *client.Response) {
+ // must get the newest data,
+ // otherwise the get may return stale data from another follower
+ dataNodes, _, err := etcdReg.getDataNodes(true)
if err != nil {
coordLog.Errorf("key[%s] getNodes error: %s", key, err.Error())
- continue
+ return
}
select {
case dataNodesChan <- dataNodes:
case <-stop:
- close(dataNodesChan)
return
}
- }
+ }, nil)
}
-func (etcdReg *PDEtcdRegister) getDataNodes() ([]NodeInfo, error) {
- rsp, err := etcdReg.client.Get(etcdReg.getDataNodeRootPath(), false, false)
+func (etcdReg *PDEtcdRegister) getDataNodes(upToDate bool) ([]NodeInfo, uint64, error) {
+ var rsp *client.Response
+ var err error
+ if upToDate {
+ rsp, err = etcdReg.client.GetNewest(etcdReg.getDataNodeRootPath(), false, false)
+ } else {
+ rsp, err = etcdReg.client.Get(etcdReg.getDataNodeRootPath(), false, false)
+ }
if err != nil {
if client.IsKeyNotFound(err) {
- return nil, ErrKeyNotFound
+ return nil, 0, ErrKeyNotFound
}
- return nil, err
+ return nil, 0, err
}
dataNodes := make([]NodeInfo, 0)
for _, node := range rsp.Node.Nodes {
@@ -836,7 +952,7 @@ func (etcdReg *PDEtcdRegister) getDataNodes() ([]NodeInfo, error) {
}
dataNodes = append(dataNodes, nodeInfo)
}
- return dataNodes, nil
+ return dataNodes, rsp.Index, nil
}
func (etcdReg *PDEtcdRegister) CreateNamespacePartition(ns string, partition int) error {
@@ -875,9 +991,8 @@ func (etcdReg *PDEtcdRegister) IsExistNamespace(ns string) (bool, error) {
if err != nil {
if client.IsKeyNotFound(err) {
return false, nil
- } else {
- return false, err
}
+ return false, err
}
return true, nil
}
@@ -887,9 +1002,8 @@ func (etcdReg *PDEtcdRegister) IsExistNamespacePartition(ns string, partitionNum
if err != nil {
if client.IsKeyNotFound(err) {
return false, nil
- } else {
- return false, err
}
+ return false, err
}
return true, nil
}
@@ -934,6 +1048,7 @@ func (etcdReg *PDEtcdRegister) DeleteNamespacePart(ns string, partition int) err
return err
}
}
+ atomic.StoreInt32(&etcdReg.ifNamespaceChanged, 1)
return nil
}
@@ -950,6 +1065,7 @@ func (etcdReg *PDEtcdRegister) UpdateNamespacePartReplicaInfo(ns string, partiti
return err
}
replicaInfo.epoch = EpochType(rsp.Node.ModifiedIndex)
+ atomic.StoreInt32(&etcdReg.ifNamespaceChanged, 1)
return nil
}
rsp, err := etcdReg.client.CompareAndSwap(etcdReg.getNamespaceReplicaInfoPath(ns, partition), string(value), 0, "", uint64(oldGen))
@@ -957,6 +1073,7 @@ func (etcdReg *PDEtcdRegister) UpdateNamespacePartReplicaInfo(ns string, partiti
return err
}
replicaInfo.epoch = EpochType(rsp.Node.ModifiedIndex)
+ atomic.StoreInt32(&etcdReg.ifNamespaceChanged, 1)
return nil
}
@@ -967,6 +1084,7 @@ func (etcdReg *PDEtcdRegister) UpdateNamespaceSchema(ns string, table string, sc
return err
}
schema.Epoch = EpochType(rsp.Node.ModifiedIndex)
+ atomic.StoreInt32(&etcdReg.ifNamespaceChanged, 1)
return nil
}
@@ -976,6 +1094,7 @@ func (etcdReg *PDEtcdRegister) UpdateNamespaceSchema(ns string, table string, sc
return err
}
schema.Epoch = EpochType(rsp.Node.ModifiedIndex)
+ atomic.StoreInt32(&etcdReg.ifNamespaceChanged, 1)
return nil
}
@@ -988,19 +1107,19 @@ type DNEtcdRegister struct {
refreshStopCh chan bool
}
-func SetEtcdLogger(log etcdlock.Logger, level int32) {
- etcdlock.SetLogger(log, int(level))
-}
-
-func NewDNEtcdRegister(host string) *DNEtcdRegister {
- return &DNEtcdRegister{
- EtcdRegister: NewEtcdRegister(host),
+func NewDNEtcdRegister(host string) (*DNEtcdRegister, error) {
+ reg, err := NewEtcdRegister(host)
+ if err != nil {
+ return nil, err
}
+ return &DNEtcdRegister{
+ EtcdRegister: reg,
+ }, nil
}
func (etcdReg *DNEtcdRegister) Register(nodeData *NodeInfo) error {
if nodeData.LearnerRole != "" &&
- nodeData.LearnerRole != common.LearnerRoleLogSyncer &&
+ !common.IsRoleLogSyncer(nodeData.LearnerRole) &&
nodeData.LearnerRole != common.LearnerRoleSearcher {
return ErrLearnerRoleUnsupported
}
@@ -1030,7 +1149,7 @@ func (etcdReg *DNEtcdRegister) Register(nodeData *NodeInfo) error {
}
etcdReg.nodeValue = string(value)
- _, err = etcdReg.client.Set(etcdReg.nodeKey, etcdReg.nodeValue, ETCD_TTL)
+ _, err = etcdReg.client.Set(etcdReg.nodeKey, etcdReg.nodeValue, uint64(EtcdTTL))
if err != nil {
return err
}
@@ -1047,13 +1166,15 @@ func (etcdReg *DNEtcdRegister) refresh(stopChan chan bool) {
select {
case <-stopChan:
return
- case <-time.After(time.Second * time.Duration(ETCD_TTL/10)):
- _, err := etcdReg.client.SetWithTTL(etcdReg.nodeKey, ETCD_TTL)
+ case <-time.After(time.Second * time.Duration(EtcdTTL/10)):
+ _, err := etcdReg.client.SetWithTTL(etcdReg.nodeKey, uint64(EtcdTTL))
if err != nil {
coordLog.Errorf("update error: %s", err.Error())
- _, err := etcdReg.client.Set(etcdReg.nodeKey, etcdReg.nodeValue, ETCD_TTL)
+ _, err := etcdReg.client.Set(etcdReg.nodeKey, etcdReg.nodeValue, uint64(EtcdTTL))
if err != nil {
coordLog.Errorf("set key error: %s", err.Error())
+ } else {
+ coordLog.Infof("refresh registered new node: %v", etcdReg.nodeValue)
}
}
}
@@ -1072,7 +1193,7 @@ func (etcdReg *DNEtcdRegister) Unregister(nodeData *NodeInfo) error {
_, err := etcdReg.client.Delete(etcdReg.getDataNodePath(nodeData), false)
if err != nil {
- coordLog.Warningf("cluser[%s] node[%s] unregister failed: %v", etcdReg.clusterID, nodeData, err)
+ coordLog.Warningf("cluser[%s] node[%v] unregister failed: %v", etcdReg.clusterID, nodeData, err)
return err
}
@@ -1149,6 +1270,7 @@ func (etcdReg *DNEtcdRegister) NewRegisterNodeID() (uint64, error) {
}
func (etcdReg *DNEtcdRegister) WatchPDLeader(leader chan *NodeInfo, stop chan struct{}) error {
+ defer close(leader)
key := etcdReg.getPDLeaderPath()
rsp, err := etcdReg.client.Get(key, false, false)
@@ -1160,7 +1282,6 @@ func (etcdReg *DNEtcdRegister) WatchPDLeader(leader chan *NodeInfo, stop chan st
select {
case leader <- &node:
case <-stop:
- close(leader)
return nil
}
}
@@ -1168,7 +1289,6 @@ func (etcdReg *DNEtcdRegister) WatchPDLeader(leader chan *NodeInfo, stop chan st
coordLog.Errorf("get error: %s", err.Error())
}
- watcher := etcdReg.client.Watch(key, 0, true)
ctx, cancel := context.WithCancel(context.Background())
go func() {
select {
@@ -1177,34 +1297,9 @@ func (etcdReg *DNEtcdRegister) WatchPDLeader(leader chan *NodeInfo, stop chan st
}
}()
isMissing := true
- for {
- rsp, err = watcher.Next(ctx)
- if err != nil {
- if err == context.Canceled {
- coordLog.Infof("watch key[%s] canceled.", key)
- close(leader)
- return nil
- } else {
- coordLog.Errorf("watcher key[%s] error: %s", key, err.Error())
- //rewatch
- if etcdlock.IsEtcdWatchExpired(err) {
- isMissing = true
- rsp, err = etcdReg.client.Get(key, false, true)
- if err != nil {
- coordLog.Errorf("rewatch and get key[%s] error: %s", key, err.Error())
- time.Sleep(time.Second)
- continue
- }
- coordLog.Errorf("watch expired key[%s] : %s", key, rsp.Node.String())
- watcher = etcdReg.client.Watch(key, rsp.Index+1, true)
- } else {
- time.Sleep(5 * time.Second)
- continue
- }
- }
- }
+ watchWaitAndDo(ctx, etcdReg.client, key, true, func(rsp *client.Response) {
if rsp == nil {
- continue
+ return
}
var node NodeInfo
if rsp.Action == "expire" || rsp.Action == "delete" {
@@ -1213,7 +1308,7 @@ func (etcdReg *DNEtcdRegister) WatchPDLeader(leader chan *NodeInfo, stop chan st
} else if rsp.Action == "create" || rsp.Action == "update" || rsp.Action == "set" {
err := json.Unmarshal([]byte(rsp.Node.Value), &node)
if err != nil {
- continue
+ return
}
if node.ID != "" {
isMissing = false
@@ -1221,24 +1316,25 @@ func (etcdReg *DNEtcdRegister) WatchPDLeader(leader chan *NodeInfo, stop chan st
} else {
if isMissing {
coordLog.Infof("key[%s] new data : %s", key, rsp.Node.String())
- err := json.Unmarshal([]byte(rsp.Node.Value), &node)
- if err != nil {
- continue
- }
- if node.ID != "" {
- isMissing = false
- }
- } else {
- continue
}
+ err := json.Unmarshal([]byte(rsp.Node.Value), &node)
+ if err != nil {
+ return
+ }
+ if node.ID != "" {
+ isMissing = false
+ }
+ }
+ if node.ID == "" {
+ isMissing = true
}
select {
case leader <- &node:
case <-stop:
- close(leader)
- return nil
+ return
}
- }
+ }, nil)
+ return nil
}
func (etcdReg *DNEtcdRegister) getDataNodePathFromID(nid string) string {
diff --git a/cluster/register_etcd_test.go b/cluster/register_etcd_test.go
new file mode 100644
index 00000000..7af77b63
--- /dev/null
+++ b/cluster/register_etcd_test.go
@@ -0,0 +1,269 @@
+package cluster
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var testEtcdServers = "http://127.0.0.1:2379"
+
+type testLogger struct {
+ t *testing.T
+}
+
+func newTestLogger(t *testing.T) *testLogger {
+ return &testLogger{t: t}
+}
+
+func (l *testLogger) Output(maxdepth int, s string) error {
+ l.t.Logf("%v:%v", time.Now().UnixNano(), s)
+ return nil
+}
+
+func (l *testLogger) OutputErr(maxdepth int, s string) error {
+ l.t.Logf("%v:%v", time.Now().UnixNano(), s)
+ return nil
+}
+
+func (l *testLogger) OutputWarning(maxdepth int, s string) error {
+ l.t.Logf("%v:%v", time.Now().UnixNano(), s)
+ return nil
+}
+
+func TestRegisterEtcd(t *testing.T) {
+ clusterID := "unittest-zankv-cluster-test-register"
+ reg, err := NewPDEtcdRegister(testEtcdServers)
+ assert.Nil(t, err)
+ reg.InitClusterID(clusterID)
+ nodeInfo := &NodeInfo{
+ NodeIP: "127.0.0.1",
+ }
+ nodeInfo.ID = GenNodeID(nodeInfo, "pd")
+ reg.Register(nodeInfo)
+ defer reg.Unregister(nodeInfo)
+ reg.Start()
+
+ ns := "test-ns"
+ reg.DeleteWholeNamespace(ns)
+
+ part := 0
+ err = reg.CreateNamespacePartition(ns, part)
+ assert.Nil(t, err)
+ minGID, err := reg.PrepareNamespaceMinGID()
+ assert.Nil(t, err)
+
+ err = reg.CreateNamespace(ns, &NamespaceMetaInfo{
+ PartitionNum: 2,
+ Replica: 3,
+ MinGID: minGID,
+ })
+ assert.Nil(t, err)
+ replicaInfo := &PartitionReplicaInfo{
+ RaftNodes: []string{"127.0.0.1:111"},
+ RaftIDs: map[string]uint64{"127.0.0.1:111": 1},
+ }
+ err = reg.UpdateNamespacePartReplicaInfo(ns, part, replicaInfo, 0)
+ assert.Nil(t, err)
+ time.Sleep(time.Second)
+ allNs, epoch, err := reg.GetAllNamespaces()
+ assert.Nil(t, err)
+ t.Logf("ns: %v, epoch: %v", allNs, epoch)
+ assert.Equal(t, 1, len(allNs))
+ assert.Equal(t, 1, len(allNs[ns]))
+ assert.Equal(t, part, allNs[ns][part].Partition)
+
+ part1 := 1
+ err = reg.CreateNamespacePartition(ns, part1)
+ assert.Nil(t, err)
+ err = reg.UpdateNamespacePartReplicaInfo(ns, part1, replicaInfo, 0)
+ assert.Nil(t, err)
+
+ start := time.Now()
+ for {
+ allNs, nepoch, err := reg.GetAllNamespaces()
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(allNs))
+ t.Logf("ns: %v, epoch for ns: %v", allNs, nepoch)
+ if nepoch > epoch {
+ epoch = nepoch
+ assert.Equal(t, 2, len(allNs[ns]))
+ assert.Equal(t, part, allNs[ns][part].Partition)
+ assert.Equal(t, part1, allNs[ns][part1].Partition)
+ break
+ }
+ time.Sleep(time.Millisecond)
+ if time.Since(start) > time.Second*5 {
+ t.Errorf("epoch not increased: %v, %v", epoch, nepoch)
+ }
+ }
+ // should not change if there are no further updates
+ time.Sleep(time.Millisecond)
+ _, nepoch, err := reg.GetAllNamespaces()
+ assert.Equal(t, epoch, nepoch)
+
+ reg.DeleteNamespacePart(ns, part)
+
+ start = time.Now()
+ for {
+ allNs, nepoch, err := reg.GetAllNamespaces()
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(allNs))
+ t.Logf("ns: %v, epoch for ns: %v", allNs, nepoch)
+ if nepoch > epoch {
+ epoch = nepoch
+ assert.Equal(t, 1, len(allNs[ns]))
+ assert.Equal(t, part1, allNs[ns][part1].Partition)
+ break
+ }
+ time.Sleep(time.Millisecond)
+ if time.Since(start) > time.Second*5 {
+ t.Errorf("epoch not increased: %v, %v", epoch, nepoch)
+ }
+ }
+
+ // should not change if there are no further updates
+ time.Sleep(time.Millisecond)
+ _, nepoch, err = reg.GetAllNamespaces()
+ assert.Equal(t, epoch, nepoch)
+
+ reg.DeleteWholeNamespace(ns)
+ start = time.Now()
+ for {
+ allNs, nepoch, err := reg.GetAllNamespaces()
+ assert.Nil(t, err)
+ t.Logf("ns: %v, epoch for ns: %v", allNs, nepoch)
+ if nepoch > epoch {
+ assert.Equal(t, 0, len(allNs))
+ epoch = nepoch
+ break
+ }
+ time.Sleep(time.Millisecond)
+ if time.Since(start) > time.Second*5 {
+ t.Errorf("epoch not increased: %v, %v", epoch, nepoch)
+ }
+ }
+
+ // should not change if there are no further updates
+ time.Sleep(time.Millisecond)
+ _, nepoch, err = reg.GetAllNamespaces()
+ assert.Equal(t, epoch, nepoch)
+}
+
+func TestEtcdRegisterGetSetTimeout(t *testing.T) {
+ client, err := NewEClient(testEtcdServers)
+ assert.Nil(t, err)
+ client.timeout = time.Microsecond
+
+ testKey := "/zankv_test/unittest-zankv-cluster-test-register/timeouttest"
+ _, err = client.Set(testKey, "testvalue", 10)
+ assert.Equal(t, err, context.DeadlineExceeded)
+ _, err = client.Get(testKey, false, false)
+ assert.Equal(t, err, context.DeadlineExceeded)
+}
+
+func TestRegisterWatchKeepAliveTimeoutInDeadConn(t *testing.T) {
+ clusterID := "unittest-zankv-cluster-test-register"
+ reg, err := NewPDEtcdRegister(testEtcdServers)
+ assert.Nil(t, err)
+ reg.InitClusterID(clusterID)
+ nodeInfo := &NodeInfo{
+ NodeIP: "127.0.0.1",
+ }
+ nodeInfo.ID = GenNodeID(nodeInfo, "pd")
+ reg.Start()
+ defer reg.Stop()
+ reg.Register(nodeInfo)
+ defer reg.Unregister(nodeInfo)
+
+ stopC := make(chan struct{}, 1)
+ leaderChan := make(chan *NodeInfo, 10)
+ // watch pd leader and acquire leader
+ reg.AcquireAndWatchLeader(leaderChan, stopC)
+ pdLeaderChanged := int32(0)
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case n := <-leaderChan:
+ t.Logf("in pd register pd leader changed to : %v", n)
+ atomic.AddInt32(&pdLeaderChanged, 1)
+ case <-stopC:
+ return
+ }
+ }
+ }()
+ // watch data nodes
+ dnChan := make(chan []NodeInfo, 10)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ reg.WatchDataNodes(dnChan, stopC)
+ }()
+ dataNodeChanged := int32(0)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case n, ok := <-dnChan:
+ if !ok {
+ return
+ }
+ t.Logf("nodes changed to %v", n)
+ atomic.AddInt32(&dataNodeChanged, 1)
+ case <-stopC:
+ return
+ }
+ }
+ }()
+
+ // watch pd leader
+ nodeReg, err := NewDNEtcdRegister(testEtcdServers)
+ assert.Nil(t, err)
+ nodeReg.InitClusterID(clusterID)
+ nodeInfo2 := &NodeInfo{
+ NodeIP: "127.0.0.1",
+ }
+ nodeInfo2.ID = GenNodeID(nodeInfo2, "datanode")
+ nodeReg.Start()
+ defer nodeReg.Stop()
+ nodeReg.Register(nodeInfo2)
+ defer nodeReg.Unregister(nodeInfo2)
+ leaderChan2 := make(chan *NodeInfo, 10)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ nodeReg.WatchPDLeader(leaderChan2, stopC)
+ }()
+ timer := time.NewTimer(time.Second * time.Duration(EtcdTTL+1))
+ for {
+ select {
+ case n, ok := <-leaderChan2:
+ if !ok {
+ return
+ }
+ t.Logf("in data register pd leader changed to %v", n)
+ atomic.AddInt32(&pdLeaderChanged, 1)
+ case <-timer.C:
+ close(stopC)
+ wg.Wait()
+ t.Logf("changed: %v , %v", atomic.LoadInt32(&pdLeaderChanged), atomic.LoadInt32(&dataNodeChanged))
+ assert.True(t, atomic.LoadInt32(&dataNodeChanged) >= 1)
+ assert.True(t, atomic.LoadInt32(&dataNodeChanged) <= 4)
+ assert.True(t, atomic.LoadInt32(&pdLeaderChanged) <= 8)
+ assert.True(t, atomic.LoadInt32(&pdLeaderChanged) >= 2)
+ return
+ }
+ }
+}
+
+func TestRegisterWatchExpired(t *testing.T) {
+ // TODO: test watch when etcd returned index cleared
+}
diff --git a/cluster/util.go b/cluster/util.go
index e42d23f7..a8632a33 100644
--- a/cluster/util.go
+++ b/cluster/util.go
@@ -1,10 +1,10 @@
package cluster
import (
- "github.com/absolute8511/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/common"
)
-var coordLog = common.NewLevelLogger(common.LOG_INFO, common.NewDefaultLogger("cluster"))
+var coordLog = common.NewLevelLogger(common.LOG_INFO, common.NewLogger())
func CoordLog() *common.LevelLogger {
return coordLog
diff --git a/common/api_request.go b/common/api_request.go
index 29dd4c5c..21a78b33 100644
--- a/common/api_request.go
+++ b/common/api_request.go
@@ -2,6 +2,7 @@ package common
import (
"encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -73,5 +74,9 @@ func APIRequest(method string, endpoint string, body io.Reader, timeout time.Dur
if len(respBody) == 0 {
respBody = []byte("{}")
}
- return resp.StatusCode, json.Unmarshal(respBody, ret)
+ err = json.Unmarshal(respBody, ret)
+ if err != nil {
+ err = errors.New(err.Error() + string(respBody))
+ }
+ return resp.StatusCode, err
}
diff --git a/common/dynamic_conf.go b/common/dynamic_conf.go
new file mode 100644
index 00000000..7d43eff1
--- /dev/null
+++ b/common/dynamic_conf.go
@@ -0,0 +1,117 @@
+package common
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "sync"
+ "sync/atomic"
+)
+
+const (
+ ConfCheckSnapTimeout = "check_snap_timeout"
+ ConfCheckRaftTimeout = "check_raft_timeout"
+ ConfIgnoreStartupNoBackup = "ignore_startup_nobackup"
+ ConfIgnoreRemoteFileSync = "ignore_remote_file_sync"
+ ConfMaxRemoteRecover = "max_remote_recover"
+ ConfSlowLimiterSwitch = "slow_limiter_switch"
+ ConfSlowLimiterRefuseCostMs = "slow_limiter_refuse_cost_ms"
+ ConfSlowLimiterHalfOpenSec = "slow_limiter_half_open_sec"
+)
+
+var intConfMap map[string]*int64
+var strConfMap sync.Map
+var changedHandler sync.Map
+
+type KeyChangedHandler func(newV interface{})
+
+func init() {
+ intConfMap = make(map[string]*int64)
+ snapCheckTimeout := int64(60)
+ intConfMap[ConfCheckSnapTimeout] = &snapCheckTimeout
+ raftCheckTimeout := int64(5)
+ intConfMap[ConfCheckRaftTimeout] = &raftCheckTimeout
+ emptyInt := int64(0)
+ intConfMap["empty_int"] = &emptyInt
+ maxRemoteRecover := int64(2)
+ intConfMap[ConfMaxRemoteRecover] = &maxRemoteRecover
+ slowSwitch := int64(1)
+ intConfMap[ConfSlowLimiterSwitch] = &slowSwitch
+	slowRefuseCostMs := int64(600)
+	intConfMap[ConfSlowLimiterRefuseCostMs] = &slowRefuseCostMs
+ slowHalfOpenSec := int64(15)
+ intConfMap[ConfSlowLimiterHalfOpenSec] = &slowHalfOpenSec
+
+ strConfMap.Store("test_str", "test_str")
+}
+
+func RegisterConfChangedHandler(key string, h KeyChangedHandler) {
+ changedHandler.Store(key, h)
+}
+
+func DumpDynamicConf() []string {
+ cfs := make([]string, 0, len(intConfMap)*2)
+ for k, v := range intConfMap {
+ iv := atomic.LoadInt64(v)
+ cfs = append(cfs, k+":"+strconv.Itoa(int(iv)))
+ }
+ strConfMap.Range(func(k, v interface{}) bool {
+ cfs = append(cfs, fmt.Sprintf("%v:%v", k, v))
+ return true
+ })
+ sort.Sort(sort.StringSlice(cfs))
+ return cfs
+}
+
+func SetIntDynamicConf(k string, newV int) {
+ v, ok := intConfMap[k]
+ if ok {
+ atomic.StoreInt64(v, int64(newV))
+ v, ok := changedHandler.Load(k)
+ if ok {
+ hd, ok := v.(KeyChangedHandler)
+ if ok {
+ hd(newV)
+ }
+ }
+ }
+}
+
+func IsConfSetted(k string) bool {
+ iv := GetIntDynamicConf(k)
+ if iv != 0 {
+ return true
+ }
+ sv := GetStrDynamicConf(k)
+ if sv != "" {
+ return true
+ }
+ return false
+}
+
+func GetIntDynamicConf(k string) int {
+ v, ok := intConfMap[k]
+ if ok {
+ return int(atomic.LoadInt64(v))
+ }
+ return 0
+}
+
+func SetStrDynamicConf(k string, newV string) {
+ strConfMap.Store(k, newV)
+ v, ok := changedHandler.Load(k)
+ if ok {
+ hd, ok := v.(KeyChangedHandler)
+ if ok {
+ hd(newV)
+ }
+ }
+}
+
+func GetStrDynamicConf(k string) string {
+ v, ok := strConfMap.Load(k)
+ if !ok {
+ return ""
+ }
+ return v.(string)
+}
diff --git a/common/dynamic_conf_test.go b/common/dynamic_conf_test.go
new file mode 100644
index 00000000..fe16eff4
--- /dev/null
+++ b/common/dynamic_conf_test.go
@@ -0,0 +1,169 @@
+package common
+
+import (
+ "reflect"
+ "strconv"
+ "sync"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestDumpDynamicConf(t *testing.T) {
+ tests := []struct {
+ name string
+ want []string
+ }{
+ // Add test cases.
+ {"dump", []string{"check_raft_timeout:5", "check_snap_timeout:60",
+ "empty_int:0",
+ "max_remote_recover:2",
+ "slow_limiter_half_open_sec:15",
+ "slow_limiter_refuse_cost_ms:600",
+ "slow_limiter_switch:1",
+ "test_str:test_str"}},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := DumpDynamicConf(); !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("DumpDynamicConf() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestGetIntDynamicConf(t *testing.T) {
+ type args struct {
+ k string
+ }
+ tests := []struct {
+ name string
+ args args
+ want int
+ }{
+ //Add test cases.
+ {"get default check_snap_timeout", args{ConfCheckSnapTimeout}, 60},
+ {"get changed check_snap_timeout", args{ConfCheckSnapTimeout}, 2},
+ {"get non exist", args{"noexist"}, 0},
+ {"get after set non exist", args{"noexist-set"}, 0},
+ }
+ changedCalled := 0
+ RegisterConfChangedHandler(ConfCheckSnapTimeout, func(nv interface{}) {
+ _, ok := nv.(int)
+ assert.True(t, ok)
+ changedCalled++
+ })
+ SetIntDynamicConf("noexist-set", 2)
+ for i, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := GetIntDynamicConf(tt.args.k); got != tt.want {
+ t.Errorf("GetIntDynamicConf() = %v, want %v", got, tt.want)
+ }
+ SetIntDynamicConf(ConfCheckSnapTimeout, 2)
+ assert.Equal(t, i+1, changedCalled)
+ })
+ }
+}
+
+func TestGetStrDynamicConf(t *testing.T) {
+ type args struct {
+ k string
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ }{
+ {"get default test_str", args{"test_str"}, "test_str"},
+ {"get changed test_str", args{"test_str"}, "test_str_changed"},
+ {"get non exist", args{"noexist"}, ""},
+ {"get after set non exist", args{"noexist-set"}, "set-noexist"},
+ }
+ changedCalled := 0
+ RegisterConfChangedHandler("test_str", func(nv interface{}) {
+ _, ok := nv.(string)
+ assert.True(t, ok)
+ changedCalled++
+ })
+ SetStrDynamicConf("noexist-set", "set-noexist")
+ for i, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := GetStrDynamicConf(tt.args.k); got != tt.want {
+ t.Errorf("GetStrDynamicConf() = %v, want %v", got, tt.want)
+ }
+ SetStrDynamicConf("test_str", "test_str_changed")
+ assert.Equal(t, i+1, changedCalled)
+ })
+ }
+}
+
+func TestIsConfSetted(t *testing.T) {
+ type args struct {
+ k string
+ }
+ tests := []struct {
+ pre func()
+ name string
+ args args
+ want bool
+ }{
+ {nil, "check default check_snap_timeout", args{"check_snap_timeout"}, true},
+ {func() { SetIntDynamicConf("check_snap_timeout", 0) }, "check empty check_snap_timeout", args{"check_snap_timeout"}, false},
+ {nil, "check non exist", args{"noexist"}, false},
+ {nil, "check empty str conf", args{"empty_str"}, false},
+ {nil, "check empty int conf", args{"empty_int"}, false},
+ {nil, "check non exist str", args{"noexist-set-str"}, false},
+ {func() { SetStrDynamicConf("noexist-set-str", "v") }, "check after set non exist str", args{"noexist-set-str"}, true},
+ }
+ SetStrDynamicConf("empty_str", "")
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if tt.pre != nil {
+ tt.pre()
+ }
+ if got := IsConfSetted(tt.args.k); got != tt.want {
+ t.Errorf("IsConfSetted() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestConfRace(t *testing.T) {
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 1000000; i++ {
+ GetIntDynamicConf("check_snap_timeout")
+ GetIntDynamicConf("check_raft_timeout")
+ GetStrDynamicConf("test_str")
+ GetIntDynamicConf("noexist")
+ GetStrDynamicConf("noexist")
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 1000000; i++ {
+ SetIntDynamicConf("check_snap_timeout", i)
+ SetIntDynamicConf("check_raft_timeout", i)
+ SetStrDynamicConf("test_str", strconv.Itoa(i))
+ SetIntDynamicConf("noexist", i)
+ SetStrDynamicConf("noexist", "v")
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 1000000; i++ {
+ GetIntDynamicConf("check_snap_timeout")
+ GetIntDynamicConf("check_raft_timeout")
+ GetStrDynamicConf("test_str")
+ GetIntDynamicConf("noexist")
+ GetStrDynamicConf("noexist")
+ }
+ }()
+ wg.Wait()
+}
diff --git a/common/file_sync.go b/common/file_sync.go
index e5235055..f419c72c 100644
--- a/common/file_sync.go
+++ b/common/file_sync.go
@@ -1,20 +1,46 @@
package common
import (
+ "errors"
+ "time"
+
//"github.com/Redundancy/go-sync"
+ "fmt"
"log"
"os"
"os/exec"
"path/filepath"
+ "sync/atomic"
)
var runningCh chan struct{}
func init() {
- runningCh = make(chan struct{}, 2)
+ runningCh = make(chan struct{}, 3)
+}
+
+var rsyncLimit = int64(51200)
+
+const (
+ SnapWaitTimeout = time.Minute * 20
+)
+
+var ErrTransferOutofdate = errors.New("waiting transfer snapshot too long, maybe out of date")
+var ErrRsyncFailed = errors.New("transfer snapshot failed due to rsync error")
+
+func SetRsyncLimit(limit int64) {
+ atomic.StoreInt64(&rsyncLimit, limit)
}
+// Make sure the file sync never overwrites a hard-linked file in place, because the linked content may still be
+// referenced by rocksdb and must not change.
+// With hard-link sync we therefore unlink the file before updating it (rsync already behaves this way).
func RunFileSync(remote string, srcPath string, dstPath string, stopCh chan struct{}) error {
+	// Restricting the number of concurrent rsync runs may cause the snapshot to be transferred again and again:
+	// with many partitions waiting to transfer (1->2->3->4), a later partition may wait so long that its
+	// snapshot is already out of date by the time it starts.
+ begin := time.Now()
select {
case runningCh <- struct{}{}:
case <-stopCh:
@@ -27,6 +53,9 @@ func RunFileSync(remote string, srcPath string, dstPath string, stopCh chan stru
}
}()
+ if time.Since(begin) > SnapWaitTimeout {
+ return ErrTransferOutofdate
+ }
var cmd *exec.Cmd
if filepath.Base(srcPath) == filepath.Base(dstPath) {
dir := filepath.Dir(dstPath)
@@ -40,7 +69,8 @@ func RunFileSync(remote string, srcPath string, dstPath string, stopCh chan stru
} else {
log.Printf("copy from remote :%v/%v to local: %v\n", remote, srcPath, dstPath)
// limit rate in kilobytes
- cmd = exec.Command("rsync", "-avP", "--bwlimit=25600",
+ limitStr := fmt.Sprintf("--bwlimit=%v", atomic.LoadInt64(&rsyncLimit))
+ cmd = exec.Command("rsync", "--timeout=300", "-avP", "--delete", limitStr,
"rsync://"+remote+"/"+srcPath, dstPath)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
@@ -48,6 +78,7 @@ func RunFileSync(remote string, srcPath string, dstPath string, stopCh chan stru
err := cmd.Run()
if err != nil {
log.Printf("cmd %v error: %v", cmd, err)
+ return ErrRsyncFailed
}
return err
}
diff --git a/common/limit.go b/common/limit.go
new file mode 100644
index 00000000..d38b868e
--- /dev/null
+++ b/common/limit.go
@@ -0,0 +1,42 @@
+package common
+
+import "errors"
+
+var (
+ errKeySize = errors.New("invalid key size")
+ errSubKeySize = errors.New("invalid sub key size")
+)
+
+const (
+ //max key size
+ MaxKeySize int = 10240
+
+ // subkey length for hash/set/zset
+ MaxSubKeyLen int = 10240
+
+ //max value size
+ MaxValueSize int = 1024 * 1024 * 8
+)
+
+func CheckKey(key []byte) error {
+ if len(key) > MaxKeySize || len(key) == 0 {
+ return errKeySize
+ }
+ return nil
+}
+
+func CheckSubKey(subkey []byte) error {
+ if len(subkey) > MaxSubKeyLen {
+ return errSubKeySize
+ }
+ return nil
+}
+
+func CheckKeySubKey(key []byte, field []byte) error {
+ if len(key) > MaxKeySize || len(key) == 0 {
+ return errKeySize
+ } else if len(field) > MaxSubKeyLen {
+ return errSubKeySize
+ }
+ return nil
+}
diff --git a/common/listener.go b/common/listener.go
index 8d572c54..bd686bbf 100644
--- a/common/listener.go
+++ b/common/listener.go
@@ -53,7 +53,7 @@ func (ln StoppableListener) Accept() (c net.Conn, err error) {
return nil, err
case tc := <-connc:
tc.SetKeepAlive(true)
- tc.SetKeepAlivePeriod(3 * time.Minute)
+ tc.SetKeepAlivePeriod(10 * time.Second)
return tc, nil
}
}
diff --git a/common/logger.go b/common/logger.go
index 352d56a4..5d461cc8 100644
--- a/common/logger.go
+++ b/common/logger.go
@@ -8,15 +8,39 @@ import (
"sync/atomic"
"time"
- "github.com/absolute8511/glog"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ lumberjack "gopkg.in/natefinch/lumberjack.v2"
)
+var inTestLog bool
+
+func init() {
+ conf := zap.NewProductionConfig()
+ conf.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+ conf.DisableStacktrace = true
+ conf.OutputPaths = []string{"stdout"}
+ l, err := conf.Build(zap.AddCallerSkip(2), zap.AddCaller())
+ if err != nil {
+ panic(err)
+ }
+
+ zapLog = l
+}
+
type Logger interface {
Output(maxdepth int, s string) error
OutputErr(maxdepth int, s string) error
OutputWarning(maxdepth int, s string) error
}
+func NewLogger() Logger {
+ if inTestLog {
+ return NewDefaultLogger("test")
+ }
+ return newZapLogger("")
+}
+
type defaultLogger struct {
logger *log.Logger
}
@@ -46,24 +70,6 @@ func (dl *defaultLogger) OutputWarning(maxdepth int, s string) error {
return nil
}
-type GLogger struct {
-}
-
-func (gl *GLogger) Output(maxdepth int, s string) error {
- glog.InfoDepth(maxdepth, s)
- return nil
-}
-
-func (gl *GLogger) OutputErr(maxdepth int, s string) error {
- glog.ErrorDepth(maxdepth, s)
- return nil
-}
-
-func (gl *GLogger) OutputWarning(maxdepth int, s string) error {
- glog.WarningDepth(maxdepth, s)
- return nil
-}
-
const (
LOG_ERR int32 = iota
LOG_WARN
@@ -358,3 +364,128 @@ func (l *MergeLogger) outputLoop() {
}
}
}
+
+var zapLog *zap.Logger
+
+func FlushZapDefault() {
+ if zapLog != nil {
+ zapLog.Sync()
+ }
+}
+
+func SetZapRotateOptions(alsoLogToStdout bool, alsoLogErrToStdErr bool, logfile string, maxMB int, maxBackup int, maxAgeDay int) {
+ stdOut := zapcore.Lock(os.Stdout)
+ stdErr := zapcore.Lock(os.Stderr)
+ errPri := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
+ return lvl >= zapcore.ErrorLevel
+ })
+ nonErrPri := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
+ return lvl < zapcore.ErrorLevel
+ })
+ encConf := zap.NewProductionEncoderConfig()
+ encConf.EncodeTime = zapcore.ISO8601TimeEncoder
+ enc := zapcore.NewJSONEncoder(encConf)
+ fmt.Printf("zap logger option: %v, %v, %v, %v\n", alsoLogToStdout, alsoLogErrToStdErr, logfile, maxAgeDay)
+ if logfile == "" {
+ wrap := zap.WrapCore(func(c zapcore.Core) zapcore.Core {
+ return zapcore.NewTee(
+ zapcore.NewCore(enc, stdErr, errPri),
+ zapcore.NewCore(enc, stdOut, nonErrPri),
+ )
+ })
+ zapLog = zapLog.WithOptions(wrap)
+ return
+ }
+ rotateLog := &lumberjack.Logger{
+ Filename: logfile,
+ MaxSize: 200,
+ MaxAge: 30,
+ }
+ if maxMB > 0 {
+ rotateLog.MaxSize = maxMB
+ }
+ if maxBackup > 0 {
+ rotateLog.MaxBackups = maxBackup
+ }
+ if maxAgeDay > 0 {
+ rotateLog.MaxAge = maxAgeDay
+ }
+ wrap := zap.WrapCore(func(c zapcore.Core) zapcore.Core {
+ w := zapcore.AddSync(rotateLog)
+ rotateCore := zapcore.NewCore(enc, w, zap.DebugLevel)
+ if alsoLogToStdout && alsoLogErrToStdErr {
+ return zapcore.NewTee(
+ zapcore.NewCore(enc, stdOut, nonErrPri),
+ zapcore.NewCore(enc, stdErr, errPri),
+ rotateCore,
+ )
+ } else if alsoLogToStdout {
+ return zapcore.NewTee(
+ zapcore.NewCore(enc, stdOut, nonErrPri),
+ rotateCore,
+ )
+ } else if alsoLogErrToStdErr {
+ return zapcore.NewTee(
+ zapcore.NewCore(enc, stdErr, errPri),
+ rotateCore,
+ )
+ }
+ return zapcore.NewTee(rotateCore)
+ })
+ zapLog = zapLog.WithOptions(wrap)
+}
+
+type zapLogger struct {
+ module string
+}
+
+// Note: the zap logger currently has no buffered writer by default; we should pick up the upstream change once a
+// new zap release ships it.
+func newZapLogger(module string) *zapLogger {
+ return &zapLogger{
+ module: module,
+ }
+}
+
+func (zl *zapLogger) Output(maxdepth int, s string) error {
+ if zapLog == nil {
+ return nil
+ }
+ if maxdepth <= 2 {
+ zapLog.Named(zl.module).Info(s)
+ } else {
+ zapLog.Named(zl.module).WithOptions(zap.AddCallerSkip(maxdepth - 2)).Info(s)
+ }
+ return nil
+}
+
+func (zl *zapLogger) OutputWarning(maxdepth int, s string) error {
+ if zapLog == nil {
+ return nil
+ }
+ if maxdepth == 2 {
+ zapLog.Named(zl.module).Warn(s)
+ } else {
+ zapLog.Named(zl.module).WithOptions(zap.AddCallerSkip(maxdepth - 2)).Warn(s)
+ }
+ return nil
+}
+
+func (zl *zapLogger) OutputErr(maxdepth int, s string) error {
+ if zapLog == nil {
+ return nil
+ }
+ if maxdepth == 2 {
+ zapLog.Named(zl.module).Error(s)
+ } else {
+ zapLog.Named(zl.module).WithOptions(zap.AddCallerSkip(maxdepth - 2)).Error(s)
+ }
+ return nil
+}
+
+func (zl *zapLogger) Flush() {
+ if zapLog == nil {
+ return
+ }
+ zapLog.Sync()
+}
diff --git a/common/type.go b/common/type.go
index aafd26e7..bcc89794 100644
--- a/common/type.go
+++ b/common/type.go
@@ -21,6 +21,10 @@ const (
LearnerRoleSearcher = "role_searcher"
)
+func IsRoleLogSyncer(role string) bool {
+ return strings.HasPrefix(role, LearnerRoleLogSyncer)
+}
+
var (
SCAN_CURSOR_SEP = []byte(";")
SCAN_NODE_SEP = []byte(":")
@@ -31,6 +35,7 @@ var (
ErrStopped = errors.New("the node stopped")
ErrQueueTimeout = errors.New("queue request timeout")
ErrInvalidArgs = errors.New("invalid arguments")
+ ErrInvalidTTL = errors.New("invalid expire time")
ErrInvalidRedisKey = errors.New("invalid redis key")
ErrInvalidScanType = errors.New("invalid scan type")
ErrEpochMismatch = errors.New("epoch mismatch")
@@ -91,33 +96,65 @@ const (
// do not need to care about the data expiration. Every node in the cluster should start the 'TTLChecker' of the storage system
// with this policy.
LocalDeletion ExpirationPolicy = iota
-
- // ConsistencyDeletion indicates all the expired data should be deleted through Raft, the underlying storage system should
- // not delete any data and all the expired keys should be sent to the expired channel. Only the leader should starts
- // the 'TTLChecker' with this policy.
- ConsistencyDeletion
-
//
PeriodicalRotation
+ // WaitCompact indicates that all ttl will be stored in the values and will be checked while compacting and reading
+ WaitCompact
+
UnknownPolicy
)
const (
- DefaultExpirationPolicy = "local_deletion"
+ DefaultExpirationPolicy = "local_deletion"
+ WaitCompactExpirationPolicy = "wait_compact"
+)
+
+var (
+ DefaultSnapCount = 600000
+ DefaultSnapCatchup = 500000
)
func StringToExpirationPolicy(s string) (ExpirationPolicy, error) {
switch s {
- case "local_deletion":
+ case DefaultExpirationPolicy:
return LocalDeletion, nil
- case "consistency_deletion":
- return ConsistencyDeletion, nil
+ case WaitCompactExpirationPolicy:
+ return WaitCompact, nil
default:
return UnknownPolicy, errors.New("unknown policy")
}
}
+type DataVersionT int
+
+const (
+ DefaultDataVer DataVersionT = iota
+
+	// ValueHeaderV1 adds a header to kv values to store the ttl or other header data
+ ValueHeaderV1
+
+ UnknownDataType
+)
+
+const (
+ ValueHeaderV1Str = "value_header_v1"
+ ValueHeaderDefaultStr = "default"
+)
+
+func StringToDataVersionType(s string) (DataVersionT, error) {
+ switch s {
+ case "":
+ return DefaultDataVer, nil
+ case ValueHeaderDefaultStr:
+ return DefaultDataVer, nil
+ case ValueHeaderV1Str:
+ return ValueHeaderV1, nil
+ default:
+ return UnknownDataType, errors.New("unknown data version type")
+ }
+}
+
type WriteCmd struct {
Operation string
Args [][]byte
@@ -174,14 +211,15 @@ type ScorePair struct {
Member []byte
}
+type WriteCommandFunc func(redcon.Command) (interface{}, error)
type CommandFunc func(redcon.Conn, redcon.Command)
-type CommandRspFunc func(redcon.Conn, redcon.Command, interface{})
+type CommandRspFunc func(redcon.Command, interface{}) (interface{}, error)
type InternalCommandFunc func(redcon.Command, int64) (interface{}, error)
type MergeCommandFunc func(redcon.Command) (interface{}, error)
type MergeWriteCommandFunc func(redcon.Command, interface{}) (interface{}, error)
type CmdRouter struct {
- wcmds map[string]CommandFunc
+ wcmds map[string]WriteCommandFunc
rcmds map[string]CommandFunc
mergeCmds map[string]MergeCommandFunc
mergeWriteCmds map[string]MergeCommandFunc
@@ -189,18 +227,23 @@ type CmdRouter struct {
func NewCmdRouter() *CmdRouter {
return &CmdRouter{
- wcmds: make(map[string]CommandFunc),
+ wcmds: make(map[string]WriteCommandFunc),
rcmds: make(map[string]CommandFunc),
mergeCmds: make(map[string]MergeCommandFunc),
mergeWriteCmds: make(map[string]MergeCommandFunc),
}
}
-func (r *CmdRouter) Register(isWrite bool, name string, f CommandFunc) bool {
- cmds := r.wcmds
- if !isWrite {
- cmds = r.rcmds
+func (r *CmdRouter) RegisterWrite(name string, f WriteCommandFunc) bool {
+ if _, ok := r.wcmds[strings.ToLower(name)]; ok {
+ return false
}
+ r.wcmds[name] = f
+ return true
+}
+
+func (r *CmdRouter) RegisterRead(name string, f CommandFunc) bool {
+ cmds := r.rcmds
if _, ok := cmds[strings.ToLower(name)]; ok {
return false
}
@@ -208,13 +251,14 @@ func (r *CmdRouter) Register(isWrite bool, name string, f CommandFunc) bool {
return true
}
-func (r *CmdRouter) GetCmdHandler(name string) (CommandFunc, bool, bool) {
+func (r *CmdRouter) GetWCmdHandler(name string) (WriteCommandFunc, bool) {
+ v, ok := r.wcmds[strings.ToLower(name)]
+ return v, ok
+}
+
+func (r *CmdRouter) GetCmdHandler(name string) (CommandFunc, bool) {
v, ok := r.rcmds[strings.ToLower(name)]
- if ok {
- return v, false, ok
- }
- v, ok = r.wcmds[strings.ToLower(name)]
- return v, true, ok
+ return v, ok
}
func (r *CmdRouter) RegisterMerge(name string, f MergeCommandFunc) bool {
diff --git a/common/util.go b/common/util.go
index 2bb4c198..32ea309b 100644
--- a/common/util.go
+++ b/common/util.go
@@ -2,7 +2,11 @@ package common
import (
"bytes"
+ "fmt"
+ "io"
"net"
+ "os"
+ "path/filepath"
"regexp"
"strconv"
"strings"
@@ -22,6 +26,7 @@ const (
APINodeAllReady = "/node/allready"
// check if the namespace raft node is synced and can be elected as leader immediately
APIIsRaftSynced = "/cluster/israftsynced"
+ APITableStats = "/tablestats"
// below api for pd
APIGetSnapshotSyncInfo = "/pd/snapshot_sync_info"
@@ -50,7 +55,8 @@ func GetIPv4ForInterfaceName(ifname string) string {
return ""
}
-var validNamespaceTableNameRegex = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`)
+// '-' is not allowed in names since it is used to join the name and the partition
+var validNamespaceTableNameRegex = regexp.MustCompile(`^[a-zA-Z0-9_]+$`)
const (
InternalPrefix = "##"
@@ -60,10 +66,6 @@ func IsValidNamespaceName(ns string) bool {
return isValidNameString(ns)
}
-func IsValidTableName(tb []byte) bool {
- return isValidName(tb)
-}
-
func IsInternalTableName(tb string) bool {
return strings.HasPrefix(tb, InternalPrefix)
}
@@ -75,11 +77,13 @@ func isValidNameString(name string) bool {
return validNamespaceTableNameRegex.MatchString(name)
}
-func isValidName(name []byte) bool {
- if len(name) > 255 || len(name) < 1 {
- return false
+func CutNamesapce(rawKey []byte) ([]byte, error) {
+ index := bytes.IndexByte(rawKey, NamespaceTableSeperator)
+ if index <= 0 {
+ return nil, ErrInvalidRedisKey
}
- return validNamespaceTableNameRegex.Match(name)
+ realKey := rawKey[index+1:]
+ return realKey, nil
}
func ExtractNamesapce(rawKey []byte) (string, []byte, error) {
@@ -122,6 +126,9 @@ func DeepCopyCmd(cmd redcon.Command) redcon.Command {
}
func IsMergeScanCommand(cmd string) bool {
+ if len(cmd) < 4 {
+ return false
+ }
switch len(cmd) {
case 4:
if (cmd[0] == 's' || cmd[0] == 'S') &&
@@ -140,6 +147,15 @@ func IsMergeScanCommand(cmd string) bool {
(cmd[6] == 'n' || cmd[6] == 'N') {
return true
}
+ if (cmd[0] == 'r' || cmd[0] == 'R') &&
+ (cmd[1] == 'e' || cmd[1] == 'E') &&
+ (cmd[2] == 'v' || cmd[2] == 'V') &&
+ (cmd[3] == 's' || cmd[3] == 'S') &&
+ (cmd[4] == 'c' || cmd[4] == 'C') &&
+ (cmd[5] == 'a' || cmd[5] == 'A') &&
+ (cmd[6] == 'n' || cmd[6] == 'N') {
+ return true
+ }
case 8:
if (cmd[0] == 'f' || cmd[0] == 'F') &&
(cmd[1] == 'u' || cmd[1] == 'U') &&
@@ -151,6 +167,19 @@ func IsMergeScanCommand(cmd string) bool {
(cmd[7] == 'n' || cmd[7] == 'N') {
return true
}
+ case 10:
+ if (cmd[0] == 'a' || cmd[0] == 'A') &&
+ (cmd[1] == 'd' || cmd[1] == 'D') &&
+ (cmd[2] == 'v' || cmd[2] == 'V') &&
+ (cmd[3] == 'r' || cmd[3] == 'R') &&
+ (cmd[4] == 'e' || cmd[4] == 'E') &&
+ (cmd[5] == 'v' || cmd[5] == 'V') &&
+ (cmd[6] == 's' || cmd[6] == 'S') &&
+ (cmd[7] == 'c' || cmd[7] == 'C') &&
+ (cmd[8] == 'a' || cmd[8] == 'A') &&
+ (cmd[9] == 'n' || cmd[9] == 'N') {
+ return true
+ }
}
return false
@@ -171,6 +200,9 @@ func IsFullScanCommand(cmd string) bool {
}
func IsMergeIndexSearchCommand(cmd string) bool {
+ if len(cmd) != len("hidx.from") {
+ return false
+ }
return strings.ToLower(cmd) == "hidx.from"
}
@@ -194,3 +226,132 @@ func IsMergeCommand(cmd string) bool {
return false
}
+
+func BuildCommand(args [][]byte) redcon.Command {
+ // build a pipeline command
+ bufSize := 128
+ if len(args) > 5 {
+ bufSize = 256
+ }
+ buf := make([]byte, 0, bufSize)
+ buf = append(buf, '*')
+ buf = append(buf, strconv.FormatInt(int64(len(args)), 10)...)
+ buf = append(buf, '\r', '\n')
+
+ poss := make([]int, 0, len(args)*2)
+ for _, arg := range args {
+ buf = append(buf, '$')
+ buf = append(buf, strconv.FormatInt(int64(len(arg)), 10)...)
+ buf = append(buf, '\r', '\n')
+ poss = append(poss, len(buf), len(buf)+len(arg))
+ buf = append(buf, arg...)
+ buf = append(buf, '\r', '\n')
+ }
+
+ // reformat a new command
+ var ncmd redcon.Command
+ ncmd.Raw = buf
+ ncmd.Args = make([][]byte, len(poss)/2)
+ for i, j := 0, 0; i < len(poss); i, j = i+2, j+1 {
+ ncmd.Args[j] = ncmd.Raw[poss[i]:poss[i+1]]
+ }
+ return ncmd
+}
+
+// CopyFileForHardLink copies a file by creating a hard link, and falls back to copying the file contents if linking fails.
+// Use this method to save disk space when neither the src nor the dst content will ever be changed.
+func CopyFileForHardLink(src, dst string) error {
+ // open source file
+ sfi, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !sfi.Mode().IsRegular() {
+ return fmt.Errorf("non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String())
+ }
+
+ // open dest file
+ dfi, err := os.Stat(dst)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ // file doesn't exist
+ err := os.MkdirAll(filepath.Dir(dst), DIR_PERM)
+ if err != nil {
+ return err
+ }
+ } else {
+ if !(dfi.Mode().IsRegular()) {
+ return fmt.Errorf("non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String())
+ }
+ if os.SameFile(sfi, dfi) {
+ return nil
+ }
+		// link will fail if dst exists, so remove it first
+ os.Remove(dst)
+ }
+ if err = os.Link(src, dst); err == nil {
+ return nil
+ }
+ err = copyFileContents(src, dst)
+ if err != nil {
+ return err
+ }
+ os.Chmod(dst, sfi.Mode())
+ return nil
+}
+
+// copyFileContents copies the contents of src to dst;
+// any existing contents of dst are replaced by the source.
+func copyFileContents(src, dst string) error {
+ srcFile, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer srcFile.Close()
+	// remove dst first to avoid overwriting hard-linked content, which would affect the originally linked file
+ err = os.Remove(dst)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ }
+ dstFile, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ cerr := dstFile.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
+
+ if _, err = io.Copy(dstFile, srcFile); err != nil {
+ return err
+ }
+ err = dstFile.Sync()
+ return err
+}
+
+func CopyFile(src, dst string, override bool) error {
+ sfi, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !sfi.Mode().IsRegular() {
+ return fmt.Errorf("copyfile: non-regular source file %v (%v)", sfi.Name(), sfi.Mode().String())
+ }
+ _, err = os.Stat(dst)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ } else {
+ if !override {
+ return nil
+ }
+ }
+ return copyFileContents(src, dst)
+}
diff --git a/common/util_test.go b/common/util_test.go
new file mode 100644
index 00000000..23073c00
--- /dev/null
+++ b/common/util_test.go
@@ -0,0 +1,23 @@
+package common
+
+import "testing"
+
+func TestIsValidNamespace(t *testing.T) {
+
+ tests := []struct {
+ name string
+ args string
+ want bool
+ }{
+ {"test", "test", true},
+ {"test", "test_$%", false},
+ {"test", "", false},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := IsValidNamespaceName(tt.args); got != tt.want {
+			t.Errorf("IsValidNamespaceName() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/default.conf b/default.conf
index 08089199..1cc82238 100644
--- a/default.conf
+++ b/default.conf
@@ -1,7 +1,7 @@
{
"server_conf": {
"cluster_id": "test-default-alpha-1",
- "etcd_cluster_addresses": "http://etcd0.example.com:2379,http://etcd1.example.com:2379",
+ "etcd_cluster_addresses": "http://127.0.0.1:2379",
"broadcast_interface":"lo0",
"data_dir":"./test1",
"data_rsync_module": "zanredisdb",
@@ -11,6 +11,8 @@
"election_tick": 30,
"tick_ms": 200,
"keep_wal": 20,
+ "default_snap_count": 100000,
+ "default_snap_catchup": 50000,
"local_raft_addr":"http://0.0.0.0:12379",
"tags": {"ssd":"", "dc_info":"dc1"},
"rocksdb_opts": {
@@ -19,6 +21,7 @@
"block_cache":0,
"use_shared_cache": true,
"cache_index_and_filter_blocks": false,
+ "optimize_filters_for_hits": false,
"write_buffer_size": 0,
"max_write_buffer_number": 0,
"min_write_buffer_number_to_merge":0,
diff --git a/dist.sh b/dist.sh
index b31d4a70..849da44d 100755
--- a/dist.sh
+++ b/dist.sh
@@ -11,7 +11,8 @@
set -e
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-export GOPATH=$DIR/.godeps:$(go env GOPATH)
+# this is used by CI, which cannot run dep ensure
+export GOPATH=$(go env GOPATH):$DIR/.godeps
echo $GOPATH
arch=$(go env GOARCH)
@@ -19,13 +20,18 @@ os=$(go env GOOS)
version=$(awk '/VERBINARY\?/ {print $NF}' < $DIR/Makefile | sed 's/"//g')
goversion=$(go version | awk '{print $3}')
+echo $ROCKSDB
+
echo "... building v$version for $os/$arch"
BUILD=$(mktemp -d -t zankvXXXXXX)
TARGET="zankv-$version.$os-$arch.$goversion"
LATEST="zankv-latest.$os-$arch.$goversion"
-GOOS=$os GOARCH=$arch \
+GOOS=$os GOARCH=$arch ROCKSDB=$ROCKSDB \
make DESTDIR=$BUILD PREFIX=/$TARGET install
pushd $BUILD
+if [ "$os" == "linux" ]; then
+ cp -r $TARGET/bin $DIR/dist/docker/
+fi
tar czvf $TARGET.tar.gz $TARGET
mv $TARGET.tar.gz $DIR/dist/
mv $TARGET $LATEST
@@ -35,3 +41,9 @@ rm -rf $LATEST
popd
make clean
rm -r $BUILD
+
+IMAGE_URL="image.example.com"
+if [ "$os" == "linux" ]; then
+ docker build -t $IMAGE_URL/youzan/zankv:v$version .
+ docker push $IMAGE_URL/youzan/zankv:v$version
+fi
diff --git a/doc/design.md b/doc/design.md
index 3d14cb7c..6b75d6e1 100644
--- a/doc/design.md
+++ b/doc/design.md
@@ -1 +1,169 @@
# Design
+
+## Overall architecture
+
+### Cluster architecture
+
+![arch](resource/zankv-arch.png)
+
+The whole cluster is composed of placedriver nodes, datanode data nodes, etcd and rsync. The roles are:
+
+PD node: responsible for data placement and data balancing; coordinates all zankv nodes in the cluster and writes metadata to etcd.
+
+datanode: stores the actual data.
+
+etcd: stores metadata, the data distribution and other metadata used for coordination.
+
+rsync: transfers snapshot backup files.
+
+### Data node architecture
+
+![datanode](resource/zankv-datanode.png)
+
+A datanode consists of several partitions, and each partition is a raft group. Each partition is made up of the redis protocol layer, the raft log replication layer, the data mapping layer and the data storage layer.
+
+redis protocol layer: reads and parses redis commands from clients and submits them to the raft layer for replication.
+
+raft log replication layer: replicates the raft log across replicas to guarantee data consistency.
+
+data mapping layer: encodes redis data structures into the KV form of the DB layer while preserving redis data-structure semantics.
+
+data storage layer: wraps the KV operations of rocksdb.
+
+A data node registers its node information in etcd and refreshes it periodically, so the placedriver can discover it through etcd.
+
+
+### placedriver node
+
+The placedriver decides the node placement of each data partition and automatically re-assigns the data distribution when a data node fails; overall it schedules the metadata of all data nodes in the cluster. The placedriver itself stores no data: all metadata about data distribution is kept in the etcd cluster. Several placedrivers can run for read load balancing; they elect a master through etcd, and the master performs node assignment and migration tasks. Placedriver nodes watch node changes in etcd to track data-node changes in the whole cluster.
+
+The placedriver exposes a data-distribution query API so that the client SDK can look up which node holds a given partition.
+
+#### Data partition placement
+
+Data is currently partitioned by hash. The partition algorithm is configured per namespace, so different namespaces can use different partitioning. A read or write request against a hash partition is routed as follows (see the Go sketch after the diagram below):
+
+- The total partition count is fixed when the namespace is created, for example 16.
+- The client hashes the primary key of the redis command to an integer and takes it modulo the partition count to get a partition id.
+- Using the partition id, the client looks up the partition-to-datanode mapping table to find the data node.
+- The client sends the command to that data node.
+- After validating the command against the partition algorithm, the data node dispatches it internally to the local data partition that owns the partition id; if the partition is not local, an error is returned.
+- The local data partition on the data node executes the redis command.
+
+![hash partition](resource/hash-partition.png)
+
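+As a rough illustration of the routing steps above, the following Go sketch computes a partition id from the primary key. It assumes a CRC32 hash and a hypothetical mapping table for illustration only; the actual hash function and data structures used by ZanKV may differ.
+
+```go
+package main
+
+import (
+	"fmt"
+	"hash/crc32"
+)
+
+// partitionID hashes the primary key and maps it onto one of partitionNum partitions.
+// CRC32 is only an assumption here; ZanKV may use a different hash function.
+func partitionID(primaryKey []byte, partitionNum int) int {
+	return int(crc32.ChecksumIEEE(primaryKey) % uint32(partitionNum))
+}
+
+func main() {
+	// hypothetical mapping table: partition id -> data node address
+	partToNode := map[int]string{0: "10.0.0.1:12381", 1: "10.0.0.2:12381"}
+	pid := partitionID([]byte("test_ns:table:key1"), 2)
+	fmt.Printf("key routed to partition %d on node %s\n", pid, partToNode[pid])
+}
+```
+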
+The partition-to-datanode mapping table is generated by an algorithm and then written to etcd. The algorithm must guarantee the following (an interleaving sketch follows this list):
+
+- The replicas of one partition must be placed on different nodes.
+- Each node should own roughly the same number of leader replicas, and follower replicas should be spread as evenly as possible.
+- Replica migration after a node failure should be minimized.
+- When deployed in datacenter-aware mode, the replicas of each partition must also be spread across datacenters.
+- Other special placement requirements are handled by tagging nodes, filtering out part of the nodes and restricting placement to specific machines.
+
+To meet these goals, the mapping table is computed as follows:
+
+- Filter the live, healthy nodes by tag, then group them by datacenter into separate lists, e.g. a:[1,3,2], b:[2,3,1].
+- Sort the nodes inside each datacenter by name: a:[1,2,3], b:[1,2,3].
+- Interleave the nodes from the different datacenters into a candidate array: [a1, b1, a2, b2, a3, b3].
+- Assign leader and follower nodes partition by partition, advancing the starting position each time: partition 0 -> [a1(leader), b1(follower), a2(follower)], partition 1 -> [b1, a2, b2], partition 2 -> [a2, b2, a3], partition 3 -> [b2, a3, b3], partition 4 -> [a3, b3, a1], partition 5 -> [b3, a1, b1].
+- If node b1 fails, the candidate array becomes [a1, b2, a2, b3, a3] and the mapping table is updated to: partition 0 -> [a1(leader), b2(follower), a2(follower)], partition 1 -> [b2, a2, b3], partition 2 -> [a2, b3, a3], partition 3 -> [b3, a3, a1], partition 4 -> [a3, a1, b2], partition 5 -> [a1, b2, a2].
+
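+A minimal sketch of the datacenter interleaving step above, assuming each datacenter's node list is already sorted; this is illustrative only and not the exact placedriver implementation.
+
+```go
+package main
+
+import "fmt"
+
+// interleaveByDC round-robins over the per-datacenter node lists to build the candidate array.
+func interleaveByDC(dcNodes [][]string) []string {
+	var out []string
+	for i := 0; ; i++ {
+		appended := false
+		for _, nodes := range dcNodes {
+			if i < len(nodes) {
+				out = append(out, nodes[i])
+				appended = true
+			}
+		}
+		if !appended {
+			return out
+		}
+	}
+}
+
+func main() {
+	// two datacenters a and b with sorted node names
+	fmt.Println(interleaveByDC([][]string{{"a1", "a2", "a3"}, {"b1", "b2", "b3"}}))
+	// prints: [a1 b1 a2 b2 a3 b3]
+}
+```
+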
+
+#### Data balancing
+
+When data nodes change, the partition-to-datanode mapping table has to be adjusted dynamically; this dynamic adjustment is the data-balancing process. A data-node change triggers an etcd watch event, so the placedriver detects node changes in real time and decides whether balancing is needed. To avoid impacting online traffic, a time window in which balancing is allowed can be configured.
+
+To avoid frequent data migration, the placedriver judges how urgent balancing is after a node change; in particular, unnecessary migration can be avoided while data nodes are being upgraded. The following cases are considered:
+
+- A new node joins: lowest priority; data is migrated to the new node only inside the allowed time window and only when no node is failing.
+- Fewer than half of the nodes fail: after waiting for a while, the replicas on the failed nodes are migrated to other nodes.
+- More than half of the nodes fail: a network partition may have happened, so no automatic migration is performed; if it is confirmed not to be a partition, the stable node count can be adjusted to force migration.
+- Not enough nodes for placement: if the replica count is 3 but fewer than 3 nodes are available, no migration happens.
+
+By default the stable cluster node count only grows: it is increased automatically whenever a new data node is discovered and is not decreased when nodes fail. To decrease it, call the API:
+```
+POST /stable/nodenum?number=xx
+```
+The stable node count is maintained to avoid unnecessary migration during a cluster network partition: when the number of healthy nodes is less than or equal to half of the stable node count, automatic migration stops.
+
+## HA workflow
+
+### Server side
+
+- On a graceful shutdown, the node transfers the leadership of all partitions it owns through raft and removes itself automatically.
+- A newly elected leader re-registers itself in etcd, and every partition updates its latest leader information in the etcd metadata.
+- The placedriver watches leader changes; once the watch fires, it refreshes the latest leaders from etcd and returns them to clients.
+- If a client read/write request reaches a non-leader node, the server returns a specific cluster-changed error; the client refreshes its cluster data and retries.
+
+### go-sdk handling
+
+- On startup the SDK starts a periodic lookup goroutine that queries the placedriver for the latest leader information and caches it locally (a hedged sketch of the retry loop follows this list).
+- For reads and writes it picks the connection of the partition leader from the current cache.
+- If the server returns the specific error, the SDK checks whether the cluster changed; if so, it immediately queries the latest leaders and retries until the retry succeeds or the configured timeout/retry count is exceeded.
+- The periodic placedriver lookup goroutine removes the connections of nodes that have been removed.
+
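+The real go-zanredisdb API is not shown here; the sketch below only illustrates the retry-on-cluster-change pattern described above, with callbacks standing in for the SDK internals.
+
+```go
+package sketch
+
+import (
+	"errors"
+	"time"
+)
+
+// doWithRetry illustrates the retry-on-cluster-change pattern. The lookup, send and
+// errChanged callbacks stand in for the real SDK internals, which are not shown here.
+func doWithRetry(lookup func() string, send func(addr string) error,
+	errChanged func(error) bool, maxRetry int, timeout time.Duration) error {
+	deadline := time.Now().Add(timeout)
+	for i := 0; i < maxRetry && time.Now().Before(deadline); i++ {
+		addr := lookup() // leader address from the locally cached mapping
+		err := send(addr)
+		if err == nil {
+			return nil
+		}
+		if !errChanged(err) {
+			return err // not a cluster-change error, give up immediately
+		}
+		// cluster changed: wait a little for the refreshed leader info, then retry
+		time.Sleep(100 * time.Millisecond)
+	}
+	return errors.New("retry exhausted while cluster is changing")
+}
+```
+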
+## Dynamic namespaces
+
+## Distributed index
+
+In a distributed system an index has several shards; to keep the shards consistent, index creation has to be coordinated across them. The basic flow is:
+
+![index-create](resource/zankv-index-create.png)
+
+## Cross-datacenter clusters
+zankv currently supports two cross-datacenter deployment modes for different scenarios.
+
+### Single cluster across multiple datacenters
+
+In this mode one large cluster is deployed across datacenters in the same city with low latency, typically three datacenters. The replicas must be spread evenly across the datacenters so that losing a single datacenter affects neither reads nor writes while data stays consistent.
+
+When deploying, the datacenter of the current node must be specified in the configuration so that data placement is datacenter-aware. Add the following option:
+
+```
+"tags": {"dc_info":"dc1"}
+```
+Data nodes in different datacenters use different dc_info values; when the placedriver assigns replicas, it ensures the replicas of each partition are spread evenly across the datacenters.
+
+In this mode replicas in different datacenters are kept in sync through raft. If one datacenter goes down, the other two still hold more than half of the replicas, so raft reads and writes are unaffected and data stays consistent. After the failed datacenter recovers, raft automatically catches it up with the data written during the outage. No manual intervention is needed during failure or recovery: availability under a single-datacenter failure and data consistency are both guaranteed.
+
+### Per-datacenter clusters with asynchronous replication
+
+For remote datacenters, a single cross-datacenter cluster would add significant replication latency to every read and write. To optimize latency, the asynchronous remote-datacenter replication mode can be used: the remote datacenter is replicated asynchronously in the background, so the local datacenter's latency is unaffected, but replication lag is introduced and data may become inconsistent during a failover.
+
+Deployment in this mode is a bit more involved; see the operations guide for the deployment and failure-handling details. The basic idea is to add a raft learner node in the remote datacenter which asynchronously pulls the raft log and replays it into the remote cluster. Since every partition is an independent raft group, replay is serial within a partition and parallel across partitions. The remote datacenter is read-only by default; if the primary datacenter fails and a switchover is needed, some data may not have been replicated yet and has to be reconciled manually from the raft log after recovery.
+
+## Data expiration design
+
+To cover different business scenarios, ZanKV provides several data-expiration policies, and its behaviour differs depending on the policy in use.
+Currently ZanKV supports the following expiration policies:
+- consistent replicated expiration
+- non-consistent local deletion
+
+Unlike redis, ZanKV only supports second-granularity expiration and does not support the pexpire command (millisecond expiration).
+
+### Consistent replicated expiration
+
+When the cluster is configured with the consistent replicated expiration policy, all deletions of expired data are initiated by the leader and performed through the raft protocol (a hedged sketch of this loop follows this section).
+
+In practice, the expiration handling logic calls the storage layer every second to fetch the keys and types that have already expired. To reduce the number of network round trips between cluster nodes, keys of the same type are deleted in batches with a single command and a single raft round.
+
+With this policy, deleting expired data requires frequent network traffic between cluster nodes; when a large amount of data expires, this puts significant load on the network and on the leader. It is suited to workloads where the amount of expiring data is small and the business logic relies on the TTL of the data.
+
+In most cases, however, the business logic does not depend on the exact TTL. Instead, the requirement is usually that data is kept for some period (half a year, three months, a week, ...) and may be automatically invalidated and deleted afterwards; the business will not access or care about that data any more, so the storage cluster can delete it on its own. In that case the non-consistent local-deletion policy is a better fit.
+
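+A hedged sketch of the leader-side loop described above, using hypothetical scanExpired and proposeBatchDelete helpers rather than the real ZanKV internals.
+
+```go
+package sketch
+
+import "time"
+
+// expiredKey pairs a key with its data type so that same-type keys can be deleted in one batch.
+type expiredKey struct {
+	dataType string
+	key      []byte
+}
+
+// runExpireLoop polls the storage layer every second and proposes one batched raft
+// delete per data type. scanExpired and proposeBatchDelete are hypothetical helpers.
+func runExpireLoop(stopC chan struct{}, scanExpired func() []expiredKey,
+	proposeBatchDelete func(dataType string, keys [][]byte) error) {
+	ticker := time.NewTicker(time.Second)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-stopC:
+			return
+		case <-ticker.C:
+			batches := make(map[string][][]byte)
+			for _, ek := range scanExpired() {
+				batches[ek.dataType] = append(batches[ek.dataType], ek.key)
+			}
+			for dt, keys := range batches {
+				// one command, one raft round per data type
+				_ = proposeBatchDelete(dt, keys)
+			}
+		}
+	}
+}
+```
+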
+### Non-consistent local deletion
+
+With the non-consistent local-deletion policy, deletions are performed independently by each cluster node and do not go through the raft protocol.
+
+Concretely, deletion is done automatically by the storage layer without involving the data-handling logic: every node scans for expired data every 5 minutes and deletes the expired entries locally.
+
+Compared with consistent replicated expiration, this policy has the following properties:
+
+It supports a much larger volume of expiring data: deleting expired data needs no inter-node communication, which avoids the network and processing bottlenecks of mass expiration.
+It offers higher write throughput: since users do not care about the exact TTL, the storage encoding is optimized to reduce write amplification.
+The expiration scan interval is 5 minutes: data is guaranteed never to be deleted early, but deletion is not real-time, so business logic must not depend on whether the data still exists.
+The TTL command cannot return the remaining lifetime and the persist command is not supported; this is the trade-off made for higher throughput and larger-scale expiration.
+
+### Prefix-based cleanup
+
+Although non-consistent deletion performs better, a truly massive amount of expired data still produces a large number of local delete operations and puts pressure on the underlying rocksdb. To further reduce this impact, ZanKV supports prefix-based cleanup for time-bounded data: the business writes keys with a timestamp prefix (e.g. monitoring or log data) and then calls the deletion API with that prefix to remove all data within a time range, which is far more efficient. The prefix-deletion API is implemented on top of rocksdb's DelRange, which puts much less pressure on rocksdb than a large number of Delete operations.
\ No newline at end of file
diff --git a/doc/examples.md b/doc/examples.md
deleted file mode 100644
index df635b4e..00000000
--- a/doc/examples.md
+++ /dev/null
@@ -1 +0,0 @@
-# Examples
diff --git a/doc/getting_started.md b/doc/getting_started.md
index 10124373..5cc0e17a 100644
--- a/doc/getting_started.md
+++ b/doc/getting_started.md
@@ -9,37 +9,32 @@ apt-get install libsnappy1 libsnappy-dev (for Debian/Ubuntu)
brew install snappy (for Mac)
-Build the rocksdb
+Build the rocksdb with jemalloc
git clone https://github.com/absolute8511/rocksdb.git
cd rocksdb
-git checkout v5.8.8-share-rate-limiter
-make static_lib
+git checkout v6.4.6-patched
+PORTABLE=1 USE_SSE=1 USE_PCLMUL=1 WITH_JEMALLOC_FLAG=1 JEMALLOC=1 make static_lib
-Install the dependency:
+Install the dependency (only needed for old Go versions; with go1.13+ this is handled by Go modules):
-CGO_CFLAGS="-I/path/to/rocksdb/include" CGO_LDFLAGS="-L/path/to/rocksdb -lrocksdb -lstdc++ -lm -lsnappy -lrt" go get github.com/absolute8511/gorocksdb
+CGO_CFLAGS="-I/path/to/rocksdb/include" CGO_LDFLAGS="-L/path/to/rocksdb -lrocksdb -lstdc++ -lm -lsnappy -lrt -ljemalloc" go get github.com/youzan/gorocksdb
-CGO_CFLAGS="-I/path/to/rocksdb/include" CGO_LDFLAGS="-L/path/to/rocksdb -lrocksdb -lstdc++ -lm -lsnappy" go get github.com/absolute8511/gorocksdb (for MacOS)
+CGO_CFLAGS="-I/path/to/rocksdb/include" CGO_LDFLAGS="-L/path/to/rocksdb -lrocksdb -lstdc++ -lm -lsnappy -ljemalloc" go get github.com/youzan/gorocksdb (for MacOS)
-use the `gpm` to install other dependencies
-
-wget https://raw.githubusercontent.com/pote/gpm/v1.4.0/bin/gpm && chmod +x gpm && sudo mv gpm /usr/local/bin
-gpm get
-
+use `dep ensure` to install the other dependencies, or use Go modules with go1.13+
-
-Build zankv and placedriver from the source (only support go version 1.7.4+, gcc 4.9+ or xcode-command-line-tools on Mac):
+Build zankv and placedriver from source (requires Go 1.10.8+, and gcc 4.9+ or xcode-command-line-tools on Mac):
-make
+ROCKSDB=/path/to/rocksdb make
If you want package the binary release run the scripts
./pre-dist.sh
-./dist.sh
+ROCKSDB=/path/to/rocksdb ./dist.sh
## Deploy
@@ -124,6 +119,12 @@ auto_balance_and_migrate = true
}
}
```
+You need at least 3 zankv nodes if you want to set replicator=3.
+
+ * Initialize a namespace by sending the namespace creation API to the placedriver
+ ```
+curl -X POST '127.0.0.1:13801/cluster/namespace/create?namespace=test_p4&partition_num=4&replicator=3'
+ ```
## API
@@ -134,7 +135,8 @@ placedriver has several HTTP APIs to manager the namespace
- list the placedriver nodes: `GET /listpd`
- query the namespace meta info: `GET /query/namespace_name`
- create the namespace (handle only by leader) : `POST /cluster/namespace/create?namespace=test_p16&partition_num=16&replicator=3`
-- delete the namespace (handle only by leader): `POST /cluster/namespace/delete?namespace=test_p16&partition=**`
+- create the namespace with the new dynamic ttl support (handled only by the leader): `POST /cluster/namespace/create?namespace=test_p16&partition_num=16&replicator=3&data_version=value_header_v1&expiration_policy=wait_compact`
+- delete the namespace (handled only by the leader): `DELETE /cluster/namespace/delete?namespace=test_p16&partition=**`
storage server HTTP APIs for stats:
@@ -156,4 +158,4 @@ Golang client SDK : [client-sdk] , a redis proxy can be deployed
based on this golang sdk if you want use the redis client in other language.
-[client-sdk]: https://github.com/absolute8511/go-zanredisdb
+[client-sdk]: https://github.com/youzan/go-zanredisdb
diff --git a/doc/operation-guide.md b/doc/operation-guide.md
new file mode 100644
index 00000000..6fc450e5
--- /dev/null
+++ b/doc/operation-guide.md
@@ -0,0 +1,457 @@
+# Operations guide
+
+## Deployment configuration
+
+### Default configuration
+
+Description of the zankv data node configuration parameters:
+```
+{
+ "server_conf": {
+  "cluster_id": "test-qa-alpha-1", ### cluster id, used to distinguish different clusters
+  "etcd_cluster_addresses": "http://127.0.0.1:2379,http://127.0.0.2:2379", ### etcd cluster address list
+  "broadcast_interface": "eth0", ### network interface to bind; binding by interface avoids configuring an ip on every machine
+  "broadcast_addr": "", ### bind ip; prefer the interface setting and leave this empty
+  "redis_api_port": 12381, ### redis protocol listen port
+  "http_api_port": 12380, ### http api port
+  "grpc_api_port": 12382, ### grpc port for internal cluster communication
+  "profile_port": 0, ### debug/profiling port, defaults to 6666
+  "data_dir": "/data/zankv", ### data directory
+  "data_rsync_module": "zankv", ### rsync module; the name must match the rsync configuration, and the rsync module's data path must match this data directory
+  "local_raft_addr": "http://0.0.0.0:12379", ### listen address of the internal raft transport
+  "tags": null, ### tags describing machine attributes, used by rack-aware placement
+  "syncer_write_only": false, ### used for cross-datacenter multi-cluster deployment, not needed by default
+  "syncer_normal_init": false, ### used to initialize cross-datacenter replication, not needed by default
+  "learner_role": "", ### raft learner role used for cross-datacenter replication, not needed by default
+  "remote_sync_cluster": "", ### address of the standby datacenter cluster for cross-datacenter replication, not needed by default
+  "state_machine_type": "", ### state machine type, reserved to distinguish future state machines; leave empty, only rocksdb is supported currently
+  "rsync_limit": 0, ### rsync transfer rate limit; normally not needed, the default limit is used
+  "election_tick": 30, ### raft leader election timeout in ticks, keep the default
+  "tick_ms": 200, ### raft heartbeat interval, keep the default
+  "use_redis_v2": true, ### whether to use the new redis serialization inside raft entries; off by default, supported since 0.8.4, not compatible with older versions, improves write performance when enabled
+  "log_dir": "/data/logs/zankv", ### glog directory, supported since 0.8.4
+  "use_rocks_wal": false, ### whether to store the raft WAL in rocksdb; off by default, reduces memory usage at some performance cost
+  "shared_rocks_wal": true, ### whether different raft groups share one rocksdb for the WAL; off by default
+  "wal_rocksdb_opts": {
+    "use_shared_cache":true,
+    "use_shared_rate_limiter":true,
+    "rate_bytes_per_sec":80000000,
+    "max_write_buffer_number":10,
+    "optimize_filters_for_hits":true ### recommended for WAL storage since WAL lookups always hit
+  },
+  "rocksdb_opts": { ### see the rocksdb tuning section
+    "verify_read_checksum": false,
+    "use_shared_cache": true,
+    "use_shared_rate_limiter": true
+  },
+  "max_scan_job": 0 ### maximum number of concurrent scan jobs; normally the built-in default is used
+ }
+}
+
+```
+
+For namespace creation, 3 replicas are recommended. Choose the partition count based on the expected maximum cluster size; a common initial value is 4x the expected maximum number of machines, which leaves headroom for scaling out. The recommended minimum cluster is 4 machines with 16 partitions, which can in theory scale to roughly 50TB by adding machines. If further growth is needed, create a new namespace with more partitions.
+
+Note that if a namespace with many partitions has to run on few machines, reduce the rocksdb write_buffer_size and the namespace snap_count accordingly to limit the memory used by each partition.
+
+### RocksDB tuning
+
+Since rocksdb is used as the underlying storage, some rocksdb settings should be understood and tuned for the specific hardware.
+The configurable parameters are:
+```
+{
+ "verify_read_checksum": false, ### whether reads verify the crc; disabling it reduces CPU usage and is fine in most cases
+ "block_size": 0, ### block size, keep the default; 8KB~64KB is recommended
+ "block_cache": 0, ### keep the default and enable use_shared_cache=true; the cache is sized automatically from the available OS memory
+ "cache_index_and_filter_blocks": true, ### whether index and filter blocks go into the block cache; set true on small-memory machines so memory stays bounded as data grows, set false on machines with >64GB memory so all block indexes are cached for faster access
+ // when setting the following parameters, keep level0_file_num_compaction_trigger * write_buffer_size * min_write_buffer_number_tomerge = max_bytes_for_level_base to reduce write amplification; defaults are recommended, scale them down proportionally on small-memory machines
+ "write_buffer_size": 0,
+ "min_write_buffer_number_to_merge": 0,
+ "level0_file_num_compaction_trigger": 0,
+ "max_bytes_for_level_base": 0,
+ "max_write_buffer_number": 4, #### 2~6 recommended, depending on the available memory
+ "target_file_size_base": 0, ### keep the default
+ "max_background_flushes": 0, ### keep the default; rocksdb flush threads
+ "max_background_compactions": 0, ### keep the default; rocksdb compaction threads
+ "min_level_to_compress": 0, ### keep the default
+ "max_mainifest_file_size": 0, ### keep the default
+ "rate_bytes_per_sec": 20000000, ### rate limit for rocksdb background IO, recommended to avoid IO spikes; 20MB ~ 50MB is a good range
+ "use_shared_cache": true, ### recommended true, all rocksdb instances share the block cache
+ "engine_type": "", ### rocksdb and pebble are supported, rocksdb is the default
+ "use_shared_rate_limiter": true ### recommended true, all instances share the rate limiter
+}
+```
+
+
+## Creating a namespace
+
+Send the following API to the placedriver leader to create a new namespace dynamically:
+
+```
+POST /cluster/namespace/create?namespace=test_p16&partition_num=16&replicator=3&data_version=value_header_v1&expiration_policy=wait_compact
+
+data_version: data version of the storage; different versions use different serialization formats and it cannot be changed after the namespace is initialized. The old format is used by default; value_header_v1 is currently the only new version and is required for precise expiration.
+expiration_policy: expiration policy; non-precise expiration is used by default. Newer versions support the wait_compact precise expiration policy, where expired data is never returned to clients and the physical cleanup is deferred until compaction decides whether the data needs to be removed.
+```
+
+Notes on TTL:
+
+By default expiration is non-precise: expired keys are recorded in an expired-key list and removed by a periodic scan, so a TTL can only be set once and the precision depends on the scan interval (5 minutes by default).
+
+If redis-like precise (second-level) TTL is needed, use the new `wait_compact` expiration policy, which stores the expiration time together with the key metadata and checks it on every read and write, giving much more precise expiration. Since only the metadata is checked and nothing is physically deleted at that point, the space is only reclaimed during compaction, so space reclamation lags behind. Note that the new expiration policy requires the new data version `value_header_v1`.
+
+## Dynamic rate limiting of slow writes
+
+Since v0.8, dynamic rate limiting and pre-queuing of slow writes is supported to reduce the RT impact of slow write commands on other commands. The server periodically aggregates the RT of write commands at the DB layer; commands that exceed certain thresholds are classified into different slowness levels, and writes of each level are pre-queued in queues of different lengths. This limits how many slow writes enter the raft pipeline at the same time, so they do not occupy too much time in the raft apply queue and hurt the RT of the other queued writes. Above a further threshold, slow writes are rejected outright (a hedged sketch follows).
+
+Dynamic limiting can be disabled via configuration: `POST /conf/set?type=int&key=slow_limiter_switch&value=0`
+
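+The dynamic configuration keys slow_limiter_switch and slow_limiter_refuse_cost_ms are defined in common/dynamic_conf.go in this change; the sketch below only illustrates how such keys might gate a write and is not the actual limiter implementation.
+
+```go
+package sketch
+
+import (
+	"errors"
+	"time"
+
+	"github.com/youzan/ZanRedisDB/common"
+)
+
+var errRefusedBySlowLimiter = errors.New("write refused: command has been too slow recently")
+
+// maybeRefuseSlowWrite rejects a write whose recent DB cost exceeds the configured
+// refuse threshold while the limiter switch is on. Illustration only.
+func maybeRefuseSlowWrite(recentCost time.Duration) error {
+	if common.GetIntDynamicConf(common.ConfSlowLimiterSwitch) == 0 {
+		return nil // limiter disabled via POST /conf/set?type=int&key=slow_limiter_switch&value=0
+	}
+	refuseMs := common.GetIntDynamicConf(common.ConfSlowLimiterRefuseCostMs)
+	if recentCost > time.Duration(refuseMs)*time.Millisecond {
+		return errRefusedBySlowLimiter
+	}
+	return nil
+}
+```
+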
+
+## Monitoring
+
+Besides the default stats API, the following additional monitoring data is available for inspecting internal state.
+
+### Prometheus
+
+More metrics are exposed at `/metric` on the data node HTTP port, mainly in the following categories:
+
+- Latency metrics: DB-layer write RT, RT of each raft stage, end-to-end cluster response RT, etc.
+- Slow-write metrics: per-table and per-command slow writes at the DB layer, historical RT distribution, slow-write rejections, slow-write pre-queuing counts and queue-time distribution
+- Event metrics: counts of various events, e.g. leader switches, and counts of different error types
+- Queue metrics: depths of various queues, e.g. the raft proposal queue, the state-machine apply queue, the network transport queue
+- Collection-size metrics: size distributions of the collection types, used to find which tables hold large collections
+
+### topn
+
+More detailed key-level topn statistics are available at `/stats?table_detail=true` on the HTTP port, including:
+
+- topn keys by write count
+- topn collection keys by collection size
+
+The statistics are evicted based on a combination of counts and recency.
+
+### SLOW_LOGS
+
+Operations that significantly affect cluster performance are written to the slow log, which helps monitor and diagnose performance risks caused by unreasonable data usage. Slow logs are currently produced when:
+
+- a write command takes longer than a threshold in the storage layer
+- the size of a collection element count exceeds a threshold
+
+The slow-log level can be set separately via `POST /slowlog/set?loglevel=0`; different levels use different thresholds (-1 disables it).
+
+## Operations and APIs
+
+rsync process: transfers zankv backup data during failure recovery.
+
+The transfer speed can be rate-limited dynamically:
+
+```json
+POST /rsynclimit?limit=80000
+```
+
+Cluster statistics can be fetched from both the placedriver and zankv APIs:
+
+placedriver API
+
+```
+/cluster/stats
+Get the overall cluster state, indicating whether the cluster is stable. Must be sent to the leader node.
+
+/datanodes
+Get the live data nodes (zankv). A change in the number of live nodes should trigger a warning-level alert.
+
+POST /cluster/node/remove?remove_node=xxx
+Decommission an unused node; replace xxx with the node_id string returned by the node listing API. Decommissioning triggers data migration; wait for the migration to finish and check the log output before stopping the node.
+
+GET /namespaces
+List the namespaces of the cluster
+
+GET /listpd
+List the pd nodes of the cluster
+
+GET /query/:namespace?epoch=xxx&disable_cache=false
+Query the data distribution of a namespace
+
+GET /querytable/stats/:table?leader_only=true
+Query detailed table statistics of the cluster
+
+POST /learner/stop
+Stop the learner nodes by writing state false; only for pd instances with the learner role, which is normally used for asynchronous replication
+
+POST /learner/start
+Start the learner nodes and enable replication by writing state true
+
+GET /learner/state
+Query the current learner state; note that false is returned only when the stored state is false, anything else (including an invalid state) returns true
+
+GET /cluster/stats
+Query the cluster stability; must be sent to the current leader. Returns true when all raft groups are healthy, otherwise false. A false value always means something is unstable, but a true value does not rule out other problems.
+
+POST /cluster/balance?enable=false
+Enable or disable data balancing; must be sent to the current leader
+
+POST /cluster/node/remove?remove_node=xxx
+Decommission the given data node permanently; the node id can be obtained from /datanodes
+
+DELETE /cluster/partition/remove_node?node=xxx&namespace=xxx&partition=xx
+Remove one partition of a namespace from the given node; used to repair partition data in abnormal situations, e.g. when that partition's data on the node is corrupted
+
+POST /cluster/upgrade/begin
+Enter cluster-upgrade mode, which suppresses data migration while single nodes are upgraded over a long period
+
+POST /cluster/upgrade/done
+Leave cluster-upgrade mode and re-enable data balancing
+
+DELETE /cluster/namespace/delete?namespace=xxx&partition=**
+Delete the given namespace; a specific partition can be given, but ** (all partitions) is recommended in normal cases
+
+POST /cluster/namespace/meta/update?namespace=xxx&replicator=xx&optimizefsync=true&snapcount=xxx
+Update namespace metadata such as the replica count, the fsync optimization switch and the snapshot interval
+
+POST /stable/nodenum?number=xx
+To avoid triggering balancing during a large-scale failure, the cluster tracks its maximum node count automatically (it only grows, never shrinks). Reduce the stable node count manually when scaling in.
+
+```
+
+zankv API
+
+```
+POST /kv/optimize/:namespace/:table
+Run this periodically to keep performance from degrading when a lot of data has been deleted. rocksdb v6+ already includes some of these optimizations, so it may not be needed. Scope it to a namespace and table to limit the impact on read/write traffic.
+
+POST /kv/optimize_expire/:namespace
+Optimize the expiration metadata. After a large amount of expired data has been cleaned, scanning for expired data may become less efficient; this compacts that part of the data.
+
+POST /kv/backup/:namespace
+Trigger a snapshot backup of the given namespace
+
+POST --header "Content-Type: application/json" --data '{"start_from":"","end_to":"","delete_all":true, "dryrun":false}' /kv/delrange/:namespace/:table
+Delete a key range of the given table. The range is given by start_from and end_to (the values must be base64-encoded, e.g. 20200301 is passed as MjAyMDAzMDE=). To delete the whole table, set delete_all=true.
+
+POST /slowlog/set?loglevel=xx
+Adjust the slow-log level; higher levels print more slow logs.
+
+POST /staleread?allow=true
+Enable stale reads: non-leader replicas can then serve reads, possibly returning stale data
+
+POST /topn/enable/:namespace
+Enable topn write statistics, which track the most-written tables over the recent period
+
+POST /topn/disable/:namespace
+Disable topn write statistics
+
+POST /topn/clear/:namespace
+Clear the currently collected topn write statistics
+
+/stats
+Get statistics. In db_write_stats and cluster_write_stats, the two arrays of length 16 are counters for the corresponding buckets: db_write_stats covers the storage layer and cluster_write_stats covers the server protocol layer (from receiving the network request to sending the reply). See the WriteStats struct in the code for the exact bucket definitions.
+
+Add table_details=true to also get per-table statistics.
+
+/raft/stats
+Get the raft cluster state, used to diagnose anomalies
+
+```
+
+Dynamic configuration supports two value types, int and string. The corresponding set/get APIs are:
+
+```json
+POST /conf/set?type=int&key=xxx&value=xxx
+POST /conf/set?type=str&key=xxx&value=xxx
+
+GET /conf/get?type=int&key=xxx
+GET /conf/get?type=str&key=xxx
+
+The currently supported dynamic parameters are:
+int type:
+check_snap_timeout - snapshot check timeout in seconds
+check_raft_timeout - raft sync check timeout in seconds
+max_remote_recover - maximum number of partitions recovered concurrently during cross-datacenter replication
+string type:
+ignore_startup_nobackup - whether to ignore a missing snapshot at startup
+ignore_remote_file_sync - whether to skip snapshot transfer for cross-datacenter replication
+```
+
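+For reference, the keys above map to the helpers added in common/dynamic_conf.go in this change. A short usage sketch (the HTTP handler wiring that calls these helpers is assumed, not shown here):
+
+```go
+package sketch
+
+import (
+	"fmt"
+
+	"github.com/youzan/ZanRedisDB/common"
+)
+
+func exampleDynamicConf() {
+	// react to changes of check_snap_timeout pushed through POST /conf/set
+	common.RegisterConfChangedHandler(common.ConfCheckSnapTimeout, func(newV interface{}) {
+		fmt.Printf("check_snap_timeout changed to %v\n", newV)
+	})
+
+	// what the HTTP handler would call for /conf/set?type=int&key=check_snap_timeout&value=30
+	common.SetIntDynamicConf(common.ConfCheckSnapTimeout, 30)
+
+	// readers simply query the current value
+	timeoutSec := common.GetIntDynamicConf(common.ConfCheckSnapTimeout)
+	fmt.Println("current check_snap_timeout:", timeoutSec)
+}
+```
+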
+The following API adjusts some rocksdb parameters dynamically:
+```
+POST /db/options/set?key=xxx&value=xxx
+```
+Note: only the rocksdb storage engine currently supports dynamic adjustment. The adjustable keys are:
+- "rate_limiter_bytes_per_sec": adjusts the bytes-per-second limit of the rocksdb rate limiter
+- "max_background_compactions": adjusts the upper bound of rocksdb background compactions
+- "max_background_jobs": adjusts the upper bound of rocksdb background jobs
+
+
+## Backup and restore
+
+### Backup file format
+The backup tool names files as
+datatype:yyyy-mm-dd:namespace:table.db. Every backup file starts with the same common header; the content that follows depends on the data type.
+
+#### Common header format
+
+Every file header contains the following fields (see the Go sketch after the kv format below):
+
+|Field|Bytes|Description|
+|----|----|----|
+|MAGIC|5|magic bytes, currently "ZANKV"|
+|VERSION|4|version, currently "0001"|
+|NameSpace Len|4|length of the namespace|
+|NameSpace|variable, given by NameSpace Len|namespace name|
+|TableName Len|4|length of the table name|
+|TableName|variable|table name|
+|Type|1|data type. 0: kv; 1: hash; 2: set; 3: zset; 4: list|
+
+#### kv file format
+
+After the common header, a kv file contains key-value records in the following format:
+
+|Field|Bytes|Description|
+|----|----|----|
+|Key Len|4|length of the key|
+|Key|variable|key content|
+|Value Len|4|length of the value|
+|Value|variable|value content|
+
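+A hedged sketch of reading one kv record according to the table above, assuming 4-byte big-endian length fields; the byte order used by the real backup tool is not stated in this document and may differ.
+
+```go
+package sketch
+
+import (
+	"encoding/binary"
+	"io"
+)
+
+// readKVRecord reads one Key/Value record that follows the common header,
+// assuming 4-byte big-endian length prefixes (an assumption, not confirmed by this doc).
+func readKVRecord(r io.Reader) (key, value []byte, err error) {
+	readBlob := func() ([]byte, error) {
+		var l uint32
+		if err := binary.Read(r, binary.BigEndian, &l); err != nil {
+			return nil, err
+		}
+		buf := make([]byte, l)
+		if _, err := io.ReadFull(r, buf); err != nil {
+			return nil, err
+		}
+		return buf, nil
+	}
+	if key, err = readBlob(); err != nil {
+		return nil, nil, err
+	}
+	if value, err = readBlob(); err != nil {
+		return nil, nil, err
+	}
+	return key, value, nil
+}
+```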
+
+#### hash file format
+
+After the common header, a hash file contains key-field-value records in the following format:
+
+|Field|Bytes|Description|
+|----|----|----|
+|Key Len|4|length of the key|
+|Key|variable|key content|
+|Field number|4|number of fields|
+|field Len|4|field length|
+|field|variable|field content|
+|Value Len|4|length of the value|
+|Value|variable|value content|
+
+#### set file format
+
+#### list file format
+
+#### zset file format
+
+### Backup tool: backup
+
+```
+backup -lookup lookuplist -ns namespace -table table_name [-data_dir backup -type all|kv[,hash,set,zset,list] -qps 100]
+
+Options:
+-lookup   zankv lookup servers, a comma-separated list
+-ns       namespace to back up
+-table    table to back up
+-data_dir backup directory, defaults to the data directory under the current directory
+-type     data types to back up: kv,hash,set,zset,list; all means every type and cannot be combined with other types; defaults to all
+-qps      rate limit, defaults to 1000 qps
+```
+
+### Restore tool: restore
+
+```
+restore -lookup lookuplist -data restore [-ns namespace -table table_name -qps 1000]
+Options:
+-lookup   zankv lookup servers, a comma-separated list
+-data     backup file to restore from
+-ns       namespace to restore into; if omitted, the namespace recorded in the backup is used
+-table    table to restore into; if omitted, the table recorded in the backup is used
+-qps      rate limit, defaults to 1000 qps
+```
+
+
+## Cross-datacenter operations
+
+For three datacenters in the same city, use the default single cross-datacenter cluster and let raft handle replication and failover.
+
+For remote datacenters, deploy the asynchronous cross-datacenter replication mode to reduce replication latency. The operation flow is as follows,
+assuming clusters in datacenter A and datacenter B, with A as the primary at the beginning.
+
+### Initialization
+
+Before zankv 0.9.0:
+
+- The cluster in datacenter B is the replication target. Use the same configuration but a different cluster_id from A and B's own etcd addresses, add "syncer_write_only": true to the zankv configuration in B, and start the nodes one by one.
+- In datacenter B, call the API to create and initialize the same namespaces as in A.
+- Pick 2 machines in datacenter B to run the replication programs for A-to-B replication.
+- Deploy the reverse replication in datacenter A for B-to-A replication after a switchover. (Prepare the configuration for the reverse replication but do not start it.)
+- The replication placedriver and replication zankv in datacenter B use the same configuration as in A plus learner_role="role_log_syncer", which marks them as replication-only. Only one replication placedriver is needed; deploy 2 replication zankv instances as primary/standby and add the target (datacenter B) cluster address "remote_sync_cluster": "remoteip:port" to their configuration.
+- Start the replication placedriver, then the 2 replication zankv instances.
+- Wait for the replication to finish initializing.
+- Keep the reverse replication configuration prepared but do not start it.
+
+zankv 0.9.0 and later:
+
+- The cluster in datacenter B is the replication target. Use the same configuration but a different cluster_id from A and B's own etcd addresses, add "syncer_write_only": true to the zankv configuration in B, and start the nodes one by one.
+- In datacenter B, call the API to create and initialize the same namespaces as in A.
+- Pick 2 machines in datacenter B to run the replication programs for A-to-B replication.
+- Deploy the reverse replication in datacenter A for B-to-A replication after a switchover.
+- The replication placedriver and replication zankv in datacenter B use the same configuration as in A plus learner_role="role_log_syncer", which marks them as replication-only. Only one replication placedriver is needed; deploy 2 replication zankv instances as primary/standby and add the target (datacenter B) cluster address "remote_sync_cluster": "remoteip:port" to their configuration.
+- Start the replication placedriver, then the 2 replication zankv instances.
+- Send the start API to the replication placedriver deployed in datacenter B to start A-to-B replication: `curl -XPOST "127.0.0.1:18001/learner/start"`
+- Wait for the replication to finish initializing.
+- Start the reverse replication with its prepared configuration, then disable it via the API: `curl -XPOST "127.0.0.1:18001/learner/stop"`
+
+After deployment the architecture looks like this:
+![zankv-cluster-sync](resource/zankv-sync-cluster.png)
+
+### Planned datacenter switchover
+
+Before zankv 0.9.0:
+
+- Block client writes to the cluster in datacenter A: call the API on every node so that only the replication program may write: POST /synceronly?enable=true
+- When replication has caught up, stop the A-to-B replication program and its replication placedriver. (Stop the replication program, send the placedriver a request to remove the raft learner, and clean up the disk data.)
+- Start the prepared reverse replication for B-to-A, with syncer_normal_init=true added to the configuration for a normal initialization.
+- The replication program initializes itself automatically (it fetches the latest term-index of every partition in cluster B and sets the replication start term-index of cluster A to it). After the initialization completes, remove syncer_normal_init and restart the replication program.
+- Allow non-replication writes on cluster B so it starts accepting new client writes: POST /synceronly?enable=false, and remove "syncer_write_only": true from the configuration files.
+
+zankv 0.9.0 and later:
+
+- Block client writes to the cluster in datacenter A: call the API on every node so that only the replication program may write: POST /synceronly?enable=true
+- When replication has caught up, send the stop API to the replication placedriver in datacenter B to stop A-to-B replication and clean up the replication program's disk data: `curl -XPOST "127.0.0.1:18001/learner/stop"`
+- Wait until replication has stopped; check that the running list of the replication program is empty with `curl "127.0.0.1:3801/syncer/runnings"`.
+- Once A-to-B replication has stopped, send the normal-init API to the replication nodes in datacenter A to initialize the reverse replication state: `curl -XPOST "127.0.0.1:3801/syncer/normalinit?enable=true"`
+- Then send the start API to the replication placedriver in datacenter A to start the prepared reverse B-to-A replication: `curl -XPOST "127.0.0.1:18001/learner/start"`
+- The replication program initializes itself automatically (it fetches the latest term-index of every partition in cluster B and sets the replication start term-index of cluster A to it). After the initialization completes, reset the syncer_normal_init state via the API: `curl -XPOST "127.0.0.1:3801/syncer/normalinit?enable=false"`.
+- Allow non-replication writes on cluster B so it starts accepting new client writes: POST /synceronly?enable=false
+
+The switchover is complete.
+
+### Unplanned datacenter switchover
+
+- Datacenter A fails and its cluster becomes unreachable.
+- Record the term-index up to which the healthy datacenter B has already replicated from A, and B's own current term-index.
+- Allow non-replication writes on cluster B to restore client reads and writes: POST /synceronly?enable=false, and remove "syncer_write_only": true from the configuration files.
+
+The failover is done; the remaining steps wait for datacenter A to recover.
+
+- After datacenter A recovers (ideally with cross-datacenter network access still blocked), first call POST /synceronly?enable=true so that only the replication program may write and client writes are blocked. Record cluster A's own term-index and export the data written after the recorded replication term-index for verification and compensation. (If the replication program acts as a slave, its replication log can be enabled to record the raw replicated data for compensation.)
+- Wait until the data written during the failure has been replicated to B. Cluster B decides, based on timestamps, whether replicated KV data overwrites its own or is ignored (conflicts are logged). For other data types, replication succeeds if the timestamps show nothing was written in the meantime; otherwise a conflict is logged and must be fixed manually.
+- After the failure-window data has been replicated, stop the A-to-B replication program.
+- Start the reverse replication program to replicate B to A; A becomes the standby cluster. (A failure requires a full data transfer, so watch the network.)
+- Repair the data by comparing the data cluster A recorded after the replication term-index at failure time with the new data written to B after the failover that has not yet been replicated back to A.
+
+
+## Failure handling
+
+### Corrupted data in a single partition replica
+
+If one replica of a partition becomes corrupted, try removing that replica manually so the partition can recover consistency:
+
+```
+DELETE /cluster/partition/remove_node?namespace=xxx&partition=xxx&node=xxxx
+```
+Send this to the placedriver leader. The node parameter is the node's unique node_id within that namespace, which can be obtained from the node_id field of the namespace metadata returned by /query/xxx.
+
+Check that the faulty replica is removed from the raft group; after removal, the cluster automatically allocates a new replica to restore the configured replica count.
+
+### A raft group cannot converge to the replica set in etcd
+
+If a raft replica set differs from etcd, scheduling normally brings the replica set back in line with the etcd configuration. If for some reason the raft group cannot add or adjust replicas, the replica set has to be reset manually and the etcd configuration repaired. First try the single-partition procedure above and remove the faulty replicas one by one; this normally recovers the group. If it does not, the group can no longer elect a leader; in that case force the partition to re-form as a new single-replica cluster with the API below (send it to the node holding the most complete data).
+
+```
+POST /cluster/raft/forcenew/namespace-pid
+```
+
+Then edit the etcd metadata so that the replica set of that namespace partition contains only the new node id, and wait for replicas to be added back automatically.
diff --git a/doc/resource/hash-partition.png b/doc/resource/hash-partition.png
new file mode 100644
index 00000000..b8553a55
Binary files /dev/null and b/doc/resource/hash-partition.png differ
diff --git a/doc/resource/zankv-datanode.png b/doc/resource/zankv-datanode.png
new file mode 100644
index 00000000..03998458
Binary files /dev/null and b/doc/resource/zankv-datanode.png differ
diff --git a/doc/resource/zankv-index-create.png b/doc/resource/zankv-index-create.png
new file mode 100644
index 00000000..4f4ceaf4
Binary files /dev/null and b/doc/resource/zankv-index-create.png differ
diff --git a/doc/resource/zankv-kv-mapping.png b/doc/resource/zankv-kv-mapping.png
new file mode 100644
index 00000000..ee94c20c
Binary files /dev/null and b/doc/resource/zankv-kv-mapping.png differ
diff --git a/doc/resource/zankv-list-mapping.png b/doc/resource/zankv-list-mapping.png
new file mode 100644
index 00000000..2d2087fe
Binary files /dev/null and b/doc/resource/zankv-list-mapping.png differ
diff --git a/doc/resource/zankv-sync-cluster.png b/doc/resource/zankv-sync-cluster.png
new file mode 100644
index 00000000..6d5090a4
Binary files /dev/null and b/doc/resource/zankv-sync-cluster.png differ
diff --git a/doc/user-guide.md b/doc/user-guide.md
new file mode 100644
index 00000000..0cd33e6a
--- /dev/null
+++ b/doc/user-guide.md
@@ -0,0 +1,286 @@
+# User Guide
+
+## Notes and caveats
+
+- Keys must use the format namespace:table:real-key; otherwise an error is returned (see the example after this list).
+- Keep keys as short as possible; a key should not exceed 1KB.
+- A single value under a sub-key must not exceed 8MB; the number of sub-keys is not limited.
+- Data expiration uses the "non-consistent local deletion" policy by default. Expiration only guarantees that data will not be deleted early; it does not guarantee timely deletion. Under this policy the TTL command cannot be used to read the remaining lifetime and the persist command cannot be used to make data permanent. Only a fixed expiration time is supported; it cannot be adjusted dynamically, and once set it cannot be changed. Even if the key is updated later (set, setex), the expiration from the first write is kept. If a dynamically adjustable TTL is needed, specify the consistent expiration-deletion policy when creating the namespace.
+- The del, expire, persist, ttl, and exists commands only apply to KV-type data and have no effect on other types. The other data types are collections, so deleting a key means deleting all data in the collection; use the corresponding extended commands for those types (hclear, hexpire, hkeyexist, httl, hpersist, sclear, zclear, zexpire, etc.).
+- To avoid fetching too much data in a single call, the bulk-read commands on collection types, HASH (hgetall, hmget, hkeys, hvals, etc.), list (lrange, etc.), set (smembers, etc.), and zset (zrange, etc.), are capped (the server currently returns at most 5000 elements per call; exceeding the cap returns an error). If more data is needed, use the paginated hscan, sscan, and zscan commands and limit how much is fetched per call.
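+
+A minimal redis-cli sketch of the key format, using placeholder namespace and table names (test_ns and usertable):
+
+```
+127.0.0.1:12381> set test_ns:usertable:user100 "hello"
+OK
+127.0.0.1:12381> get test_ns:usertable:user100
+"hello"
+```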
+
+## Protocol compatibility
+
+The supported commands are listed below:
+
+#### KV string type
+
+|Command|Notes|
+| ---- | ---- |
+|set|√|
+|setex|√|
+|get|√|
+|getset|√|
+|expire|√|
+|del|√|
+|ttl|√|
+|persist|√|
+|incr|√|
+|incrby|√|
+|exists|√|
+|mget|√|
+|decr|√|
+|decrby|√|
+|setnx|√|
+
+#### Hash data type
+
+|Command|Notes|
+| ---- | ---- |
+|hget|√|
+|hset|√|
+|hdel|√|
+|hgetall|√|
+|hmget|√|
+|hmset|√|
+|hexists|√|
+|hincrby|√|
+|hkeys| √|
+|hlen |√|
+|hclear|Extended command|
+|hexpire|Extended command|
+|httl|Extended command|
+|hpersist|Extended command|
+|hkeyexist|Extended command|
+
+#### List data type
+
+|Command|Notes|
+| ---- | ---- |
+|lindex|√|
+|llen|√|
+|lrange|√|
+|lset|√|
+|lpush| √|
+|lpop|√|
+|ltrim| √|
+|rpop|√|
+|rpush| √|
+|lclear|Extended command|
+|lexpire|Extended command|
+|lttl|Extended command|
+|lpersist|Extended command|
+|lkeyexist|Extended command|
+
+#### Set data type
+
+|Command|Notes|
+| ---- | ---- |
+|scard|√|
+|sismember|√|
+|smembers|√|
+|srandmember|√ (members are returned in order)|
+|sadd|√|
+|srem|√|
+|spop|√|
+|sclear|Extended command|
+|smclear|Extended command|
+|sexpire|Extended command|
+|sttl|Extended command|
+|spersist|Extended command|
+|skeyexist|Extended command|
+
+#### Zset data type
+
+|Command|Notes|
+| ---- | ---- |
+|zscore|√|
+|zcount|√|
+|zcard|√|
+|zlexcount|√|
+|zrange |√|
+|zrevrange|√|
+|zrangebylex|√|
+|zrangebyscore|√|
+|zrevrangebyscore|√|
+|zrank|√|
+|zrevrank|√|
+|zadd|√|
+|zincrby|√|
+|zrem|√|
+|zremrangebyrank|√|
+|zremrangebyscore|√|
+|zremrangebylex|√|
+|zclear|Extended command|
+|zexpire|Extended command|
+|zttl|Extended command|
+|zpersist|Extended command|
+|zkeyexist|Extended command|
+
+#### HyperLogLog data type
+
+Shares the KV-type commands.
+
+|Command|Notes|
+| ---- | ---- |
+|pfadd|√|
+|pfcount|√|
+
+#### GeoHash data type
+
+Shares the zset-type commands.
+
+|Command|Notes|
+| ---- | ---- |
+|geoadd |√|
+|geohash|√|
+|geopos |√|
+|geodist|√|
+|georadius|√|
+|georadiusbymember|√|
+
+#### Scan commands
+
+|Command|Notes|
+| ---- | ---- |
+|scan|Only supports scanning the keys of KV data|
+|advscan|Extended command; scans the key lists of other data types|
+|hscan|√|
+|sscan|√|
+|zscan|√|
+
+Notes:
+
+The scan family of commands works much like the official Redis documentation describes, except for how the cursor is used: the cursor is a string token, not necessarily an integer string, and an empty string marks the end of the scan. (If you only want to scan a single table, the termination condition should instead be whether the table prefix of the returned cursor still matches.)
+
+The scan command only scans KV-type data, and the cursor must include the namespace and table prefix. Use advscan to scan the key lists of other data types.
+
+Note the ordering of scan results: when a scan crosses partitions, ordering is only guaranteed within each partition.
+
+advscan syntax
+```
+ADVSCAN cursor datatype [MATCH pattern] [COUNT count]
+where datatype is a string and can be one of: KV, HASH, LIST, ZSET, SET
+```
+
+scan example
+```
+127.0.0.1:12381> scan yz_cp_simple:test5:0000000100 match *1180* count 10
+1) "test5:0000011807"
+2) 1) "0000001180"
+ 2) "0000011180"
+ 3) "0000011800"
+ 4) "0000011801"
+ 5) "0000011802"
+ 6) "0000011803"
+ 7) "0000011804"
+ 8) "0000011805"
+ 9) "0000011806"
+ 10) "0000011807"
+
+Continue the scan with the returned cursor until the cursor comes back as an empty string:
+
+127.0.0.1:12381> scan yz_cp_simple:test5:0000011807 count 1
+1) "test5:0000011808"
+2) 1) "0000011808"
+```
+
+advscan example
+```
+> advscan yz_cp_simple:test5 kv count 10
+1) "test5:0000000010"
+2) 1) "0000000001"
+ 2) "0000000002"
+ 3) "0000000003"
+ 4) "0000000004"
+ 5) "0000000005"
+ 6) "0000000006"
+ 7) "0000000007"
+ 8) "0000000008"
+ 9) "0000000009"
+ 10) "0000000010"
+
+> advscan yz_cp_simple:tt zset count 10
+1) "tt:myzsetkey16"
+2) 1) "myzsetkey0"
+ 2) "myzsetkey1"
+ 3) "myzsetkey10"
+ 4) "myzsetkey100"
+ 5) "myzsetkey11"
+ 6) "myzsetkey12"
+ 7) "myzsetkey13"
+ 8) "myzsetkey14"
+ 9) "myzsetkey15"
+ 10) "myzsetkey16"
+```
+
+hscan example (SSCAN and ZSCAN are similar):
+```
+> hmset yz_cp_simple:test5:hash name Jack age 33
+OK
+
+> hscan yz_cp_simple:test5:hash
+1) "name"
+2) 1) "age"
+ 2) "33"
+ 3) "name"
+ 4) "Jack"
+
+> hscan yz_cp_simple:test5:hash name
+1) ""
+2) (empty list or set)
+> hscan yz_cp_simple:test5:hash age
+1) "name"
+2) 1) "name"
+ 2) "Jack"
+```
+
+#### JSON extended commands
+
+Most of the commands in the official JSON extension ( http://rejson.io/ ) are supported. Notes on a few of them:
+
+|Command|Notes|
+| ---- | ---- |
+| json.del|√|
+| json.get |√ (formatting and similar options are not supported)|
+| json.mkget|√ (equivalent to json.mget)|
+| json.set|√|
+| json.type|√|
+| json.arrappend|√|
+| json.arrlen|√|
+| json.arrpop|√|
+| json.objkeys|√|
+| json.objlen|√|
+
+Note that the path syntax of the JSON commands only supports the '.' form; the '[ ]' form is not supported.
+
+Example:
+```
+{
+ "name": {"first": "Tom", "last": "Anderson"},
+ "age":37,
+ "children": ["Sara","Alex","Jack"],
+ "fav.movie": "Deer Hunter",
+ "friends": [
+ {"first": "Dale", "last": "Murphy", "age": 44},
+ {"first": "Roger", "last": "Craig", "age": 68},
+ {"first": "Jane", "last": "Murphy", "age": 47}
+ ]
+}
+
+path usage examples:
+"name.last" >> "Anderson"
+"age" >> 37
+"children" >> ["Sara","Alex","Jack"]
+"children.1" >> "Alex"
+"fav\.movie" >> "Deer Hunter"
+"friends.1.last" >> "Craig"
+```
+
+## Support for other languages
+
+With the Go SDK you can build a proxy that speaks the Redis protocol; clients in other languages can then access the proxy directly using any Redis-protocol client.
+
+## FAQ
+
+## Examples
\ No newline at end of file
diff --git a/engine/btree.go b/engine/btree.go
new file mode 100644
index 00000000..99bf664c
--- /dev/null
+++ b/engine/btree.go
@@ -0,0 +1,906 @@
+// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
+// of this source code is governed by a BSD-style license that can be found in
+// the LICENSE file.
+
+package engine
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unsafe"
+)
+
+type kvitem struct {
+ key []byte
+ value []byte
+ isNil bool
+}
+
+func (kvi *kvitem) String() string {
+ return fmt.Sprintf("%s:%s", kvi.key, kvi.value)
+}
+
+func (kvi *kvitem) IsNil() bool {
+ return kvi.isNil
+}
+
+func cmpItem(l *kvitem, r *kvitem) int {
+ return bytes.Compare(l.key, r.key)
+}
+
+const (
+ degree = 16
+ maxItems = 2*degree - 1
+ minItems = degree - 1
+)
+
+type itemContainer [maxItems]atomic.Value
+
+func copyItemArray(litems []atomic.Value, ritems []atomic.Value) {
+ n := len(litems)
+ if n > len(ritems) {
+ n = len(ritems)
+ }
+ for i := 0; i < n; i++ {
+ v := ritems[i].Load()
+ litems[i].Store(v)
+ }
+}
+
+func (ic itemContainer) getItem(i int) *kvitem {
+ v := ic[i].Load()
+ if v == nil {
+ return nil
+ }
+ vv := v.(*kvitem)
+ if vv.isNil {
+ return nil
+ }
+ return vv
+}
+
+func (ic itemContainer) setItem(i int, v *kvitem) {
+ if v == nil {
+ v = &kvitem{key: nil, value: nil, isNil: true}
+ }
+ ic[i].Store(v)
+}
+
+type leafNode struct {
+ ref int32
+ count int16
+ leaf bool
+ items [maxItems]*kvitem
+}
+
+type node struct {
+ leafNode
+ children [maxItems + 1]*node
+}
+
+//go:nocheckptr casts a ptr to a smaller struct to a ptr to a larger struct.
+func leafToNode(ln *leafNode) *node {
+ return (*node)(unsafe.Pointer(ln))
+}
+
+func nodeToLeaf(n *node) *leafNode {
+ return (*leafNode)(unsafe.Pointer(n))
+}
+
+var leafPool = sync.Pool{
+ New: func() interface{} {
+ return new(leafNode)
+ },
+}
+
+var nodePool = sync.Pool{
+ New: func() interface{} {
+ return new(node)
+ },
+}
+
+func newLeafNode() *node {
+ n := leafToNode(leafPool.Get().(*leafNode))
+ n.leaf = true
+ n.ref = 1
+ return n
+}
+
+func newNode() *node {
+ n := nodePool.Get().(*node)
+ n.ref = 1
+ return n
+}
+
+// mut creates and returns a mutable node reference. If the node is not shared
+// with any other trees then it can be modified in place. Otherwise, it must be
+// cloned to ensure unique ownership. In this way, we enforce a copy-on-write
+// policy which transparently incorporates the idea of local mutations, like
+// Clojure's transients or Haskell's ST monad, where nodes are only copied
+// during the first time that they are modified between Clone operations.
+//
+// When a node is cloned, the provided pointer will be redirected to the new
+// mutable node.
+func mut(n **node) *node {
+ if atomic.LoadInt32(&(*n).ref) == 1 {
+ // Exclusive ownership. Can mutate in place.
+ return *n
+ }
+ // If we do not have unique ownership over the node then we
+ // clone it to gain unique ownership. After doing so, we can
+ // release our reference to the old node. We pass recursive
+ // as true because even though we just observed the node's
+ // reference count to be greater than 1, we might be racing
+ // with another call to decRef on this node.
+ c := (*n).clone()
+ (*n).decRef(true /* recursive */)
+ *n = c
+ return *n
+}
+
+// incRef acquires a reference to the node.
+func (n *node) incRef() {
+ atomic.AddInt32(&n.ref, 1)
+}
+
+// decRef releases a reference to the node. If requested, the method
+// will recurse into child nodes and decrease their refcounts as well.
+func (n *node) decRef(recursive bool) {
+ if atomic.AddInt32(&n.ref, -1) > 0 {
+ // Other references remain. Can't free.
+ return
+ }
+ // Clear and release node into memory pool.
+ if n.leaf {
+ ln := nodeToLeaf(n)
+ *ln = leafNode{}
+ leafPool.Put(ln)
+ } else {
+ // Release child references first, if requested.
+ if recursive {
+ for i := int16(0); i <= n.count; i++ {
+ n.children[i].decRef(true /* recursive */)
+ }
+ }
+ *n = node{}
+ nodePool.Put(n)
+ }
+}
+
+// clone creates a clone of the receiver with a single reference count.
+func (n *node) clone() *node {
+ var c *node
+ if n.leaf {
+ c = newLeafNode()
+ } else {
+ c = newNode()
+ }
+ // NB: copy field-by-field without touching n.ref to avoid
+ // triggering the race detector and looking like a data race.
+ c.count = n.count
+ c.items = n.items
+ if !c.leaf {
+ // Copy children and increase each refcount.
+ c.children = n.children
+ for i := int16(0); i <= c.count; i++ {
+ c.children[i].incRef()
+ }
+ }
+ return c
+}
+
+func (n *node) insertAt(index int, item *kvitem, nd *node) {
+ if index < int(n.count) {
+ copy(n.items[index+1:n.count+1], n.items[index:n.count])
+ if !n.leaf {
+ copy(n.children[index+2:n.count+2], n.children[index+1:n.count+1])
+ }
+ }
+ n.items[index] = item
+ if !n.leaf {
+ n.children[index+1] = nd
+ }
+ n.count++
+}
+
+func (n *node) pushBack(item *kvitem, nd *node) {
+ n.items[n.count] = item
+ if !n.leaf {
+ n.children[n.count+1] = nd
+ }
+ n.count++
+}
+
+func (n *node) pushFront(item *kvitem, nd *node) {
+ if !n.leaf {
+ copy(n.children[1:n.count+2], n.children[:n.count+1])
+ n.children[0] = nd
+ }
+ copy(n.items[1:n.count+1], n.items[:n.count])
+ n.items[0] = item
+ n.count++
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (n *node) removeAt(index int) (*kvitem, *node) {
+ var child *node
+ if !n.leaf {
+ child = n.children[index+1]
+ copy(n.children[index+1:n.count], n.children[index+2:n.count+1])
+ n.children[n.count] = nil
+ }
+ n.count--
+ out := n.items[index]
+ copy(n.items[index:n.count], n.items[index+1:n.count+1])
+ n.items[n.count] = nil
+ return out, child
+}
+
+// popBack removes and returns the last element in the list.
+func (n *node) popBack() (*kvitem, *node) {
+ n.count--
+ out := n.items[n.count]
+ n.items[n.count] = nil
+ if n.leaf {
+ return out, nil
+ }
+ child := n.children[n.count+1]
+ n.children[n.count+1] = nil
+ return out, child
+}
+
+// popFront removes and returns the first element in the list.
+func (n *node) popFront() (*kvitem, *node) {
+ n.count--
+ var child *node
+ if !n.leaf {
+ child = n.children[0]
+ copy(n.children[:n.count+1], n.children[1:n.count+2])
+ n.children[n.count+1] = nil
+ }
+ out := n.items[0]
+ copy(n.items[:n.count], n.items[1:n.count+1])
+ n.items[n.count] = nil
+ return out, child
+}
+
+// find returns the index where the given item should be inserted into this
+// list. 'found' is true if the item already exists in the list at the given
+// index.
+func (n *node) find(cmp func(*kvitem, *kvitem) int, item *kvitem) (index int, found bool) {
+ // Logic copied from sort.Search. Inlining this gave
+ // an 11% speedup on BenchmarkBTreeDeleteInsert.
+ i, j := 0, int(n.count)
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ v := cmp(item, n.items[h])
+ if v == 0 {
+ return h, true
+ } else if v > 0 {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ return i, false
+}
+
+// split splits the given node at the given index. The current node shrinks,
+// and this function returns the item that existed at that index and a new
+// node containing all items/children after it.
+//
+// Before:
+//
+// +-----------+
+// | x y z |
+// +--/-/-\-\--+
+//
+// After:
+//
+// +-----------+
+// | y |
+// +----/-\----+
+// / \
+// v v
+// +-----------+ +-----------+
+// | x | | z |
+// +-----------+ +-----------+
+//
+func (n *node) split(i int) (*kvitem, *node) {
+ out := n.items[i]
+ var next *node
+ if n.leaf {
+ next = newLeafNode()
+ } else {
+ next = newNode()
+ }
+ next.count = n.count - int16(i+1)
+ copy(next.items[:], n.items[i+1:n.count])
+ for j := int16(i); j < n.count; j++ {
+ n.items[j] = nil
+ }
+ if !n.leaf {
+ copy(next.children[:], n.children[i+1:n.count+1])
+ for j := int16(i + 1); j <= n.count; j++ {
+ n.children[j] = nil
+ }
+ }
+ n.count = int16(i)
+ return out, next
+}
+
+// insert inserts an item into the subtree rooted at this node, making sure no
+// nodes in the subtree exceed maxItems items. Returns true if an existing item
+// was replaced and false if an item was inserted.
+func (n *node) insert(cmp func(*kvitem, *kvitem) int, item *kvitem) (replaced bool) {
+ i, found := n.find(cmp, item)
+ if found {
+ n.items[i] = item
+ return true
+ }
+ if n.leaf {
+ n.insertAt(i, item, nil)
+ return false
+ }
+ if n.children[i].count >= maxItems {
+ splitLa, splitNode := mut(&n.children[i]).split(maxItems / 2)
+ n.insertAt(i, splitLa, splitNode)
+
+ switch cmp := cmp(item, n.items[i]); {
+ case cmp < 0:
+ // no change, we want first split node
+ case cmp > 0:
+ i++ // we want second split node
+ default:
+ n.items[i] = item
+ return true
+ }
+ }
+ replaced = mut(&n.children[i]).insert(cmp, item)
+ return replaced
+}
+
+// removeMax removes and returns the maximum item from the subtree rooted
+// at this node.
+func (n *node) removeMax() *kvitem {
+ if n.leaf {
+ n.count--
+ out := n.items[n.count]
+ n.items[n.count] = nil
+ return out
+ }
+ child := mut(&n.children[n.count])
+ if child.count <= minItems {
+ n.rebalanceOrMerge(int(n.count))
+ return n.removeMax()
+ }
+ return child.removeMax()
+}
+
+// remove removes an item from the subtree rooted at this node. Returns
+// the item that was removed or nil if no matching item was found.
+func (n *node) remove(cmp func(*kvitem, *kvitem) int, item *kvitem) (out *kvitem) {
+ i, found := n.find(cmp, item)
+ if n.leaf {
+ if found {
+ out, _ = n.removeAt(i)
+ return out
+ }
+ return nil
+ }
+ if n.children[i].count <= minItems {
+ // Child not large enough to remove from.
+ n.rebalanceOrMerge(i)
+ return n.remove(cmp, item)
+ }
+ child := mut(&n.children[i])
+ if found {
+ // Replace the item being removed with the max item in our left child.
+ out = n.items[i]
+ n.items[i] = child.removeMax()
+ return out
+ }
+ // The item is not in this node and the child is large enough to remove from.
+ out = child.remove(cmp, item)
+ return out
+}
+
+// rebalanceOrMerge grows child 'i' to ensure it has sufficient room to remove
+// an item from it while keeping it at or above minItems.
+func (n *node) rebalanceOrMerge(i int) {
+ switch {
+ case i > 0 && n.children[i-1].count > minItems:
+ // Rebalance from left sibling.
+ //
+ // +-----------+
+ // | y |
+ // +----/-\----+
+ // / \
+ // v v
+ // +-----------+ +-----------+
+ // | x | | |
+ // +----------\+ +-----------+
+ // \
+ // v
+ // a
+ //
+ // After:
+ //
+ // +-----------+
+ // | x |
+ // +----/-\----+
+ // / \
+ // v v
+ // +-----------+ +-----------+
+ // | | | y |
+ // +-----------+ +/----------+
+ // /
+ // v
+ // a
+ //
+ left := mut(&n.children[i-1])
+ child := mut(&n.children[i])
+ xLa, grandChild := left.popBack()
+ yLa := n.items[i-1]
+ child.pushFront(yLa, grandChild)
+ n.items[i-1] = xLa
+
+ case i < int(n.count) && n.children[i+1].count > minItems:
+ // Rebalance from right sibling.
+ //
+ // +-----------+
+ // | y |
+ // +----/-\----+
+ // / \
+ // v v
+ // +-----------+ +-----------+
+ // | | | x |
+ // +-----------+ +/----------+
+ // /
+ // v
+ // a
+ //
+ // After:
+ //
+ // +-----------+
+ // | x |
+ // +----/-\----+
+ // / \
+ // v v
+ // +-----------+ +-----------+
+ // | y | | |
+ // +----------\+ +-----------+
+ // \
+ // v
+ // a
+ //
+ right := mut(&n.children[i+1])
+ child := mut(&n.children[i])
+ xLa, grandChild := right.popFront()
+ yLa := n.items[i]
+ child.pushBack(yLa, grandChild)
+ n.items[i] = xLa
+
+ default:
+ // Merge with either the left or right sibling.
+ //
+ // +-----------+
+ // | u y v |
+ // +----/-\----+
+ // / \
+ // v v
+ // +-----------+ +-----------+
+ // | x | | z |
+ // +-----------+ +-----------+
+ //
+ // After:
+ //
+ // +-----------+
+ // | u v |
+ // +-----|-----+
+ // |
+ // v
+ // +-----------+
+ // | x y z |
+ // +-----------+
+ //
+ if i >= int(n.count) {
+ i = int(n.count - 1)
+ }
+ child := mut(&n.children[i])
+ // Make mergeChild mutable, bumping the refcounts on its children if necessary.
+ _ = mut(&n.children[i+1])
+ mergeLa, mergeChild := n.removeAt(i)
+ child.items[child.count] = mergeLa
+ copy(child.items[child.count+1:], mergeChild.items[:mergeChild.count])
+ if !child.leaf {
+ copy(child.children[child.count+1:], mergeChild.children[:mergeChild.count+1])
+ }
+ child.count += mergeChild.count + 1
+
+ mergeChild.decRef(false /* recursive */)
+ }
+}
+
+// btree is an implementation of a B-Tree.
+//
+// btree stores items in an ordered structure, allowing easy insertion,
+// removal, and iteration. Items are kept in order based on the cmp function
+// (here a byte-wise comparison of the item keys).
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type btree struct {
+ root *node
+ length int
+ cmp func(*kvitem, *kvitem) int
+}
+
+// Reset removes all items from the btree. In doing so, it allows memory
+// held by the btree to be recycled. Failure to call this method before
+// letting a btree be GCed is safe in that it won't cause a memory leak,
+// but it will prevent btree nodes from being efficiently re-used.
+func (t *btree) Reset() {
+ if t.root != nil {
+ t.root.decRef(true /* recursive */)
+ t.root = nil
+ }
+ t.length = 0
+}
+
+func (t *btree) Destroy() {
+ t.Reset()
+}
+
+// Clone clones the btree, lazily. It does so in constant time.
+func (t *btree) Clone() btree {
+ c := *t
+ if c.root != nil {
+ // Incrementing the reference count on the root node is sufficient to
+ // ensure that no node in the cloned tree can be mutated by an actor
+ // holding a reference to the original tree and vice versa. This
+ // property is upheld because the root node in the receiver btree and
+ // the returned btree will both necessarily have a reference count of at
+ // least 2 when this method returns. All tree mutations recursively
+ // acquire mutable node references (see mut) as they traverse down the
+ // tree. The act of acquiring a mutable node reference performs a clone
+ // if a node's reference count is greater than one. Cloning a node (see
+ // clone) increases the reference count on each of its children,
+ // ensuring that they have a reference count of at least 2. This, in
+ // turn, ensures that any of the child nodes that are modified will also
+ // be copied-on-write, recursively ensuring the immutability property
+ // over the entire tree.
+ c.root.incRef()
+ }
+ return c
+}
+
+// Delete removes an item equal to the passed in item from the tree.
+func (t *btree) Delete(item *kvitem) {
+ if t.root == nil || t.root.count == 0 {
+ return
+ }
+ if out := mut(&t.root).remove(t.cmp, item); out != nil {
+ t.length--
+ }
+ if t.root.count == 0 {
+ old := t.root
+ if t.root.leaf {
+ t.root = nil
+ } else {
+ t.root = t.root.children[0]
+ }
+ old.decRef(false /* recursive */)
+ }
+}
+
+// Set adds the given item to the tree. If an item in the tree already
+// equals the given one, it is replaced with the new item.
+func (t *btree) Set(item *kvitem) {
+ if t.root == nil {
+ t.root = newLeafNode()
+ } else if t.root.count >= maxItems {
+ splitLa, splitNode := mut(&t.root).split(maxItems / 2)
+ newRoot := newNode()
+ newRoot.count = 1
+ newRoot.items[0] = splitLa
+ newRoot.children[0] = t.root
+ newRoot.children[1] = splitNode
+ t.root = newRoot
+ }
+ if replaced := mut(&t.root).insert(t.cmp, item); !replaced {
+ t.length++
+ }
+}
+
+// MakeIter returns a new iterator object. It is not safe to continue using an
+// iterator after modifications are made to the tree. If modifications are made,
+// create a new iterator.
+func (t *btree) MakeIter() biterator {
+ return biterator{r: t.root, pos: -1, cmp: t.cmp}
+}
+
+// Height returns the height of the tree.
+func (t *btree) Height() int {
+ if t.root == nil {
+ return 0
+ }
+ h := 1
+ n := t.root
+ for !n.leaf {
+ n = n.children[0]
+ h++
+ }
+ return h
+}
+
+// Len returns the number of items currently in the tree.
+func (t *btree) Len() int {
+ return t.length
+}
+
+// String returns a string description of the tree. The format is
+// similar to the https://en.wikipedia.org/wiki/Newick_format.
+func (t *btree) String() string {
+ if t.length == 0 {
+ return ";"
+ }
+ var b strings.Builder
+ t.root.writeString(&b)
+ return b.String()
+}
+
+func (n *node) writeString(b *strings.Builder) {
+ if n.leaf {
+ for i := int16(0); i < n.count; i++ {
+ if i != 0 {
+ b.WriteString(",")
+ }
+ b.WriteString(n.items[i].String())
+ }
+ return
+ }
+ for i := int16(0); i <= n.count; i++ {
+ b.WriteString("(")
+ n.children[i].writeString(b)
+ b.WriteString(")")
+ if i < n.count {
+ b.WriteString(n.items[i].String())
+ }
+ }
+}
+
+// iterStack represents a stack of (node, pos) tuples, which captures
+// iteration state as an iterator descends a btree.
+type iterStack struct {
+ a iterStackArr
+ aLen int16 // -1 when using s
+ s []iterFrame
+}
+
+// Used to avoid allocations for stacks below a certain size.
+type iterStackArr [3]iterFrame
+
+type iterFrame struct {
+ n *node
+ pos int16
+}
+
+func (is *iterStack) push(f iterFrame) {
+ if is.aLen == -1 {
+ is.s = append(is.s, f)
+ } else if int(is.aLen) == len(is.a) {
+ is.s = make([]iterFrame, int(is.aLen)+1, 2*int(is.aLen))
+ copy(is.s, is.a[:])
+ is.s[int(is.aLen)] = f
+ is.aLen = -1
+ } else {
+ is.a[is.aLen] = f
+ is.aLen++
+ }
+}
+
+func (is *iterStack) pop() iterFrame {
+ if is.aLen == -1 {
+ f := is.s[len(is.s)-1]
+ is.s = is.s[:len(is.s)-1]
+ return f
+ }
+ is.aLen--
+ return is.a[is.aLen]
+}
+
+func (is *iterStack) len() int {
+ if is.aLen == -1 {
+ return len(is.s)
+ }
+ return int(is.aLen)
+}
+
+func (is *iterStack) reset() {
+ if is.aLen == -1 {
+ is.s = is.s[:0]
+ } else {
+ is.aLen = 0
+ }
+}
+
+// biterator is responsible for search and traversal within a btree.
+type biterator struct {
+ r *node
+ n *node
+ pos int16
+ cmp func(*kvitem, *kvitem) int
+ s iterStack
+}
+
+func (i *biterator) reset() {
+ i.n = i.r
+ i.pos = -1
+ i.s.reset()
+}
+
+func (i *biterator) Close() {
+ i.reset()
+}
+
+func (i *biterator) descend(n *node, pos int16) {
+ i.s.push(iterFrame{n: n, pos: pos})
+ i.n = n.children[pos]
+ i.pos = 0
+}
+
+// ascend ascends up to the current node's parent and resets the position
+// to the one previously set for this parent node.
+func (i *biterator) ascend() {
+ f := i.s.pop()
+ i.n = f.n
+ i.pos = f.pos
+}
+
+func (i *biterator) Seek(key []byte) {
+ i.SeekGE(&kvitem{key: key})
+}
+
+// SeekGE seeks to the first item greater-than or equal to the provided
+// item.
+func (i *biterator) SeekGE(item *kvitem) {
+ i.reset()
+ if i.n == nil {
+ return
+ }
+ for {
+ pos, found := i.n.find(i.cmp, item)
+ i.pos = int16(pos)
+ if found {
+ return
+ }
+ if i.n.leaf {
+ if i.pos == i.n.count {
+ i.Next()
+ }
+ return
+ }
+ i.descend(i.n, i.pos)
+ }
+}
+
+func (i *biterator) SeekForPrev(key []byte) {
+ i.SeekLT(&kvitem{key: key})
+}
+
+// SeekLT seeks to the first item less-than the provided item.
+func (i *biterator) SeekLT(item *kvitem) {
+ i.reset()
+ if i.n == nil {
+ return
+ }
+ for {
+ pos, found := i.n.find(i.cmp, item)
+ i.pos = int16(pos)
+ if found || i.n.leaf {
+ i.Prev()
+ return
+ }
+ i.descend(i.n, i.pos)
+ }
+}
+
+// First seeks to the first item in the btree.
+func (i *biterator) First() {
+ i.reset()
+ if i.n == nil {
+ return
+ }
+ for !i.n.leaf {
+ i.descend(i.n, 0)
+ }
+ i.pos = 0
+}
+
+// Last seeks to the last item in the btree.
+func (i *biterator) Last() {
+ i.reset()
+ if i.n == nil {
+ return
+ }
+ for !i.n.leaf {
+ i.descend(i.n, i.n.count)
+ }
+ i.pos = i.n.count - 1
+}
+
+// Next positions the iterator to the item immediately following
+// its current position.
+func (i *biterator) Next() {
+ if i.n == nil {
+ return
+ }
+
+ if i.n.leaf {
+ i.pos++
+ if i.pos < i.n.count {
+ return
+ }
+ for i.s.len() > 0 && i.pos >= i.n.count {
+ i.ascend()
+ }
+ return
+ }
+
+ i.descend(i.n, i.pos+1)
+ for !i.n.leaf {
+ i.descend(i.n, 0)
+ }
+ i.pos = 0
+}
+
+// Prev positions the iterator to the item immediately preceding
+// its current position.
+func (i *biterator) Prev() {
+ if i.n == nil {
+ return
+ }
+
+ if i.n.leaf {
+ i.pos--
+ if i.pos >= 0 {
+ return
+ }
+ for i.s.len() > 0 && i.pos < 0 {
+ i.ascend()
+ i.pos--
+ }
+ return
+ }
+
+ i.descend(i.n, i.pos)
+ for !i.n.leaf {
+ i.descend(i.n, i.n.count)
+ }
+ i.pos = i.n.count - 1
+}
+
+// Valid returns whether the iterator is positioned at a valid position.
+func (i *biterator) Valid() bool {
+ return i.pos >= 0 && i.pos < i.n.count
+}
+
+// Cur returns the item at the iterator's current position. It is illegal
+// to call Cur if the iterator is not valid.
+func (i *biterator) Cur() *kvitem {
+ return i.n.items[i.pos]
+}
+
+func (i *biterator) Key() []byte {
+ return i.Cur().key
+}
+func (i *biterator) Value() []byte {
+ return i.Cur().value
+}
diff --git a/engine/iterator.go b/engine/iterator.go
new file mode 100644
index 00000000..6404d158
--- /dev/null
+++ b/engine/iterator.go
@@ -0,0 +1,259 @@
+package engine
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+
+ "github.com/youzan/ZanRedisDB/common"
+)
+
+// make sure these constants stay the same as those in rockredis
+const (
+ // for data
+ KVType byte = 21
+ HashType byte = 22
+ tsLen = 8
+)
+
+type IteratorGetter interface {
+ GetIterator(IteratorOpts) (Iterator, error)
+}
+
+type Iterator interface {
+ Next()
+ Prev()
+ Valid() bool
+ Seek([]byte)
+ SeekForPrev([]byte)
+ SeekToFirst()
+ SeekToLast()
+ Close()
+ RefKey() []byte
+ Key() []byte
+ RefValue() []byte
+ Value() []byte
+ NoTimestamp(vt byte)
+}
+type emptyIterator struct {
+}
+
+func (eit *emptyIterator) Valid() bool {
+ return false
+}
+
+func (eit *emptyIterator) Next() {
+}
+func (eit *emptyIterator) Prev() {
+}
+func (eit *emptyIterator) Seek([]byte) {
+}
+func (eit *emptyIterator) SeekForPrev([]byte) {
+}
+func (eit *emptyIterator) SeekToFirst() {
+}
+func (eit *emptyIterator) SeekToLast() {
+}
+func (eit *emptyIterator) Close() {
+}
+func (eit *emptyIterator) RefKey() []byte {
+ return nil
+}
+func (eit *emptyIterator) Key() []byte {
+ return nil
+}
+func (eit *emptyIterator) RefValue() []byte {
+ return nil
+}
+func (eit *emptyIterator) Value() []byte {
+ return nil
+}
+func (eit *emptyIterator) NoTimestamp(vt byte) {
+}
+
+type Range struct {
+ Min []byte
+ Max []byte
+ Type uint8
+}
+
+type Limit struct {
+ Offset int
+ Count int
+}
+
+type IteratorOpts struct {
+ Range
+ Limit
+ Reverse bool
+ IgnoreDel bool
+ WithSnap bool
+}
+
+// note: all the iterators use the prefix iterator flag, which means keys under a different table
+// prefix may be skipped.
+func NewDBRangeLimitIteratorWithOpts(ig IteratorGetter, opts IteratorOpts) (rit *RangeLimitedIterator, err error) {
+ var dbit Iterator
+ dbit, err = ig.GetIterator(opts)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if e := recover(); e != nil {
+ dbit.Close()
+ buf := make([]byte, 4096)
+ n := runtime.Stack(buf, false)
+ buf = buf[0:n]
+ err = fmt.Errorf("init iterator panic: %s:%v", buf, e)
+ }
+ }()
+ if !opts.Reverse {
+ rit = NewRangeLimitIterator(dbit, &opts.Range,
+ &opts.Limit)
+ } else {
+ rit = NewRevRangeLimitIterator(dbit, &opts.Range,
+ &opts.Limit)
+ }
+ return
+}
+
+// note: all the iterators use the prefix iterator flag, which means keys under a different table
+// prefix may be skipped.
+func NewDBRangeIteratorWithOpts(ig IteratorGetter, opts IteratorOpts) (rit *RangeLimitedIterator, err error) {
+ var dbit Iterator
+ dbit, err = ig.GetIterator(opts)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if e := recover(); e != nil {
+ dbit.Close()
+ buf := make([]byte, 4096)
+ n := runtime.Stack(buf, false)
+ buf = buf[0:n]
+ err = fmt.Errorf("init iterator panic: %s:%v", buf, e)
+ }
+ }()
+ if !opts.Reverse {
+ rit = NewRangeIterator(dbit, &opts.Range)
+ } else {
+ rit = NewRevRangeIterator(dbit, &opts.Range)
+ }
+ return
+}
+
+type RangeLimitedIterator struct {
+ Iterator
+ l Limit
+ r Range
+ // maybe step should not auto-increase; we need to count the actually returned elements
+ step int
+ reverse bool
+}
+
+func (it *RangeLimitedIterator) Valid() bool {
+ if it.l.Offset < 0 {
+ return false
+ }
+ if it.l.Count >= 0 && it.step >= it.l.Count {
+ return false
+ }
+ if !it.Iterator.Valid() {
+ return false
+ }
+
+ if !it.reverse {
+ if it.r.Max != nil {
+ r := bytes.Compare(it.Iterator.RefKey(), it.r.Max)
+ if it.r.Type&common.RangeROpen > 0 {
+ return !(r >= 0)
+ } else {
+ return !(r > 0)
+ }
+ }
+ } else {
+ if it.r.Min != nil {
+ r := bytes.Compare(it.Iterator.RefKey(), it.r.Min)
+ if it.r.Type&common.RangeLOpen > 0 {
+ return !(r <= 0)
+ } else {
+ return !(r < 0)
+ }
+ }
+ }
+ return true
+}
+
+func (it *RangeLimitedIterator) Next() {
+ it.step++
+ if !it.reverse {
+ it.Iterator.Next()
+ } else {
+ it.Iterator.Prev()
+ }
+}
+
+func NewRangeLimitIterator(i Iterator, r *Range, l *Limit) *RangeLimitedIterator {
+ return rangeLimitIterator(i, r, l, false)
+}
+func NewRevRangeLimitIterator(i Iterator, r *Range, l *Limit) *RangeLimitedIterator {
+ return rangeLimitIterator(i, r, l, true)
+}
+func NewRangeIterator(i Iterator, r *Range) *RangeLimitedIterator {
+ return rangeLimitIterator(i, r, &Limit{0, -1}, false)
+}
+func NewRevRangeIterator(i Iterator, r *Range) *RangeLimitedIterator {
+ return rangeLimitIterator(i, r, &Limit{0, -1}, true)
+}
+func rangeLimitIterator(i Iterator, r *Range, l *Limit, reverse bool) *RangeLimitedIterator {
+ it := &RangeLimitedIterator{
+ Iterator: i,
+ l: *l,
+ r: *r,
+ reverse: reverse,
+ step: 0,
+ }
+ if l.Offset < 0 {
+ return it
+ }
+ if !reverse {
+ if r.Min == nil {
+ it.Iterator.SeekToFirst()
+ } else {
+ it.Iterator.Seek(r.Min)
+ if r.Type&common.RangeLOpen > 0 {
+ if it.Iterator.Valid() && bytes.Compare(it.Iterator.RefKey(), r.Min) <= 0 {
+ it.Iterator.Next()
+ }
+ }
+ }
+ } else {
+ if r.Max == nil {
+ it.Iterator.SeekToLast()
+ } else {
+ it.Iterator.SeekForPrev(r.Max)
+ if !it.Iterator.Valid() {
+ it.Iterator.SeekToFirst()
+ if it.Iterator.Valid() && bytes.Compare(it.Iterator.RefKey(), r.Max) == 1 {
+ dbLog.Infof("iterator seek to last key %v should not great than seek to max %v", it.Iterator.RefKey(), r.Max)
+ }
+ }
+ if r.Type&common.RangeROpen > 0 {
+ if it.Iterator.Valid() && bytes.Compare(it.Iterator.RefKey(), r.Max) >= 0 {
+ it.Iterator.Prev()
+ }
+ }
+ }
+ }
+ for i := 0; i < l.Offset; i++ {
+ if !it.Valid() {
+ break
+ }
+ if !it.reverse {
+ it.Iterator.Next()
+ } else {
+ it.Iterator.Prev()
+ }
+ }
+ return it
+}
diff --git a/engine/kv.go b/engine/kv.go
new file mode 100644
index 00000000..ddfebb2f
--- /dev/null
+++ b/engine/kv.go
@@ -0,0 +1,247 @@
+package engine
+
+import (
+ "errors"
+ "path"
+
+ "github.com/shirou/gopsutil/mem"
+ "github.com/youzan/ZanRedisDB/common"
+)
+
+var (
+ errDBEngClosed = errors.New("db engine is closed")
+ errIntNumber = errors.New("invalid integer")
+)
+
+type RefSlice interface {
+ // ref data
+ Data() []byte
+ Free()
+ // copied data if needed
+ Bytes() []byte
+}
+
+const (
+ compactThreshold = 5000000
+)
+
+var dbLog = common.NewLevelLogger(common.LOG_INFO, common.NewLogger())
+
+func SetLogLevel(level int32) {
+ dbLog.SetLevel(level)
+}
+
+func SetLogger(level int32, logger common.Logger) {
+ dbLog.SetLevel(level)
+ dbLog.Logger = logger
+}
+
+type CRange struct {
+ Start []byte
+ Limit []byte
+}
+
+type SharedRockConfig interface {
+ Destroy()
+ ChangeLimiter(bytesPerSec int64)
+}
+
+type RockEngConfig struct {
+ DataDir string
+ ReadOnly bool
+ DataTool bool
+ SharedConfig SharedRockConfig
+ EnableTableCounter bool
+ AutoCompacted bool
+ RockOptions
+}
+
+func NewRockConfig() *RockEngConfig {
+ c := &RockEngConfig{
+ EnableTableCounter: true,
+ }
+ FillDefaultOptions(&c.RockOptions)
+ return c
+}
+
+type RockOptions struct {
+ VerifyReadChecksum bool `json:"verify_read_checksum"`
+ BlockSize int `json:"block_size"`
+ BlockCache int64 `json:"block_cache"`
+ CacheIndexAndFilterBlocks bool `json:"cache_index_and_filter_blocks"`
+ EnablePartitionedIndexFilter bool `json:"enable_partitioned_index_filter"`
+ WriteBufferSize int `json:"write_buffer_size"`
+ MaxWriteBufferNumber int `json:"max_write_buffer_number"`
+ MinWriteBufferNumberToMerge int `json:"min_write_buffer_number_to_merge"`
+ Level0FileNumCompactionTrigger int `json:"level0_file_num_compaction_trigger"`
+ MaxBytesForLevelBase uint64 `json:"max_bytes_for_level_base"`
+ TargetFileSizeBase uint64 `json:"target_file_size_base"`
+ MaxBackgroundFlushes int `json:"max_background_flushes"`
+ MaxBackgroundCompactions int `json:"max_background_compactions"`
+ MinLevelToCompress int `json:"min_level_to_compress"`
+ MaxMainifestFileSize uint64 `json:"max_mainifest_file_size"`
+ RateBytesPerSec int64 `json:"rate_bytes_per_sec"`
+ BackgroundHighThread int `json:"background_high_thread,omitempty"`
+ BackgroundLowThread int `json:"background_low_thread,omitempty"`
+ AdjustThreadPool bool `json:"adjust_thread_pool,omitempty"`
+ UseSharedCache bool `json:"use_shared_cache,omitempty"`
+ UseSharedRateLimiter bool `json:"use_shared_rate_limiter,omitempty"`
+ DisableWAL bool `json:"disable_wal,omitempty"`
+ DisableMergeCounter bool `json:"disable_merge_counter,omitempty"`
+ OptimizeFiltersForHits bool `json:"optimize_filters_for_hits,omitempty"`
+ // note do not change this dynamic for existing db
+ LevelCompactionDynamicLevelBytes bool `json:"level_compaction_dynamic_level_bytes,omitempty"`
+ InsertHintFixedLen int `json:"insert_hint_fixed_len"`
+ EngineType string `json:"engine_type,omitempty"`
+}
+
+func FillDefaultOptions(opts *RockOptions) {
+ // use large block to reduce index block size for hdd
+ // if using ssd, should use the default value
+ if opts.BlockSize <= 0 {
+ // for hdd use 64KB and above
+ // for ssd use 32KB and below
+ opts.BlockSize = 1024 * 8
+ }
+ // should about 20% less than host RAM
+ // http://smalldatum.blogspot.com/2016/09/tuning-rocksdb-block-cache.html
+ if opts.BlockCache <= 0 {
+ v, err := mem.VirtualMemory()
+ if err != nil {
+ opts.BlockCache = 1024 * 1024 * 128
+ } else {
+ opts.BlockCache = int64(v.Total / 100)
+ if opts.UseSharedCache {
+ opts.BlockCache *= 10
+ } else {
+ if opts.BlockCache < 1024*1024*64 {
+ opts.BlockCache = 1024 * 1024 * 64
+ } else if opts.BlockCache > 1024*1024*1024*8 {
+ opts.BlockCache = 1024 * 1024 * 1024 * 8
+ }
+ }
+ }
+ }
+ // keep level0_file_num_compaction_trigger * write_buffer_size * min_write_buffer_number_to_merge = max_bytes_for_level_base to minimize write amplification
+ if opts.WriteBufferSize <= 0 {
+ opts.WriteBufferSize = 1024 * 1024 * 64
+ }
+ if opts.MaxWriteBufferNumber <= 0 {
+ opts.MaxWriteBufferNumber = 6
+ }
+ if opts.MinWriteBufferNumberToMerge <= 0 {
+ opts.MinWriteBufferNumberToMerge = 2
+ }
+ if opts.Level0FileNumCompactionTrigger <= 0 {
+ opts.Level0FileNumCompactionTrigger = 2
+ }
+ if opts.MaxBytesForLevelBase <= 0 {
+ opts.MaxBytesForLevelBase = 1024 * 1024 * 256
+ }
+ if opts.TargetFileSizeBase <= 0 {
+ opts.TargetFileSizeBase = 1024 * 1024 * 64
+ }
+ if opts.MaxBackgroundFlushes <= 0 {
+ opts.MaxBackgroundFlushes = 2
+ }
+ if opts.MaxBackgroundCompactions <= 0 {
+ opts.MaxBackgroundCompactions = 8
+ }
+ if opts.MinLevelToCompress <= 0 {
+ opts.MinLevelToCompress = 3
+ }
+ if opts.MaxMainifestFileSize <= 0 {
+ opts.MaxMainifestFileSize = 1024 * 1024 * 32
+ }
+ if opts.AdjustThreadPool {
+ if opts.BackgroundHighThread <= 0 {
+ opts.BackgroundHighThread = 2
+ }
+ if opts.BackgroundLowThread <= 0 {
+ opts.BackgroundLowThread = 16
+ }
+ }
+}
+
+type KVCheckpoint interface {
+ Save(path string, notify chan struct{}) error
+}
+
+type ICompactFilter interface {
+ Name() string
+ Filter(level int, key, value []byte) (bool, []byte)
+}
+
+type KVEngine interface {
+ NewWriteBatch() WriteBatch
+ DefaultWriteBatch() WriteBatch
+ GetDataDir() string
+ SetMaxBackgroundOptions(maxCompact int, maxBackJobs int) error
+ CheckDBEngForRead(fullPath string) error
+ OpenEng() error
+ Write(wb WriteBatch) error
+ DeletedBeforeCompact() int64
+ AddDeletedCnt(c int64)
+ LastCompactTime() int64
+ CompactRange(rg CRange)
+ CompactAllRange()
+ DisableManualCompact(bool)
+ GetApproximateTotalKeyNum() int
+ GetApproximateKeyNum(ranges []CRange) uint64
+ GetApproximateSizes(ranges []CRange, includeMem bool) []uint64
+ IsClosed() bool
+ CloseEng() bool
+ CloseAll()
+ GetStatistics() string
+ GetInternalStatus() map[string]interface{}
+ GetInternalPropertyStatus(p string) string
+ GetBytesNoLock(key []byte) ([]byte, error)
+ GetBytes(key []byte) ([]byte, error)
+ MultiGetBytes(keyList [][]byte, values [][]byte, errs []error)
+ Exist(key []byte) (bool, error)
+ ExistNoLock(key []byte) (bool, error)
+ GetRef(key []byte) (RefSlice, error)
+ GetRefNoLock(key []byte) (RefSlice, error)
+ GetValueWithOp(key []byte, op func([]byte) error) error
+ GetValueWithOpNoLock(key []byte, op func([]byte) error) error
+ DeleteFilesInRange(rg CRange)
+ GetIterator(opts IteratorOpts) (Iterator, error)
+ NewCheckpoint(printToStdoutAlso bool) (KVCheckpoint, error)
+ SetOptsForLogStorage()
+ SetCompactionFilter(ICompactFilter)
+}
+
+func GetDataDirFromBase(engType string, base string) (string, error) {
+ if engType == "" || engType == "rocksdb" {
+ return path.Join(base, "rocksdb"), nil
+ }
+ if engType == "pebble" {
+ return path.Join(base, "pebble"), nil
+ }
+ if engType == "mem" {
+ return path.Join(base, "mem"), nil
+ }
+ return "", errors.New("unknown engine type for: " + engType)
+}
+
+func NewKVEng(cfg *RockEngConfig) (KVEngine, error) {
+ if cfg.EngineType == "" || cfg.EngineType == "rocksdb" {
+ return NewRockEng(cfg)
+ } else if cfg.EngineType == "pebble" {
+ return NewPebbleEng(cfg)
+ } else if cfg.EngineType == "mem" {
+ return NewMemEng(cfg)
+ }
+ return nil, errors.New("unknown engine type for: " + cfg.EngineType)
+}
+
+func NewSharedEngConfig(cfg RockOptions) (SharedRockConfig, error) {
+ if cfg.EngineType == "" || cfg.EngineType == "rocksdb" {
+ return newSharedRockConfig(cfg), nil
+ } else if cfg.EngineType == "pebble" {
+ return newSharedPebblekConfig(cfg), nil
+ } else if cfg.EngineType == "mem" {
+ return newSharedMemConfig(cfg), nil
+ }
+ return nil, errors.New("unknown engine type for: " + cfg.EngineType)
+}
diff --git a/engine/kv_skiplist.cc b/engine/kv_skiplist.cc
new file mode 100644
index 00000000..99392a8f
--- /dev/null
+++ b/engine/kv_skiplist.cc
@@ -0,0 +1,152 @@
+#include "kv_skiplist.h"
+
+#include <stdlib.h>  // malloc, free
+#include <string.h>  // memcpy
+#include <pthread.h> // pthread_mutex_* used below
+
+static char* copyString(const char* str, size_t sz) {
+ char* result = (char*)(malloc(sizeof(char) * sz));
+ memcpy(result, str, sizeof(char) * sz);
+ return result;
+}
+
+void kv_skiplist_remove_node(skiplist_raw* l, kv_node* entry) {
+ // Detach `entry` from skiplist.
+ skiplist_erase_node(l, &entry->snode);
+ // Release `entry`, to free its memory.
+ skiplist_release_node(&entry->snode);
+ skiplist_wait_for_free(&entry->snode);
+ // Free `entry` after it becomes safe.
+ kv_skiplist_node_free(entry);
+}
+
+skiplist_raw* kv_skiplist_create() {
+ skiplist_raw *slist;
+ slist = (skiplist_raw*)malloc(sizeof(skiplist_raw));
+ skiplist_init(slist, kv_cmp);
+ return slist;
+}
+
+void kv_skiplist_destroy(skiplist_raw* slist) {
+ // Iterate and free all nodes.
+ skiplist_node* cursor = skiplist_begin(slist);
+ while (cursor) {
+ kv_node* entry = _get_entry(cursor, kv_node, snode);
+ // Get next `cursor`.
+ cursor = skiplist_next(slist, cursor);
+ kv_skiplist_remove_node(slist, entry);
+ }
+ skiplist_free(slist);
+}
+
+kv_node* kv_skiplist_node_create(const char* key, size_t ksz, const char* value, size_t vsz) {
+ kv_node *n;
+ n = (kv_node*)malloc(sizeof(kv_node));
+ skiplist_init_node(&n->snode);
+ pthread_mutex_init(&n->dlock, NULL);
+ n->key = copyString(key, ksz);
+ n->key_sz = ksz;
+ n->value = copyString(value, vsz);
+ n->value_sz = vsz;
+ return n;
+}
+
+void kv_skiplist_node_free(kv_node *n) {
+ skiplist_free_node(&n->snode);
+ if (n->key) {
+ free(n->key);
+ }
+ if (n->value) {
+ free(n->value);
+ }
+ pthread_mutex_destroy(&n->dlock);
+ free(n);
+}
+
+int kv_skiplist_insert(skiplist_raw* l, const char* key, size_t ksz, const char* value, size_t vsz) {
+ kv_node* n = kv_skiplist_node_create(key, ksz, value, vsz);
+ return skiplist_insert_nodup(l, &n->snode);
+}
+
+int kv_skiplist_update(skiplist_raw* l, const char* key, size_t ksz, const char* value, size_t vsz) {
+ kv_node* n = kv_skiplist_node_create(key, ksz, value, vsz);
+ int ret = skiplist_insert_nodup(l, &n->snode);
+ if (ret == 0) {
+ return 0;
+ }
+ kv_skiplist_node_free(n);
+
+ kv_node q;
+ q.key = copyString(key, ksz);
+ q.key_sz = ksz;
+ // the key already exists, so update the value of the existing node in place
+ skiplist_node* cur = skiplist_find(l, &q.snode);
+ // free the temporary query key, otherwise it leaks on every update
+ free(q.key);
+ if (!cur) {
+ return -1;
+ }
+ kv_node* found = _get_entry(cur, kv_node, snode);
+ pthread_mutex_lock(&found->dlock);
+ // free the previous value before replacing it, otherwise it leaks
+ free(found->value);
+ found->value = copyString(value, vsz);
+ found->value_sz = vsz;
+ skiplist_release_node(&found->snode);
+ pthread_mutex_unlock(&found->dlock);
+ return 0;
+}
+
+char* kv_skiplist_get(skiplist_raw* l, const char* key, size_t ksz, size_t* vsz) {
+ kv_node n;
+ n.key = copyString(key, ksz);
+ n.key_sz = ksz;
+ skiplist_node* cur = skiplist_find(l, &n.snode);
+ // free the temporary query key, otherwise it leaks on every lookup
+ free(n.key);
+ if (!cur) {
+ return NULL;
+ }
+ char* v = kv_skiplist_get_node_value(cur, vsz);
+ skiplist_release_node(cur);
+ return v;
+}
+
+char* kv_skiplist_get_node_value(skiplist_node* n, size_t* sz) {
+ kv_node* found = _get_entry(n, kv_node, snode);
+ pthread_mutex_lock(&found->dlock);
+ *sz = found->value_sz;
+ char* v = copyString(found->value, found->value_sz);
+ pthread_mutex_unlock(&found->dlock);
+ return v;
+}
+
+char* kv_skiplist_get_node_key(skiplist_node* n, size_t* sz) {
+ kv_node* found = _get_entry(n, kv_node, snode);
+ pthread_mutex_lock(&found->dlock);
+ *sz = found->key_sz;
+ char* v = copyString(found->key, found->key_sz);
+ pthread_mutex_unlock(&found->dlock);
+ return v;
+}
+
+int kv_skiplist_del(skiplist_raw* l, const char* key, size_t ksz) {
+ kv_node n;
+ n.key = copyString(key, ksz);
+ n.key_sz = ksz;
+ skiplist_node* cur = skiplist_find(l, &n.snode);
+ // free the temporary query key, otherwise it leaks on every delete
+ free(n.key);
+ if (!cur) {
+ return 0;
+ }
+ kv_node* found = _get_entry(cur, kv_node, snode);
+ kv_skiplist_remove_node(l, found);
+ return 1;
+}
+
+skiplist_node* kv_skiplist_find_ge(skiplist_raw* l, const char* key, size_t ksz) {
+ kv_node n;
+ n.key = copyString(key, ksz);
+ n.key_sz = ksz;
+ skiplist_node* found = skiplist_find_greater_or_equal(l, &n.snode);
+ // free the temporary query key, otherwise it leaks on every seek
+ free(n.key);
+ return found;
+}
+
+skiplist_node* kv_skiplist_find_le(skiplist_raw* l, const char* key, size_t ksz) {
+ kv_node n;
+ n.key = copyString(key, ksz);
+ n.key_sz = ksz;
+ skiplist_node* found = skiplist_find_smaller_or_equal(l, &n.snode);
+ // free the temporary query key, otherwise it leaks on every seek
+ free(n.key);
+ return found;
+}
diff --git a/engine/kv_skiplist.h b/engine/kv_skiplist.h
new file mode 100644
index 00000000..853acee5
--- /dev/null
+++ b/engine/kv_skiplist.h
@@ -0,0 +1,63 @@
+#ifndef _KV_SKIPLIST_H
+#define _KV_SKIPLIST_H (1)
+
+#include "skiplist.h"
+
+#include <stddef.h>  // size_t
+#include <string.h>  // memcmp in kv_cmp
+#include <pthread.h> // pthread_mutex_t
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Define a node that contains key and value pair.
+typedef struct {
+ // Metadata for skiplist node.
+ skiplist_node snode;
+ // Key/value payload of this node; dlock guards concurrent value access.
+ pthread_mutex_t dlock;
+ char* key;
+ size_t key_sz;
+ char* value;
+ size_t value_sz;
+} kv_node;
+
+static int kv_cmp(skiplist_node* a, skiplist_node* b, void* aux) {
+ kv_node *aa, *bb;
+ aa = _get_entry(a, kv_node, snode);
+ bb = _get_entry(b, kv_node, snode);
+ size_t sz;
+ sz = aa->key_sz;
+ if (bb->key_sz < sz) {
+ sz = bb->key_sz;
+ }
+ int ret = memcmp(aa->key, bb->key, sz);
+ if (ret != 0 || aa->key_sz == bb->key_sz) {
+ return ret;
+ }
+ return aa->key_sz < bb->key_sz?-1:1;
+}
+
+skiplist_raw* kv_skiplist_create();
+void kv_skiplist_destroy(skiplist_raw* slist);
+kv_node* kv_skiplist_node_create(const char* key, size_t ksz, const char* value, size_t vsz);
+void kv_skiplist_node_free(kv_node *n);
+
+int kv_skiplist_insert(skiplist_raw* l, const char* key, size_t ksz, const char* value, size_t vsz);
+int kv_skiplist_update(skiplist_raw* l, const char* key, size_t ksz, const char* value, size_t vsz);
+
+char* kv_skiplist_get(skiplist_raw* l, const char* key, size_t ksz, size_t* vsz);
+int kv_skiplist_del(skiplist_raw* l, const char* key, size_t ksz);
+skiplist_node* kv_skiplist_find_ge(skiplist_raw* l, const char* key, size_t ksz);
+skiplist_node* kv_skiplist_find_le(skiplist_raw* l, const char* key, size_t ksz);
+
+char* kv_skiplist_get_node_key(skiplist_node* n, size_t* sz);
+char* kv_skiplist_get_node_value(skiplist_node* n, size_t* sz);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
\ No newline at end of file
diff --git a/engine/kv_test.go b/engine/kv_test.go
new file mode 100644
index 00000000..78dc7668
--- /dev/null
+++ b/engine/kv_test.go
@@ -0,0 +1,444 @@
+package engine
+
+import (
+ "flag"
+ "io/ioutil"
+ "os"
+ "path"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
+)
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ if testing.Verbose() {
+ SetLogLevel(int32(common.LOG_DETAIL))
+ }
+ ret := m.Run()
+ os.Exit(ret)
+}
+
+func TestRocksdbCheckpointData(t *testing.T) {
+ testCheckpointData(t, "rocksdb")
+}
+
+func TestPebbleEngCheckpointData(t *testing.T) {
+ testCheckpointData(t, "pebble")
+}
+
+func TestMemEngBtreeCheckpointData(t *testing.T) {
+ old := useMemType
+ useMemType = memTypeBtree
+ defer func() {
+ useMemType = old
+ }()
+ testCheckpointData(t, "mem")
+}
+
+func TestMemEngRadixCheckpointData(t *testing.T) {
+ old := useMemType
+ useMemType = memTypeRadix
+ defer func() {
+ useMemType = old
+ }()
+ testCheckpointData(t, "mem")
+}
+
+func TestMemEngSkiplistCheckpointData(t *testing.T) {
+ old := useMemType
+ useMemType = memTypeSkiplist
+ defer func() {
+ useMemType = old
+ }()
+ testCheckpointData(t, "mem")
+}
+
+func testCheckpointData(t *testing.T, engType string) {
+ SetLogger(0, nil)
+ cfg := NewRockConfig()
+ tmpDir, err := ioutil.TempDir("", "checkpoint_data")
+ assert.Nil(t, err)
+ t.Log(tmpDir)
+ defer os.RemoveAll(tmpDir)
+ cfg.DataDir = tmpDir
+ cfg.EngineType = engType
+ eng, err := NewKVEng(cfg)
+ assert.Nil(t, err)
+ err = eng.OpenEng()
+ assert.Nil(t, err)
+ defer eng.CloseAll()
+
+ ck, err := eng.NewCheckpoint(false)
+ assert.Nil(t, err)
+ // test save should not block, so lastTs should be updated soon
+ ckpath := path.Join(tmpDir, "newCk")
+ os.MkdirAll(ckpath, common.DIR_PERM)
+ // since the open engine will add rocksdb as subdir, we save it to the right place
+ err = ck.Save(path.Join(ckpath, engType), make(chan struct{}))
+ assert.Nil(t, err)
+
+ wb := eng.DefaultWriteBatch()
+ knum := 3
+ for j := 0; j < knum; j++ {
+ wb.Put([]byte("test"+strconv.Itoa(j)), []byte("test"+strconv.Itoa(j)))
+ }
+ eng.Write(wb)
+ wb.Clear()
+
+ ck2, err := eng.NewCheckpoint(false)
+ assert.Nil(t, err)
+ // test save should not block, so lastTs should be updated soon
+ ckpath2 := path.Join(tmpDir, "newCk2")
+ os.MkdirAll(ckpath2, common.DIR_PERM)
+ err = ck2.Save(path.Join(ckpath2, engType), make(chan struct{}))
+ assert.Nil(t, err)
+
+ cfgCK := *cfg
+ cfgCK.DataDir = ckpath
+ engCK, err := NewKVEng(&cfgCK)
+ assert.Nil(t, err)
+ err = engCK.OpenEng()
+ assert.Nil(t, err)
+ defer engCK.CloseAll()
+
+ cfgCK2 := *cfg
+ cfgCK2.DataDir = ckpath2
+ engCK2, err := NewKVEng(&cfgCK2)
+ assert.Nil(t, err)
+ err = engCK2.OpenEng()
+ assert.Nil(t, err)
+ defer engCK2.CloseAll()
+
+ for j := 0; j < knum; j++ {
+ key := []byte("test" + strconv.Itoa(j))
+ origV, err := eng.GetBytes(key)
+ assert.Equal(t, key, origV)
+ v, err := engCK.GetBytes(key)
+ assert.Nil(t, err)
+ assert.Nil(t, v)
+ v2, err := engCK2.GetBytes(key)
+ assert.Nil(t, err)
+ assert.Equal(t, key, v2)
+ assert.Equal(t, origV, v2)
+ }
+ if engType == "mem" && useMemType == memTypeRadix {
+ kn := engCK2.GetApproximateTotalKeyNum()
+ assert.Equal(t, knum, kn)
+ }
+ time.Sleep(time.Second)
+}
+
+func TestMemEngBtreeIterator(t *testing.T) {
+ old := useMemType
+ useMemType = memTypeBtree
+ defer func() {
+ useMemType = old
+ }()
+ testKVIterator(t, "mem")
+}
+
+func TestMemEngRadixIterator(t *testing.T) {
+ old := useMemType
+ useMemType = memTypeRadix
+ defer func() {
+ useMemType = old
+ }()
+ testKVIterator(t, "mem")
+}
+
+func TestMemEngSkiplistIterator(t *testing.T) {
+ old := useMemType
+ useMemType = memTypeSkiplist
+ defer func() {
+ useMemType = old
+ }()
+ testKVIterator(t, "mem")
+}
+
+func TestRockEngIterator(t *testing.T) {
+ testKVIterator(t, "rocksdb")
+}
+
+func TestPebbleEngIterator(t *testing.T) {
+ testKVIterator(t, "pebble")
+}
+
+func testKVIterator(t *testing.T, engType string) {
+ SetLogger(0, nil)
+ cfg := NewRockConfig()
+ tmpDir, err := ioutil.TempDir("", "iterator_data")
+ assert.Nil(t, err)
+ t.Log(tmpDir)
+ defer os.RemoveAll(tmpDir)
+ cfg.DataDir = tmpDir
+ cfg.EngineType = engType
+ eng, err := NewKVEng(cfg)
+ assert.Nil(t, err)
+ err = eng.OpenEng()
+ assert.Nil(t, err)
+ defer eng.CloseAll()
+
+ wb := eng.NewWriteBatch()
+ key := []byte("test")
+ wb.Put(key, key)
+ eng.Write(wb)
+ wb.Clear()
+ v, err := eng.GetBytes(key)
+ assert.Nil(t, err)
+ assert.Equal(t, key, v)
+ key2 := []byte("test2")
+ wb.Put(key2, key2)
+ eng.Write(wb)
+ wb.Clear()
+ v, err = eng.GetBytes(key2)
+ assert.Nil(t, err)
+ assert.Equal(t, key2, v)
+ key3 := []byte("test3")
+ wb.Put(key3, key3)
+ eng.Write(wb)
+ wb.Clear()
+ v, err = eng.GetBytes(key3)
+ assert.Nil(t, err)
+ assert.Equal(t, key3, v)
+ key4 := []byte("test4")
+ wb.Put(key4, key4)
+ eng.Write(wb)
+ wb.Clear()
+ v, err = eng.GetBytes(key4)
+ assert.Nil(t, err)
+ assert.Equal(t, key4, v)
+ it, _ := eng.GetIterator(IteratorOpts{})
+ defer it.Close()
+
+ // test seek part of prefix
+ it.Seek([]byte("tes"))
+ assert.True(t, it.Valid())
+ assert.Equal(t, key, it.Key())
+ assert.Equal(t, key, it.Value())
+ it.Seek(key)
+ assert.True(t, it.Valid())
+ assert.Equal(t, key, it.Key())
+ assert.Equal(t, key, it.Value())
+ it.Seek(key2)
+ assert.True(t, it.Valid())
+ assert.Equal(t, key2, it.Key())
+ assert.Equal(t, key2, it.Value())
+ it.Seek(key4)
+ assert.True(t, it.Valid())
+ assert.Equal(t, key4, it.Key())
+ assert.Equal(t, key4, it.Value())
+ it.Seek([]byte("test44"))
+ assert.True(t, !it.Valid())
+
+ it.SeekToFirst()
+ // changing a value after the iterator is created should not affect the snapshot iterator
+ if engType != "mem" || useMemType == memTypeRadix {
+ // for btree, the write will be blocked while the iterator is open
+ // for skiplist, we do not support snapshot
+ wb.Put(key4, []byte(string(key4)+"update"))
+ eng.Write(wb)
+ wb.Clear()
+ }
+ if engType == "mem" && useMemType == memTypeRadix {
+ kn := eng.GetApproximateTotalKeyNum()
+ assert.Equal(t, 4, kn)
+ }
+ assert.True(t, it.Valid())
+ assert.Equal(t, key, it.Key())
+ assert.Equal(t, key, it.Value())
+ it.Next()
+ assert.True(t, it.Valid())
+ assert.Equal(t, key2, it.Key())
+ assert.Equal(t, key2, it.Value())
+ it.Next()
+ assert.True(t, it.Valid())
+ assert.Equal(t, key3, it.Key())
+ assert.Equal(t, key3, it.Value())
+ it.Prev()
+ assert.True(t, it.Valid())
+ assert.Equal(t, key2, it.Key())
+ assert.Equal(t, key2, it.Value())
+ it.SeekToLast()
+ assert.True(t, it.Valid())
+ assert.Equal(t, key4, it.Key())
+ assert.Equal(t, key4, it.Value())
+ it.Prev()
+ assert.True(t, it.Valid())
+ assert.Equal(t, key3, it.Key())
+ assert.Equal(t, key3, it.Value())
+
+ if useMemType != memTypeBtree && engType != "pebble" {
+ it.SeekForPrev(key3)
+ assert.True(t, it.Valid())
+ assert.Equal(t, key3, it.Key())
+ assert.Equal(t, key3, it.Value())
+ }
+
+ it.SeekForPrev([]byte("test5"))
+ assert.True(t, it.Valid())
+ assert.Equal(t, key4, it.Key())
+ assert.Equal(t, key4, it.Value())
+
+ it.SeekForPrev([]byte("test1"))
+ assert.True(t, it.Valid())
+ assert.Equal(t, key, it.Key())
+ assert.Equal(t, key, it.Value())
+ it.Prev()
+ assert.True(t, !it.Valid())
+}
+
+func TestPebbleEngSnapshotIterator(t *testing.T) {
+ testKVSnapshotIterator(t, "pebble")
+}
+func TestRocksdbEngSnapshotIterator(t *testing.T) {
+ testKVSnapshotIterator(t, "rocksdb")
+}
+func TestMemEngSnapshotIteratorRadix(t *testing.T) {
+ // snapshot iterator for btree or skiplist currently not supported
+ old := useMemType
+ useMemType = memTypeRadix
+ defer func() {
+ useMemType = old
+ }()
+ testKVSnapshotIterator(t, "mem")
+}
+
+func testKVSnapshotIterator(t *testing.T, engType string) {
+ SetLogger(0, nil)
+ cfg := NewRockConfig()
+ tmpDir, err := ioutil.TempDir("", "iterator_data")
+ assert.Nil(t, err)
+ t.Log(tmpDir)
+ defer os.RemoveAll(tmpDir)
+ cfg.DataDir = tmpDir
+ cfg.EngineType = engType
+ eng, err := NewKVEng(cfg)
+ assert.Nil(t, err)
+ err = eng.OpenEng()
+ assert.Nil(t, err)
+ defer eng.CloseAll()
+
+ wb := eng.NewWriteBatch()
+ key := []byte("test")
+ wb.Put(key, key)
+ key2 := []byte("test2")
+ wb.Put(key2, key2)
+ key3 := []byte("test3")
+ wb.Put(key3, key3)
+ eng.Write(wb)
+ wb.Clear()
+
+ it, _ := eng.GetIterator(IteratorOpts{})
+ defer it.Close()
+ // modify after iterator snapshot
+ wb = eng.NewWriteBatch()
+ wb.Put(key2, []byte("changed"))
+ wb.Put(key3, []byte("changed"))
+ eng.Write(wb)
+ wb.Clear()
+
+ it.Seek(key)
+ assert.True(t, it.Valid())
+ assert.Equal(t, key, it.Key())
+ assert.Equal(t, key, it.Value())
+ it.Seek(key2)
+ assert.True(t, it.Valid())
+ assert.Equal(t, key2, it.Key())
+ assert.Equal(t, key2, it.Value())
+ it.Seek(key3)
+ assert.True(t, it.Valid())
+ assert.Equal(t, key3, it.Key())
+ assert.Equal(t, key3, it.Value())
+
+ it2, _ := eng.GetIterator(IteratorOpts{})
+ defer it2.Close()
+
+ it2.Seek(key)
+ assert.True(t, it2.Valid())
+ assert.Equal(t, key, it2.Key())
+ assert.Equal(t, key, it2.Value())
+ it2.Seek(key2)
+ assert.True(t, it2.Valid())
+ assert.Equal(t, key2, it2.Key())
+ assert.Equal(t, []byte("changed"), it2.Value())
+ it2.Seek(key3)
+ assert.True(t, it2.Valid())
+ assert.Equal(t, key3, it2.Key())
+ assert.Equal(t, []byte("changed"), it2.Value())
+}
+
+func TestSpecialDataSeekForRocksdb(t *testing.T) {
+ testSpecialDataSeekForAnyType(t, "rocksdb")
+}
+func TestSpecialDataSeekForPebble(t *testing.T) {
+ testSpecialDataSeekForAnyType(t, "pebble")
+}
+func TestSpecialDataSeekForBtree(t *testing.T) {
+ old := useMemType
+ useMemType = memTypeBtree
+ defer func() {
+ useMemType = old
+ }()
+ testSpecialDataSeekForAnyType(t, "mem")
+}
+
+func TestSpecialDataSeekForSkiplist(t *testing.T) {
+ old := useMemType
+ useMemType = memTypeSkiplist
+ defer func() {
+ useMemType = old
+ }()
+ testSpecialDataSeekForAnyType(t, "mem")
+}
+
+func TestSpecialDataSeekForRadix(t *testing.T) {
+ old := useMemType
+ useMemType = memTypeRadix
+ defer func() {
+ useMemType = old
+ }()
+ testSpecialDataSeekForAnyType(t, "mem")
+}
+
+func testSpecialDataSeekForAnyType(t *testing.T, engType string) {
+ base := []byte{1, 0, 1, 0}
+ key := append([]byte{}, base...)
+ key2 := append([]byte{}, base...)
+ minKey := []byte{1, 0, 1}
+
+ SetLogger(0, nil)
+ cfg := NewRockConfig()
+ tmpDir, err := ioutil.TempDir("", "iterator_data")
+ assert.Nil(t, err)
+ t.Log(tmpDir)
+ defer os.RemoveAll(tmpDir)
+ cfg.DataDir = tmpDir
+ cfg.EngineType = engType
+ eng, err := NewKVEng(cfg)
+ assert.Nil(t, err)
+ err = eng.OpenEng()
+ assert.Nil(t, err)
+ defer eng.CloseAll()
+
+ wb := eng.NewWriteBatch()
+ value := []byte{1}
+
+ wb.Put(key, value)
+ eng.Write(wb)
+ wb.Clear()
+ key2 = append(key2, []byte{1}...)
+ wb.Put(key2, value)
+ eng.Write(wb)
+ wb.Clear()
+
+ it, _ := eng.GetIterator(IteratorOpts{})
+ defer it.Close()
+ it.Seek(minKey)
+ assert.True(t, it.Valid())
+ assert.Equal(t, key, it.Key())
+ assert.Equal(t, value, it.Value())
+}
diff --git a/engine/mem_eng.go b/engine/mem_eng.go
new file mode 100644
index 00000000..3dae010d
--- /dev/null
+++ b/engine/mem_eng.go
@@ -0,0 +1,629 @@
+package engine
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "sync"
+ "sync/atomic"
+
+ "github.com/youzan/ZanRedisDB/common"
+)
+
+const (
+ defBucket = "default"
+)
+
+type memType int
+
+const (
+ memTypeSkiplist memType = iota
+ memTypeRadix
+ memTypeBtree
+)
+
+var useMemType memType = memTypeRadix
+
+type memRefSlice struct {
+ b []byte
+ needCopy bool
+}
+
+func (rs *memRefSlice) Free() {
+}
+
+// ref data
+func (rs *memRefSlice) Data() []byte {
+ return rs.b
+}
+
+// Bytes returns the data, copied only when needCopy is set
+func (rs *memRefSlice) Bytes() []byte {
+ if !rs.needCopy || rs.b == nil {
+ return rs.b
+ }
+ d := make([]byte, len(rs.b))
+ copy(d, rs.b)
+ return d
+}
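+// Illustrative sketch (not part of this change) of the Data/Bytes contract
+// implemented above:
+//
+//	rs := &memRefSlice{b: v, needCopy: true}
+//	ref := rs.Data()  // shares the underlying buffer with the engine
+//	own := rs.Bytes() // a private copy that is safe to keep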
+
+type sharedMemConfig struct {
+}
+
+func newSharedMemConfig(opt RockOptions) *sharedMemConfig {
+ sc := &sharedMemConfig{}
+ return sc
+}
+
+func (sc *sharedMemConfig) ChangeLimiter(bytesPerSec int64) {
+}
+
+func (sc *sharedMemConfig) Destroy() {
+}
+
+type memEng struct {
+ rwmutex sync.RWMutex
+ cfg *RockEngConfig
+ eng *btree
+ slEng *skipList
+ radixMemI *radixMemIndex
+ engOpened int32
+ lastCompact int64
+ deletedCnt int64
+ quit chan struct{}
+}
+
+func NewMemEng(cfg *RockEngConfig) (*memEng, error) {
+ if len(cfg.DataDir) == 0 {
+ return nil, errors.New("config error")
+ }
+
+ if !cfg.ReadOnly {
+ err := os.MkdirAll(cfg.DataDir, common.DIR_PERM)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if !cfg.DisableMergeCounter {
+ if cfg.EnableTableCounter {
+			// the merge counter for the mem engine is applied in memWriteBatch.Merge
+ }
+ } else {
+ cfg.EnableTableCounter = false
+ }
+ db := &memEng{
+ cfg: cfg,
+ quit: make(chan struct{}),
+ }
+ if cfg.AutoCompacted {
+ go db.compactLoop()
+ }
+
+ return db, nil
+}
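+// Illustrative usage (a minimal sketch, not part of this change; it mirrors the
+// pattern used by the engine tests in this PR, and the data dir is hypothetical):
+//
+//	cfg := NewRockConfig()
+//	cfg.DataDir = "/tmp/mem_data"
+//	eng, _ := NewMemEng(cfg)
+//	_ = eng.OpenEng()
+//	defer eng.CloseAll()
+//	wb := eng.NewWriteBatch()
+//	wb.Put([]byte("k"), []byte("v"))
+//	_ = eng.Write(wb)
+//	wb.Clear()
+//	v, _ := eng.GetBytes([]byte("k")) // v == []byte("v")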
+
+func (me *memEng) NewWriteBatch() WriteBatch {
+ wb, err := newMemWriteBatch(me)
+ if err != nil {
+ return nil
+ }
+ return wb
+}
+
+func (me *memEng) DefaultWriteBatch() WriteBatch {
+ return me.NewWriteBatch()
+}
+
+func (me *memEng) GetDataDir() string {
+ return path.Join(me.cfg.DataDir, "mem")
+}
+
+func (me *memEng) SetCompactionFilter(ICompactFilter) {
+}
+func (me *memEng) SetMaxBackgroundOptions(maxCompact int, maxBackJobs int) error {
+ return nil
+}
+
+func (me *memEng) compactLoop() {
+}
+
+func (me *memEng) CheckDBEngForRead(fullPath string) error {
+ return nil
+}
+
+func (me *memEng) getDataFileName() string {
+ return path.Join(me.GetDataDir(), "mem.dat")
+}
+
+func (me *memEng) OpenEng() error {
+ if !me.IsClosed() {
+ dbLog.Warningf("engine already opened: %v, should close it before reopen", me.GetDataDir())
+ return errors.New("open failed since not closed")
+ }
+ me.rwmutex.Lock()
+ defer me.rwmutex.Unlock()
+ if !me.cfg.ReadOnly {
+ os.MkdirAll(me.GetDataDir(), common.DIR_PERM)
+ }
+ switch useMemType {
+ case memTypeRadix:
+ eng, err := NewRadix()
+ if err != nil {
+ return err
+ }
+ err = loadMemDBFromFile(me.getDataFileName(), func(key []byte, value []byte) error {
+ w := eng.memkv.Txn(true)
+ eng.Put(w, key, value)
+ w.Commit()
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ me.radixMemI = eng
+ case memTypeBtree:
+ eng := &btree{
+ cmp: cmpItem,
+ }
+ err := loadMemDBFromFile(me.getDataFileName(), func(key []byte, value []byte) error {
+ item := &kvitem{
+ key: make([]byte, len(key)),
+ value: make([]byte, len(value)),
+ }
+ copy(item.key, key)
+ copy(item.value, value)
+ eng.Set(item)
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ me.eng = eng
+ default:
+ sleng := NewSkipList()
+ err := loadMemDBFromFile(me.getDataFileName(), func(key []byte, value []byte) error {
+ return sleng.Set(key, value)
+ })
+ if err != nil {
+ return err
+ }
+ me.slEng = sleng
+ }
+ atomic.StoreInt32(&me.engOpened, 1)
+ dbLog.Infof("engine opened: %v", me.GetDataDir())
+ return nil
+}
+
+func (me *memEng) Write(wb WriteBatch) error {
+ return wb.Commit()
+}
+
+func (me *memEng) DeletedBeforeCompact() int64 {
+ return atomic.LoadInt64(&me.deletedCnt)
+}
+
+func (me *memEng) AddDeletedCnt(c int64) {
+ atomic.AddInt64(&me.deletedCnt, c)
+}
+
+func (me *memEng) LastCompactTime() int64 {
+ return atomic.LoadInt64(&me.lastCompact)
+}
+
+func (me *memEng) CompactRange(rg CRange) {
+}
+
+func (me *memEng) CompactAllRange() {
+ me.CompactRange(CRange{})
+}
+
+func (me *memEng) DisableManualCompact(disable bool) {
+}
+
+func (me *memEng) GetApproximateTotalKeyNum() int {
+ me.rwmutex.RLock()
+ defer me.rwmutex.RUnlock()
+ switch useMemType {
+ case memTypeBtree:
+ return me.eng.Len()
+ case memTypeRadix:
+ return int(me.radixMemI.Len())
+ default:
+ return int(me.slEng.Len())
+ }
+}
+
+func (me *memEng) GetApproximateKeyNum(ranges []CRange) uint64 {
+ return 0
+}
+
+func (me *memEng) SetOptsForLogStorage() {
+ return
+}
+
+func (me *memEng) GetApproximateSizes(ranges []CRange, includeMem bool) []uint64 {
+ me.rwmutex.RLock()
+ defer me.rwmutex.RUnlock()
+ sizeList := make([]uint64, len(ranges))
+ if me.IsClosed() {
+ return sizeList
+ }
+ // TODO: estimate the size
+ return sizeList
+}
+
+func (me *memEng) IsClosed() bool {
+	return atomic.LoadInt32(&me.engOpened) == 0
+}
+
+func (me *memEng) CloseEng() bool {
+ me.rwmutex.Lock()
+ defer me.rwmutex.Unlock()
+ if atomic.CompareAndSwapInt32(&me.engOpened, 1, 0) {
+ switch useMemType {
+ case memTypeBtree:
+ if me.eng != nil {
+ me.eng.Destroy()
+ }
+ case memTypeRadix:
+ if me.radixMemI != nil {
+ me.radixMemI.Destroy()
+ }
+ default:
+ if me.slEng != nil {
+ me.slEng.Destroy()
+ }
+ }
+ dbLog.Infof("engine closed: %v", me.GetDataDir())
+ return true
+ }
+ return false
+}
+
+func (me *memEng) CloseAll() {
+ select {
+ case <-me.quit:
+ default:
+ close(me.quit)
+ }
+ me.CloseEng()
+}
+
+func (me *memEng) GetStatistics() string {
+ me.rwmutex.RLock()
+ defer me.rwmutex.RUnlock()
+ if me.IsClosed() {
+ return ""
+ }
+ return ""
+}
+
+func (me *memEng) GetInternalStatus() map[string]interface{} {
+ s := make(map[string]interface{})
+ s["internal"] = me.GetStatistics()
+ return s
+}
+
+func (me *memEng) GetInternalPropertyStatus(p string) string {
+ return p
+}
+
+func (me *memEng) GetBytesNoLock(key []byte) ([]byte, error) {
+ v, err := me.GetRefNoLock(key)
+ if err != nil {
+ return nil, err
+ }
+ if v != nil {
+ value := v.Bytes()
+ v.Free()
+ return value, nil
+ }
+ return nil, nil
+}
+
+func (me *memEng) GetBytes(key []byte) ([]byte, error) {
+ me.rwmutex.RLock()
+ defer me.rwmutex.RUnlock()
+ if me.IsClosed() {
+ return nil, errDBEngClosed
+ }
+ return me.GetBytesNoLock(key)
+}
+
+func (me *memEng) MultiGetBytes(keyList [][]byte, values [][]byte, errs []error) {
+ me.rwmutex.RLock()
+ defer me.rwmutex.RUnlock()
+ if me.IsClosed() {
+		for i := range errs {
+ errs[i] = errDBEngClosed
+ }
+ return
+ }
+ for i, k := range keyList {
+ values[i], errs[i] = me.GetBytesNoLock(k)
+ }
+}
+
+func (me *memEng) Exist(key []byte) (bool, error) {
+ me.rwmutex.RLock()
+ defer me.rwmutex.RUnlock()
+ if me.IsClosed() {
+ return false, errDBEngClosed
+ }
+ return me.ExistNoLock(key)
+}
+
+func (me *memEng) ExistNoLock(key []byte) (bool, error) {
+ v, err := me.GetRefNoLock(key)
+ if err != nil {
+ return false, err
+ }
+ if v == nil {
+ return false, nil
+ }
+ ok := v.Data() != nil
+ v.Free()
+ return ok, nil
+}
+
+func (me *memEng) GetRefNoLock(key []byte) (RefSlice, error) {
+ switch useMemType {
+ case memTypeBtree:
+ bt := me.eng
+ bi := bt.MakeIter()
+
+ bi.SeekGE(&kvitem{key: key})
+ if !bi.Valid() {
+ return nil, nil
+ }
+ item := bi.Cur()
+ if bytes.Equal(item.key, key) {
+ return &memRefSlice{b: item.value, needCopy: true}, nil
+ }
+ return nil, nil
+ case memTypeRadix:
+ v, err := me.radixMemI.Get(key)
+ if err != nil {
+ return nil, err
+ }
+ return &memRefSlice{b: v, needCopy: true}, nil
+ default:
+ v, err := me.slEng.Get(key)
+ if err != nil {
+ return nil, err
+ }
+ return &memRefSlice{b: v, needCopy: false}, nil
+ }
+}
+
+func (me *memEng) GetRef(key []byte) (RefSlice, error) {
+ me.rwmutex.RLock()
+ defer me.rwmutex.RUnlock()
+ if me.IsClosed() {
+ return nil, errDBEngClosed
+ }
+ return me.GetRefNoLock(key)
+}
+
+func (me *memEng) GetValueWithOp(key []byte,
+ op func([]byte) error) error {
+ me.rwmutex.RLock()
+ defer me.rwmutex.RUnlock()
+ if me.IsClosed() {
+ return errDBEngClosed
+ }
+
+ return me.GetValueWithOpNoLock(key, op)
+}
+
+func (me *memEng) GetValueWithOpNoLock(key []byte,
+ op func([]byte) error) error {
+	val, err := me.GetRefNoLock(key)
+ if err != nil {
+ return err
+ }
+ if val != nil {
+ defer val.Free()
+ return op(val.Data())
+ }
+ return op(nil)
+}
+
+func (me *memEng) DeleteFilesInRange(rg CRange) {
+ return
+}
+
+func (me *memEng) GetIterator(opts IteratorOpts) (Iterator, error) {
+ mit, err := newMemIterator(me, opts)
+ if err != nil {
+ return nil, err
+ }
+ return mit, nil
+}
+
+func (me *memEng) NewCheckpoint(printToStdout bool) (KVCheckpoint, error) {
+ return &memEngCheckpoint{
+ me: me,
+		printToStdout: printToStdout,
+ }, nil
+}
+
+type memEngCheckpoint struct {
+ me *memEng
+ printToStdout bool
+}
+
+func (pck *memEngCheckpoint) Save(cpath string, notify chan struct{}) error {
+ tmpFile := path.Join(cpath, "mem.dat.tmp")
+ err := os.Mkdir(cpath, common.DIR_PERM)
+ if err != nil && !os.IsExist(err) {
+ return err
+ }
+ it, err := pck.me.GetIterator(IteratorOpts{})
+ if err != nil {
+ return err
+ }
+ var dataNum int64
+ switch useMemType {
+ case memTypeBtree:
+ dataNum = int64(pck.me.eng.Len())
+ case memTypeRadix:
+ dataNum = pck.me.radixMemI.Len()
+ default:
+ dataNum = pck.me.slEng.Len()
+ }
+
+ it.SeekToFirst()
+ if notify != nil {
+ close(notify)
+ }
+
+ n, fs, err := saveMemDBToFile(it, tmpFile, dataNum, pck.printToStdout)
+	// close the iterator early to release the engine read lock and avoid
+	// blocking writers while the file is synced
+ it.Close()
+
+ if err != nil {
+ dbLog.Infof("save checkpoint to %v failed: %s", cpath, err.Error())
+ return err
+ }
+ if fs != nil {
+ err = fs.Sync()
+ if err != nil {
+ dbLog.Errorf("save checkpoint to %v sync failed: %v ", cpath, err.Error())
+ return err
+ }
+ fs.Close()
+ }
+ err = os.Rename(tmpFile, path.Join(cpath, "mem.dat"))
+ if err != nil {
+ dbLog.Errorf("save checkpoint to %v failed: %v ", cpath, err.Error())
+ } else {
+ dbLog.Infof("save checkpoint to %v done: %v bytes", cpath, n)
+ }
+ return err
+}
+
+func loadMemDBFromFile(fileName string, loader func([]byte, []byte) error) error {
+ // read from checkpoint file
+ fs, err := os.Open(fileName)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+	defer fs.Close()
+	header := make([]byte, 22)
+	_, err = io.ReadFull(fs, header)
+	if err != nil {
+		return err
+	}
+ lenBuf := make([]byte, 8)
+ dataKeyBuf := make([]byte, 0, 1024)
+ dataValueBuf := make([]byte, 0, 1024)
+ for {
+		_, err := io.ReadFull(fs, lenBuf)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ vl := binary.BigEndian.Uint64(lenBuf)
+ if uint64(len(dataKeyBuf)) < vl {
+ dataKeyBuf = make([]byte, vl)
+ }
+		_, err = io.ReadFull(fs, dataKeyBuf[:vl])
+ if err != nil {
+ return err
+ }
+ key := dataKeyBuf[:vl]
+		_, err = io.ReadFull(fs, lenBuf)
+ if err != nil {
+ return err
+ }
+ vl = binary.BigEndian.Uint64(lenBuf)
+ if uint64(len(dataValueBuf)) < vl {
+ dataValueBuf = make([]byte, vl)
+ }
+		_, err = io.ReadFull(fs, dataValueBuf[:vl])
+ if err != nil {
+ return err
+ }
+ value := dataValueBuf[:vl]
+ err = loader(key, value)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
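+// Checkpoint file layout, derived from saveMemDBToFile below and parsed by
+// loadMemDBFromFile above (shown here only as a summary of the code):
+//
+//	"v001\n"                     5-byte version line
+//	fmt.Sprintf("%016d\n", num)  17-byte item count (together the 22-byte header)
+//	then per item:
+//	  8-byte big-endian key length, key bytes,
+//	  8-byte big-endian value length, value bytes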
+
+func saveMemDBToFile(it Iterator, fileName string, dataNum int64, printToStdoutAlso bool) (int64, *os.File, error) {
+ fs, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY, common.FILE_PERM)
+ if err != nil {
+ return 0, nil, err
+ }
+ defer func() {
+ if err != nil {
+ fs.Close()
+ }
+ }()
+ total := int64(0)
+ n := int(0)
+ n, err = fs.Write([]byte("v001\n"))
+ if err != nil {
+ return total, nil, err
+ }
+ total += int64(n)
+ n, err = fs.Write([]byte(fmt.Sprintf("%016d\n", dataNum)))
+ if err != nil {
+ return total, nil, err
+ }
+ if printToStdoutAlso {
+ fmt.Printf("data num: %v\n", dataNum)
+ }
+ total += int64(n)
+ buf := make([]byte, 8)
+ for it.SeekToFirst(); it.Valid(); it.Next() {
+ k := it.RefKey()
+ // save item to path
+ vlen := uint64(len(k))
+ binary.BigEndian.PutUint64(buf, vlen)
+ n, err = fs.Write(buf[:8])
+ if err != nil {
+ return total, nil, err
+ }
+ total += int64(n)
+ n, err = fs.Write(k)
+ if err != nil {
+ return total, nil, err
+ }
+ total += int64(n)
+ v := it.RefValue()
+ if printToStdoutAlso {
+ fmt.Printf("key: (%v)##\n", k)
+ fmt.Printf("value: (%v)##\n", v)
+ }
+ vlen = uint64(len(v))
+ binary.BigEndian.PutUint64(buf, vlen)
+ n, err = fs.Write(buf[:8])
+ if err != nil {
+ return total, nil, err
+ }
+ total += int64(n)
+ n, err = fs.Write(v)
+ if err != nil {
+ return total, nil, err
+ }
+ total += int64(n)
+ }
+ return total, fs, nil
+}
diff --git a/engine/mem_eng_test.go b/engine/mem_eng_test.go
new file mode 100644
index 00000000..d5672c3c
--- /dev/null
+++ b/engine/mem_eng_test.go
@@ -0,0 +1,29 @@
+package engine
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestMemEngReopenAndCheck(t *testing.T) {
+ SetLogger(0, nil)
+ cfg := NewRockConfig()
+ tmpDir, err := ioutil.TempDir("", "checkpoint")
+ assert.Nil(t, err)
+ t.Log(tmpDir)
+ defer os.RemoveAll(tmpDir)
+ cfg.DataDir = tmpDir
+ pe, err := NewMemEng(cfg)
+ t.Logf("%s", err)
+ assert.Nil(t, err)
+ err = pe.OpenEng()
+ t.Logf("%s", err)
+ assert.Nil(t, err)
+
+ pe.CloseAll()
+ time.Sleep(time.Second * 10)
+}
diff --git a/engine/mem_iter.go b/engine/mem_iter.go
new file mode 100644
index 00000000..d2a7dcba
--- /dev/null
+++ b/engine/mem_iter.go
@@ -0,0 +1,136 @@
+package engine
+
+import (
+ "github.com/youzan/ZanRedisDB/common"
+)
+
+type memIter interface {
+ Next()
+ Prev()
+ Valid() bool
+ Seek([]byte)
+ SeekForPrev([]byte)
+ First()
+ Last()
+ Close()
+ Key() []byte
+ Value() []byte
+}
+
+type memIterator struct {
+ db *memEng
+ memit memIter
+ opts IteratorOpts
+ upperBound []byte
+ lowerBound []byte
+ removeTsType byte
+}
+
+// the lower bound is inclusive and the upper bound is exclusive
+func newMemIterator(db *memEng, opts IteratorOpts) (*memIterator, error) {
+ db.rwmutex.RLock()
+ if db.IsClosed() {
+ db.rwmutex.RUnlock()
+ return nil, errDBEngClosed
+ }
+ upperBound := opts.Max
+ lowerBound := opts.Min
+ if opts.Type&common.RangeROpen <= 0 && upperBound != nil {
+		// the range is right-closed, so the max key should be included;
+		// since upperBound is exclusive, extend the max with a zero byte
+ upperBound = append(upperBound, 0)
+ }
+
+ dbit := &memIterator{
+ db: db,
+ lowerBound: lowerBound,
+ upperBound: upperBound,
+ opts: opts,
+ }
+ if useMemType == memTypeBtree {
+ bit := db.eng.MakeIter()
+ dbit.memit = &bit
+ } else if useMemType == memTypeRadix {
+ var err error
+ dbit.memit, err = db.radixMemI.NewIterator()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ dbit.memit = db.slEng.NewIterator()
+ }
+
+ return dbit, nil
+}
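+// Illustrative iteration sketch (not part of this change; it mirrors
+// testKVSnapshotIterator in the engine tests of this PR):
+//
+//	it, _ := eng.GetIterator(IteratorOpts{})
+//	defer it.Close()
+//	for it.Seek([]byte("test")); it.Valid(); it.Next() {
+//		k, v := it.Key(), it.Value()
+//		_, _ = k, v
+//	}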
+
+func (it *memIterator) Next() {
+ it.memit.Next()
+}
+
+func (it *memIterator) Prev() {
+ it.memit.Prev()
+}
+
+func (it *memIterator) Seek(key []byte) {
+ it.memit.Seek(key)
+}
+
+func (it *memIterator) SeekForPrev(key []byte) {
+ it.memit.SeekForPrev(key)
+}
+
+func (it *memIterator) SeekToFirst() {
+ it.memit.First()
+}
+
+func (it *memIterator) SeekToLast() {
+ it.memit.Last()
+}
+
+func (it *memIterator) Valid() bool {
+ return it.memit.Valid()
+}
+
+// RefKey returns the key without copying; the bytes are only valid until the
+// next call on the iterator
+func (it *memIterator) RefKey() []byte {
+ return it.memit.Key()
+}
+
+func (it *memIterator) Key() []byte {
+ d := it.RefKey()
+ if useMemType == memTypeSkiplist {
+ return d
+ }
+ c := make([]byte, len(d))
+ copy(c, d)
+ return c
+}
+
+// RefValue returns the value without copying; the bytes are only valid until
+// the next call on the iterator
+func (it *memIterator) RefValue() []byte {
+ v := it.memit.Value()
+ if (it.removeTsType == KVType || it.removeTsType == HashType) && len(v) >= tsLen {
+ v = v[:len(v)-tsLen]
+ }
+ return v
+}
+
+func (it *memIterator) Value() []byte {
+ d := it.RefValue()
+ if useMemType == memTypeSkiplist {
+ return d
+ }
+ c := make([]byte, len(d))
+ copy(c, d)
+ return c
+}
+
+func (it *memIterator) NoTimestamp(vt byte) {
+ it.removeTsType = vt
+}
+
+func (it *memIterator) Close() {
+ it.memit.Close()
+ it.db.rwmutex.RUnlock()
+}
diff --git a/engine/mem_writebatch.go b/engine/mem_writebatch.go
new file mode 100644
index 00000000..ff5fcc21
--- /dev/null
+++ b/engine/mem_writebatch.go
@@ -0,0 +1,316 @@
+package engine
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+
+ memdb "github.com/youzan/ZanRedisDB/engine/radixdb"
+)
+
+type wop int
+
+const (
+ NotOp wop = iota
+ DeleteOp
+ PutOp
+ MergeOp
+ DeleteRangeOp
+)
+
+type writeOp struct {
+ op wop
+ key []byte
+ value []byte
+}
+
+type memWriteBatch struct {
+ db *memEng
+ ops []writeOp
+ writer *memdb.Txn
+ hasErr error
+ cachedForMerge map[string][]byte
+}
+
+func newMemWriteBatch(db *memEng) (*memWriteBatch, error) {
+ b := &memWriteBatch{
+ db: db,
+ ops: make([]writeOp, 0, 10),
+ }
+
+ return b, nil
+}
+
+func (wb *memWriteBatch) Destroy() {
+ wb.ops = wb.ops[:0]
+ wb.hasErr = nil
+ wb.cachedForMerge = nil
+ if useMemType == memTypeRadix {
+ if wb.writer != nil {
+ wb.writer.Abort()
+ }
+ }
+}
+
+func (wb *memWriteBatch) commitSkiplist() error {
+ defer wb.Clear()
+ var err error
+ for _, w := range wb.ops {
+ switch w.op {
+ case DeleteOp:
+ err = wb.db.slEng.Delete(w.key)
+ case PutOp:
+ err = wb.db.slEng.Set(w.key, w.value)
+ case DeleteRangeOp:
+ it := wb.db.slEng.NewIterator()
+ it.Seek(w.key)
+ keys := make([][]byte, 0, 100)
+ for ; it.Valid(); it.Next() {
+ k := it.Key()
+ if w.value != nil && bytes.Compare(k, w.value) >= 0 {
+ break
+ }
+ keys = append(keys, k)
+ }
+ it.Close()
+ for _, k := range keys {
+ wb.db.slEng.Delete(k)
+ }
+ case MergeOp:
+ cur, err := GetRocksdbUint64(wb.db.GetBytesNoLock(w.key))
+ if err != nil {
+ return err
+ }
+ vint, err := GetRocksdbUint64(w.value, nil)
+ if err != nil {
+ return err
+ }
+ nv := cur + vint
+ buf := make([]byte, 8)
+ binary.LittleEndian.PutUint64(buf, nv)
+			err = wb.db.slEng.Set(w.key, buf)
+			if err != nil {
+				return err
+			}
+ default:
+ return errors.New("unknown write operation")
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (wb *memWriteBatch) Commit() error {
+ switch useMemType {
+ case memTypeBtree:
+ return wb.commitBtree()
+ case memTypeRadix:
+ return wb.commitRadixMem()
+ default:
+ return wb.commitSkiplist()
+ }
+}
+
+func (wb *memWriteBatch) commitBtree() error {
+ wb.db.rwmutex.Lock()
+ defer wb.db.rwmutex.Unlock()
+ defer wb.Clear()
+ for _, w := range wb.ops {
+ item := &kvitem{key: w.key, value: w.value}
+ switch w.op {
+ case DeleteOp:
+ wb.db.eng.Delete(item)
+ case PutOp:
+ wb.db.eng.Set(item)
+ case DeleteRangeOp:
+ bit := wb.db.eng.MakeIter()
+ end := item.value
+ item.value = nil
+ bit.SeekGE(item)
+ keys := make([][]byte, 0, 100)
+ for ; bit.Valid(); bit.Next() {
+ if end != nil && bytes.Compare(bit.Cur().key, end) >= 0 {
+ break
+ }
+ keys = append(keys, bit.Cur().key)
+ }
+ for _, k := range keys {
+ wb.db.eng.Delete(&kvitem{key: k})
+ }
+ case MergeOp:
+ v, err := wb.db.GetBytesNoLock(item.key)
+ cur, err := GetRocksdbUint64(v, err)
+ if err != nil {
+ return err
+ }
+ vint, err := GetRocksdbUint64(item.value, nil)
+ if err != nil {
+ return err
+ }
+ nv := cur + vint
+ buf := make([]byte, 8)
+ binary.LittleEndian.PutUint64(buf, nv)
+ item.value = buf
+ wb.db.eng.Set(item)
+ default:
+ return errors.New("unknown write operation")
+ }
+ }
+ return nil
+}
+
+func (wb *memWriteBatch) commitRadixMem() error {
+ defer wb.Clear()
+ if wb.hasErr != nil {
+ wb.writer.Abort()
+ return wb.hasErr
+ }
+ if wb.writer != nil {
+ wb.writer.Commit()
+ }
+ return nil
+}
+
+func (wb *memWriteBatch) Clear() {
+ wb.ops = wb.ops[:0]
+ wb.hasErr = nil
+ wb.cachedForMerge = nil
+ if useMemType == memTypeRadix {
+ if wb.writer != nil {
+ wb.writer.Abort()
+ // TODO: maybe reset for reuse if possible
+ wb.writer = nil
+ }
+ }
+}
+
+func (wb *memWriteBatch) DeleteRange(start, end []byte) {
+ if useMemType == memTypeRadix {
+ if wb.writer == nil {
+ wb.writer = wb.db.radixMemI.memkv.Txn(true)
+ }
+ it, err := wb.db.radixMemI.NewIterator()
+ if err != nil {
+ wb.hasErr = err
+ return
+ }
+ it.Seek(start)
+ for ; it.Valid(); it.Next() {
+ k := it.Key()
+ if end != nil && bytes.Compare(k, end) >= 0 {
+ break
+ }
+ err = wb.db.radixMemI.Delete(wb.writer, k)
+ if err != nil {
+ wb.hasErr = err
+ break
+ }
+ if wb.cachedForMerge != nil {
+ delete(wb.cachedForMerge, string(k))
+ }
+ }
+ it.Close()
+ }
+ wb.ops = append(wb.ops, writeOp{
+ op: DeleteRangeOp,
+ key: start,
+ value: end,
+ })
+}
+
+func (wb *memWriteBatch) Delete(key []byte) {
+ if useMemType == memTypeRadix {
+ if wb.writer == nil {
+ wb.writer = wb.db.radixMemI.memkv.Txn(true)
+ }
+ err := wb.db.radixMemI.Delete(wb.writer, key)
+ if err != nil {
+ wb.hasErr = err
+ }
+ if wb.cachedForMerge != nil {
+ delete(wb.cachedForMerge, string(key))
+ }
+ return
+ }
+ wb.ops = append(wb.ops, writeOp{
+ op: DeleteOp,
+ key: key,
+ value: nil,
+ })
+}
+
+func (wb *memWriteBatch) Put(key []byte, value []byte) {
+ if useMemType == memTypeRadix {
+ if wb.writer == nil {
+ wb.writer = wb.db.radixMemI.memkv.Txn(true)
+ }
+ err := wb.db.radixMemI.Put(wb.writer, key, value)
+ if err != nil {
+ wb.hasErr = err
+ } else {
+ if wb.cachedForMerge == nil {
+ wb.cachedForMerge = make(map[string][]byte, 4)
+ }
+ wb.cachedForMerge[string(key)] = value
+ }
+ return
+ }
+ item := &kvitem{}
+ item.key = make([]byte, len(key))
+ item.value = make([]byte, len(value))
+ copy(item.key, key)
+ copy(item.value, value)
+ wb.ops = append(wb.ops, writeOp{
+ op: PutOp,
+ key: item.key,
+ value: item.value,
+ })
+}
+
+func (wb *memWriteBatch) Merge(key []byte, value []byte) {
+ if useMemType == memTypeRadix {
+ if wb.writer == nil {
+ wb.writer = wb.db.radixMemI.memkv.Txn(true)
+ }
+ var oldV []byte
+ if wb.cachedForMerge != nil {
+ oldV = wb.cachedForMerge[string(key)]
+ }
+ var err error
+ if oldV == nil {
+ oldV, err = wb.db.GetBytesNoLock(key)
+ }
+ cur, err := GetRocksdbUint64(oldV, err)
+ if err != nil {
+ wb.hasErr = err
+ return
+ }
+ vint, err := GetRocksdbUint64(value, nil)
+ if err != nil {
+ wb.hasErr = err
+ return
+ }
+ nv := cur + vint
+ buf := make([]byte, 8)
+ binary.LittleEndian.PutUint64(buf, nv)
+ err = wb.db.radixMemI.Put(wb.writer, key, buf)
+ if err != nil {
+ wb.hasErr = err
+ } else {
+ if wb.cachedForMerge == nil {
+ wb.cachedForMerge = make(map[string][]byte, 4)
+ }
+ wb.cachedForMerge[string(key)] = buf
+ }
+ return
+ }
+ item := &kvitem{}
+ item.key = make([]byte, len(key))
+ item.value = make([]byte, len(value))
+ copy(item.key, key)
+ copy(item.value, value)
+ wb.ops = append(wb.ops, writeOp{
+ op: MergeOp,
+ key: item.key,
+ value: item.value,
+ })
+}
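+// Illustrative counter merge (a sketch, not part of this change; it assumes eng
+// is an opened mem engine, a hypothetical variable): Merge adds the
+// little-endian uint64 in value to the current value stored at key, as
+// implemented above.
+//
+//	wb := eng.NewWriteBatch()
+//	inc := make([]byte, 8)
+//	binary.LittleEndian.PutUint64(inc, 1)
+//	wb.Merge([]byte("counter"), inc)
+//	_ = eng.Write(wb)
+//	wb.Clear()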
diff --git a/engine/pebble_eng.go b/engine/pebble_eng.go
new file mode 100644
index 00000000..3d8a03cd
--- /dev/null
+++ b/engine/pebble_eng.go
@@ -0,0 +1,564 @@
+package engine
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+ "os"
+ "path"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/cockroachdb/pebble"
+ "github.com/cockroachdb/pebble/bloom"
+ "github.com/shirou/gopsutil/mem"
+ "github.com/youzan/ZanRedisDB/common"
+)
+
+const (
+ numOfLevels = 7
+)
+
+type pebbleRefSlice struct {
+ b []byte
+ c io.Closer
+}
+
+func (rs *pebbleRefSlice) Free() {
+ if rs.c != nil {
+ rs.c.Close()
+ }
+}
+
+func (rs *pebbleRefSlice) Bytes() []byte {
+ if rs.b == nil {
+ return nil
+ }
+ b := make([]byte, len(rs.b))
+ copy(b, rs.b)
+ return b
+}
+
+func (rs *pebbleRefSlice) Data() []byte {
+ return rs.b
+}
+
+func GetRocksdbUint64(v []byte, err error) (uint64, error) {
+ if err != nil {
+ return 0, err
+ } else if v == nil || len(v) == 0 {
+ return 0, nil
+ } else if len(v) != 8 {
+ return 0, errIntNumber
+ }
+
+ return binary.LittleEndian.Uint64(v), nil
+}
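+// Illustrative behavior of GetRocksdbUint64, derived from the code above:
+// nil or empty input decodes to 0, an 8-byte value is read as little-endian,
+// and any other length yields errIntNumber.
+//
+//	buf := make([]byte, 8)
+//	binary.LittleEndian.PutUint64(buf, 42)
+//	n, _ := GetRocksdbUint64(buf, nil) // n == 42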
+
+type Uint64AddMerger struct {
+ buf []byte
+}
+
+func (m *Uint64AddMerger) MergeNewer(value []byte) error {
+ cur, err := GetRocksdbUint64(m.buf, nil)
+ if err != nil {
+ return err
+ }
+ vint, err := GetRocksdbUint64(value, nil)
+ if err != nil {
+ return err
+ }
+ nv := cur + vint
+ if m.buf == nil {
+ m.buf = make([]byte, 8)
+ }
+ binary.LittleEndian.PutUint64(m.buf, nv)
+ return nil
+}
+
+func (m *Uint64AddMerger) MergeOlder(value []byte) error {
+ return m.MergeNewer(value)
+}
+
+func (m *Uint64AddMerger) Finish() ([]byte, io.Closer, error) {
+ return m.buf, nil, nil
+}
+
+func newUint64AddMerger() *pebble.Merger {
+ return &pebble.Merger{
+ Merge: func(key, value []byte) (pebble.ValueMerger, error) {
+ res := &Uint64AddMerger{}
+ res.MergeNewer(value)
+ return res, nil
+ },
+ // the name should match the rocksdb default merge name
+ Name: "UInt64AddOperator",
+ }
+}
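+// Illustrative merge semantics (a sketch, not part of this change): values are
+// little-endian uint64 counters merged by addition, matching GetRocksdbUint64.
+//
+//	a := make([]byte, 8)
+//	binary.LittleEndian.PutUint64(a, 3)
+//	m := &Uint64AddMerger{buf: a}
+//	b := make([]byte, 8)
+//	binary.LittleEndian.PutUint64(b, 4)
+//	_ = m.MergeNewer(b)
+//	res, _, _ := m.Finish() // res decodes to 7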
+
+type sharedPebbleConfig struct {
+ SharedCache *pebble.Cache
+}
+
+func newSharedPebblekConfig(opt RockOptions) *sharedPebbleConfig {
+ sc := &sharedPebbleConfig{}
+ if opt.UseSharedCache {
+ if opt.BlockCache <= 0 {
+ v, err := mem.VirtualMemory()
+ if err != nil {
+ opt.BlockCache = 1024 * 1024 * 128 * 10
+ } else {
+ opt.BlockCache = int64(v.Total / 10)
+ if opt.CacheIndexAndFilterBlocks || opt.EnablePartitionedIndexFilter {
+ opt.BlockCache *= 2
+ }
+ }
+ }
+ sc.SharedCache = pebble.NewCache(opt.BlockCache)
+ }
+ return sc
+}
+
+func (sc *sharedPebbleConfig) ChangeLimiter(bytesPerSec int64) {
+}
+
+func (sc *sharedPebbleConfig) Destroy() {
+ if sc.SharedCache != nil {
+ sc.SharedCache.Unref()
+ }
+}
+
+type PebbleEng struct {
+ rwmutex sync.RWMutex
+ cfg *RockEngConfig
+ eng *pebble.DB
+ opts *pebble.Options
+ wo *pebble.WriteOptions
+ ito *pebble.IterOptions
+ wb *pebbleWriteBatch
+ engOpened int32
+ lastCompact int64
+ deletedCnt int64
+ quit chan struct{}
+}
+
+func NewPebbleEng(cfg *RockEngConfig) (*PebbleEng, error) {
+ if len(cfg.DataDir) == 0 {
+ return nil, errors.New("config error")
+ }
+
+ if !cfg.ReadOnly {
+ err := os.MkdirAll(cfg.DataDir, common.DIR_PERM)
+ if err != nil {
+ return nil, err
+ }
+ }
+ lopts := make([]pebble.LevelOptions, 0)
+ for l := 0; l < numOfLevels; l++ {
+ compress := pebble.SnappyCompression
+ if l <= cfg.MinLevelToCompress {
+ compress = pebble.NoCompression
+ }
+ filter := bloom.FilterPolicy(10)
+ opt := pebble.LevelOptions{
+ Compression: compress,
+ BlockSize: cfg.BlockSize,
+ TargetFileSize: int64(cfg.TargetFileSizeBase),
+ FilterPolicy: filter,
+ }
+ opt.EnsureDefaults()
+ lopts = append(lopts, opt)
+ }
+
+ opts := &pebble.Options{
+ Levels: lopts,
+ MaxManifestFileSize: int64(cfg.MaxMainifestFileSize),
+ MemTableSize: cfg.WriteBufferSize,
+ MemTableStopWritesThreshold: cfg.MaxWriteBufferNumber,
+ LBaseMaxBytes: int64(cfg.MaxBytesForLevelBase),
+ L0CompactionThreshold: cfg.Level0FileNumCompactionTrigger,
+ MaxOpenFiles: -1,
+ MaxConcurrentCompactions: cfg.MaxBackgroundCompactions,
+ EventListener: pebble.MakeLoggingEventListener(nil),
+ }
+ opts.EventListener.WALCreated = nil
+ opts.EventListener.WALDeleted = nil
+ opts.EventListener.FlushBegin = nil
+ opts.EventListener.FlushEnd = nil
+ opts.EventListener.TableCreated = nil
+ opts.EventListener.TableDeleted = nil
+ opts.EventListener.ManifestCreated = nil
+ opts.EventListener.ManifestDeleted = nil
+ if cfg.DisableWAL {
+ opts.DisableWAL = true
+ }
+	// prefix search: split keys on a fixed-length prefix so pebble can build
+	// prefix bloom filters and support prefix iteration
+ comp := *pebble.DefaultComparer
+ opts.Comparer = &comp
+ opts.Comparer.Split = func(a []byte) int {
+ if len(a) <= 3 {
+ return len(a)
+ }
+ return 3
+ }
+ if !cfg.DisableMergeCounter {
+ if cfg.EnableTableCounter {
+ opts.Merger = newUint64AddMerger()
+ }
+ } else {
+ cfg.EnableTableCounter = false
+ }
+ db := &PebbleEng{
+ cfg: cfg,
+ opts: opts,
+ ito: &pebble.IterOptions{},
+ wo: &pebble.WriteOptions{
+ Sync: !cfg.DisableWAL,
+ },
+ quit: make(chan struct{}),
+ }
+ if cfg.AutoCompacted {
+ go db.compactLoop()
+ }
+
+ return db, nil
+}
+
+func (pe *PebbleEng) NewWriteBatch() WriteBatch {
+ if pe.eng == nil {
+ panic("nil engine, should only get write batch after db opened")
+ }
+ return newPebbleWriteBatch(pe.eng, pe.wo)
+}
+
+func (pe *PebbleEng) DefaultWriteBatch() WriteBatch {
+ if pe.wb == nil {
+ panic("nil write batch, should only get write batch after db opened")
+ }
+ return pe.wb
+}
+
+func (pe *PebbleEng) GetDataDir() string {
+ return path.Join(pe.cfg.DataDir, "pebble")
+}
+
+func (pe *PebbleEng) SetCompactionFilter(ICompactFilter) {
+}
+
+func (pe *PebbleEng) SetMaxBackgroundOptions(maxCompact int, maxBackJobs int) error {
+ return nil
+}
+
+func (pe *PebbleEng) compactLoop() {
+ ticker := time.NewTicker(time.Hour)
+ interval := (time.Hour / time.Second).Nanoseconds()
+ dbLog.Infof("start auto compact loop : %v", interval)
+ for {
+ select {
+ case <-pe.quit:
+ return
+ case <-ticker.C:
+ if (pe.DeletedBeforeCompact() > compactThreshold) &&
+ (time.Now().Unix()-pe.LastCompactTime()) > interval {
+ dbLog.Infof("auto compact : %v, %v", pe.DeletedBeforeCompact(), pe.LastCompactTime())
+ pe.CompactAllRange()
+ }
+ }
+ }
+}
+
+func (pe *PebbleEng) CheckDBEngForRead(fullPath string) error {
+ ro := pe.opts.Clone()
+ ro.ErrorIfNotExists = true
+ ro.ReadOnly = true
+ //ro.Cache = nil
+ db, err := pebble.Open(fullPath, ro)
+ if err != nil {
+ return err
+ }
+ db.Close()
+ return nil
+}
+
+func (pe *PebbleEng) OpenEng() error {
+ if !pe.IsClosed() {
+ dbLog.Warningf("engine already opened: %v, should close it before reopen", pe.GetDataDir())
+ return errors.New("open failed since not closed")
+ }
+ pe.rwmutex.Lock()
+ defer pe.rwmutex.Unlock()
+ if pe.cfg.UseSharedCache && pe.cfg.SharedConfig != nil {
+ sc, ok := pe.cfg.SharedConfig.(*sharedPebbleConfig)
+ if ok {
+ pe.opts.Cache = sc.SharedCache
+ dbLog.Infof("using shared cache for pebble engine")
+ }
+ } else {
+ cache := pebble.NewCache(pe.cfg.BlockCache)
+ defer cache.Unref()
+ pe.opts.Cache = cache
+ }
+ opt := pe.opts
+ if pe.cfg.ReadOnly {
+ opt = pe.opts.Clone()
+ opt.ErrorIfNotExists = true
+ opt.ReadOnly = true
+ }
+ eng, err := pebble.Open(pe.GetDataDir(), opt)
+ if err != nil {
+ return err
+ }
+ pe.wb = newPebbleWriteBatch(eng, pe.wo)
+ pe.eng = eng
+ atomic.StoreInt32(&pe.engOpened, 1)
+ dbLog.Infof("engine opened: %v", pe.GetDataDir())
+ return nil
+}
+
+func (pe *PebbleEng) Write(wb WriteBatch) error {
+ return wb.Commit()
+}
+
+func (pe *PebbleEng) DeletedBeforeCompact() int64 {
+ return atomic.LoadInt64(&pe.deletedCnt)
+}
+
+func (pe *PebbleEng) AddDeletedCnt(c int64) {
+ atomic.AddInt64(&pe.deletedCnt, c)
+}
+
+func (pe *PebbleEng) LastCompactTime() int64 {
+ return atomic.LoadInt64(&pe.lastCompact)
+}
+
+func (pe *PebbleEng) CompactRange(rg CRange) {
+ atomic.StoreInt64(&pe.lastCompact, time.Now().Unix())
+ atomic.StoreInt64(&pe.deletedCnt, 0)
+ pe.rwmutex.RLock()
+ closed := pe.IsClosed()
+ pe.rwmutex.RUnlock()
+ if closed {
+ return
+ }
+ pe.eng.Compact(rg.Start, rg.Limit)
+}
+
+func (pe *PebbleEng) CompactAllRange() {
+ pe.CompactRange(CRange{})
+}
+
+func (pe *PebbleEng) DisableManualCompact(disable bool) {
+}
+
+func (pe *PebbleEng) GetApproximateTotalKeyNum() int {
+ return 0
+}
+
+func (pe *PebbleEng) GetApproximateKeyNum(ranges []CRange) uint64 {
+ return 0
+}
+
+func (pe *PebbleEng) SetOptsForLogStorage() {
+ return
+}
+
+func (pe *PebbleEng) GetApproximateSizes(ranges []CRange, includeMem bool) []uint64 {
+ pe.rwmutex.RLock()
+ defer pe.rwmutex.RUnlock()
+ sizeList := make([]uint64, len(ranges))
+ if pe.IsClosed() {
+ return sizeList
+ }
+ for i, r := range ranges {
+ sizeList[i], _ = pe.eng.EstimateDiskUsage(r.Start, r.Limit)
+ }
+ return sizeList
+}
+
+func (pe *PebbleEng) IsClosed() bool {
+	return atomic.LoadInt32(&pe.engOpened) == 0
+}
+
+func (pe *PebbleEng) CloseEng() bool {
+ pe.rwmutex.Lock()
+ defer pe.rwmutex.Unlock()
+ if pe.eng != nil {
+ if atomic.CompareAndSwapInt32(&pe.engOpened, 1, 0) {
+ if pe.wb != nil {
+ pe.wb.Destroy()
+ }
+ pe.eng.Close()
+ dbLog.Infof("engine closed: %v", pe.GetDataDir())
+ return true
+ }
+ }
+ return false
+}
+
+func (pe *PebbleEng) CloseAll() {
+ select {
+ case <-pe.quit:
+ default:
+ close(pe.quit)
+ }
+ pe.CloseEng()
+}
+
+func (pe *PebbleEng) GetStatistics() string {
+ pe.rwmutex.RLock()
+ defer pe.rwmutex.RUnlock()
+ if pe.IsClosed() {
+ return ""
+ }
+ return pe.eng.Metrics().String()
+}
+
+func (pe *PebbleEng) GetInternalStatus() map[string]interface{} {
+ s := make(map[string]interface{})
+ s["internal"] = pe.GetStatistics()
+ return s
+}
+
+func (pe *PebbleEng) GetInternalPropertyStatus(p string) string {
+ return p
+}
+
+func (pe *PebbleEng) GetBytesNoLock(key []byte) ([]byte, error) {
+ val, err := pe.GetRefNoLock(key)
+ if err != nil {
+ return nil, err
+ }
+ if val == nil {
+ return nil, nil
+ }
+ defer val.Free()
+ if val.Data() == nil {
+ return nil, nil
+ }
+ return val.Bytes(), nil
+}
+
+func (pe *PebbleEng) GetBytes(key []byte) ([]byte, error) {
+ pe.rwmutex.RLock()
+ defer pe.rwmutex.RUnlock()
+ if pe.IsClosed() {
+ return nil, errDBEngClosed
+ }
+ return pe.GetBytesNoLock(key)
+}
+
+func (pe *PebbleEng) MultiGetBytes(keyList [][]byte, values [][]byte, errs []error) {
+ pe.rwmutex.RLock()
+ defer pe.rwmutex.RUnlock()
+ if pe.IsClosed() {
+		for i := range errs {
+ errs[i] = errDBEngClosed
+ }
+ return
+ }
+ for i, k := range keyList {
+ values[i], errs[i] = pe.GetBytesNoLock(k)
+ }
+}
+
+func (pe *PebbleEng) Exist(key []byte) (bool, error) {
+ pe.rwmutex.RLock()
+ defer pe.rwmutex.RUnlock()
+ if pe.IsClosed() {
+ return false, errDBEngClosed
+ }
+ return pe.ExistNoLock(key)
+}
+
+func (pe *PebbleEng) ExistNoLock(key []byte) (bool, error) {
+ val, err := pe.GetRefNoLock(key)
+ if err != nil {
+ return false, err
+ }
+ if val == nil {
+ return false, nil
+ }
+ ok := val.Data() != nil
+ val.Free()
+ return ok, nil
+}
+
+func (pe *PebbleEng) GetRefNoLock(key []byte) (RefSlice, error) {
+ val, c, err := pe.eng.Get(key)
+ if err != nil && err != pebble.ErrNotFound {
+ return nil, err
+ }
+ return &pebbleRefSlice{b: val, c: c}, nil
+}
+
+func (pe *PebbleEng) GetRef(key []byte) (RefSlice, error) {
+ pe.rwmutex.RLock()
+ defer pe.rwmutex.RUnlock()
+ if pe.IsClosed() {
+ return nil, errDBEngClosed
+ }
+ return pe.GetRefNoLock(key)
+}
+
+func (pe *PebbleEng) GetValueWithOp(key []byte,
+ op func([]byte) error) error {
+ pe.rwmutex.RLock()
+ defer pe.rwmutex.RUnlock()
+ if pe.IsClosed() {
+ return errDBEngClosed
+ }
+ return pe.GetValueWithOpNoLock(key, op)
+}
+
+func (pe *PebbleEng) GetValueWithOpNoLock(key []byte,
+ op func([]byte) error) error {
+	val, err := pe.GetRefNoLock(key)
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ return op(nil)
+ }
+ defer val.Free()
+ return op(val.Data())
+}
+
+func (pe *PebbleEng) DeleteFilesInRange(rg CRange) {
+ return
+}
+
+func (pe *PebbleEng) GetIterator(opts IteratorOpts) (Iterator, error) {
+ dbit, err := newPebbleIterator(pe, opts)
+ if err != nil {
+ return nil, err
+ }
+ return dbit, nil
+}
+
+func (pe *PebbleEng) NewCheckpoint(printToStdoutAlso bool) (KVCheckpoint, error) {
+ return &pebbleEngCheckpoint{
+ pe: pe,
+ }, nil
+}
+
+type pebbleEngCheckpoint struct {
+ pe *PebbleEng
+}
+
+func (pck *pebbleEngCheckpoint) Save(path string, notify chan struct{}) error {
+ pck.pe.rwmutex.RLock()
+ defer pck.pe.rwmutex.RUnlock()
+ if pck.pe.IsClosed() {
+ return errDBEngClosed
+ }
+ if notify != nil {
+ time.AfterFunc(time.Millisecond*20, func() {
+ close(notify)
+ })
+ }
+ return pck.pe.eng.Checkpoint(path)
+}
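+// Illustrative checkpoint usage (a sketch, not part of this change; the target
+// path is hypothetical and the pattern mirrors the pebble engine tests in this PR):
+//
+//	ck, _ := pe.NewCheckpoint(false)
+//	notify := make(chan struct{})
+//	if err := ck.Save("/data/ck-0001", notify); err != nil {
+//		// handle error
+//	}
+//	// notify is closed early by the engine so callers can stop blocking writes
+//	// while the checkpoint data is persisted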
diff --git a/engine/pebble_eng_test.go b/engine/pebble_eng_test.go
new file mode 100644
index 00000000..898f10c2
--- /dev/null
+++ b/engine/pebble_eng_test.go
@@ -0,0 +1,199 @@
+package engine
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "runtime"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestPebbleCheckpointDuringWrite(t *testing.T) {
+ SetLogger(0, nil)
+ cfg := NewRockConfig()
+ tmpDir, err := ioutil.TempDir("", "checkpoint")
+ assert.Nil(t, err)
+ t.Log(tmpDir)
+ defer os.RemoveAll(tmpDir)
+ cfg.DataDir = tmpDir
+ eng, err := NewPebbleEng(cfg)
+ assert.Nil(t, err)
+ err = eng.OpenEng()
+ assert.Nil(t, err)
+ defer eng.CloseAll()
+
+ start := time.Now()
+ stopC := make(chan struct{})
+ var wg sync.WaitGroup
+ lastTs := time.Now().UnixNano()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case <-stopC:
+ return
+ default:
+ }
+ begin := time.Now()
+ ck, err := eng.NewCheckpoint(false)
+ assert.Nil(t, err)
+ // test save should not block, so lastTs should be updated soon
+ ckpath := path.Join(tmpDir, "newCk")
+ err = ck.Save(ckpath, make(chan struct{}))
+ assert.Nil(t, err)
+ atomic.StoreInt64(&lastTs, time.Now().UnixNano())
+ if time.Since(begin) > time.Second*5 {
+ t.Logf("checkpoint too long: %v, %v", begin, time.Since(begin))
+ }
+ os.RemoveAll(ckpath)
+ time.Sleep(time.Millisecond * 100)
+ }
+ }()
+ bigV := make([]byte, 8000)
+ var panicTimer *time.Timer
+ for {
+ for i := 0; i < 100; i++ {
+ wb := eng.DefaultWriteBatch()
+ for j := 0; j < 100; j++ {
+ wb.Put([]byte("test"+strconv.Itoa(i+j)), []byte("test"+strconv.Itoa(i+j)+string(bigV)))
+ }
+ eng.Write(wb)
+ wb.Clear()
+ tn := time.Now().UnixNano()
+ if atomic.LoadInt64(&lastTs)+time.Second.Nanoseconds()*30 < tn {
+ t.Errorf("failed to wait checkpoint update: %v, %v", atomic.LoadInt64(&lastTs), tn)
+ panicTimer = time.AfterFunc(time.Second*10, func() {
+ buf := make([]byte, 1024*1024)
+ runtime.Stack(buf, true)
+ fmt.Printf("%s", buf)
+ panic("failed")
+ })
+ break
+ }
+ if time.Since(start) > time.Minute {
+ break
+ }
+ }
+ if panicTimer != nil {
+ break
+ }
+ time.Sleep(time.Microsecond * 10)
+ if time.Since(start) > time.Minute {
+ break
+ }
+ }
+ close(stopC)
+ t.Log("waiting stop")
+ wg.Wait()
+ t.Log("waiting stopped")
+ if panicTimer != nil {
+ panicTimer.Stop()
+ }
+ time.Sleep(time.Second * 2)
+}
+
+func TestPebbleReopenAndCheck(t *testing.T) {
+ SetLogger(0, nil)
+ cfg := NewRockConfig()
+ tmpDir, err := ioutil.TempDir("", "checkpoint")
+ assert.Nil(t, err)
+ t.Log(tmpDir)
+ defer os.RemoveAll(tmpDir)
+ cfg.DataDir = tmpDir
+ pe, err := NewPebbleEng(cfg)
+ err = pe.OpenEng()
+ assert.Nil(t, err)
+ wb := pe.DefaultWriteBatch()
+ wb.Put([]byte("test"), []byte("test"))
+ err = pe.Write(wb)
+ assert.Nil(t, err)
+ wb.Clear()
+ ck, _ := pe.NewCheckpoint(false)
+ err = ck.Save(path.Join(tmpDir, "cktmp"), make(chan struct{}))
+ assert.Nil(t, err)
+
+ err = pe.CheckDBEngForRead(path.Join(tmpDir, "cktmp"))
+ assert.Nil(t, err)
+ pe.CloseEng()
+
+ pe.OpenEng()
+ time.Sleep(time.Second * 10)
+
+ pe.CloseEng()
+ pe.OpenEng()
+
+ pe.CloseAll()
+ time.Sleep(time.Second * 10)
+}
+
+func TestPebbleSharedCacheForMulti(t *testing.T) {
+ SetLogger(0, nil)
+ cfg := NewRockConfig()
+ tmpDir, err := ioutil.TempDir("", "checkpoint")
+ assert.Nil(t, err)
+ t.Log(tmpDir)
+ defer os.RemoveAll(tmpDir)
+ cfg.DataDir = path.Join(tmpDir, "test")
+ cfg.UseSharedCache = true
+ cfg.SharedConfig = newSharedPebblekConfig(cfg.RockOptions)
+ pe, err := NewPebbleEng(cfg)
+ assert.Nil(t, err)
+ err = pe.OpenEng()
+ assert.Nil(t, err)
+ defer pe.CloseAll()
+
+ wb := pe.DefaultWriteBatch()
+ wb.Put([]byte("test"), []byte("test"))
+ err = pe.Write(wb)
+ assert.Nil(t, err)
+ wb.Clear()
+
+ pe.eng.Flush()
+
+ cfg2 := cfg
+ cfg2.DataDir = path.Join(tmpDir, "test2")
+ pe2, err := NewPebbleEng(cfg2)
+ assert.Nil(t, err)
+ err = pe2.OpenEng()
+ assert.Nil(t, err)
+ assert.Equal(t, pe.opts.Cache, pe2.opts.Cache)
+ defer pe2.CloseAll()
+
+ wb2 := pe2.DefaultWriteBatch()
+ wb2.Put([]byte("test"), []byte("test2"))
+ err = pe2.Write(wb2)
+ assert.Nil(t, err)
+ wb2.Clear()
+ pe2.eng.Flush()
+
+ v1, err := pe.GetBytes([]byte("test"))
+ assert.Nil(t, err)
+ assert.Equal(t, []byte("test"), v1)
+ v2, err := pe2.GetBytes([]byte("test"))
+ assert.Nil(t, err)
+ assert.Equal(t, []byte("test2"), v2)
+
+ wb = pe.DefaultWriteBatch()
+ wb.Put([]byte("test"), []byte("test"))
+ err = pe.Write(wb)
+ assert.Nil(t, err)
+ wb.Clear()
+ pe.eng.Flush()
+
+ v1, err = pe.GetBytes([]byte("test"))
+ assert.Nil(t, err)
+ assert.Equal(t, []byte("test"), v1)
+ v2, err = pe2.GetBytes([]byte("test"))
+ assert.Nil(t, err)
+ assert.Equal(t, []byte("test2"), v2)
+
+ time.Sleep(time.Second * 10)
+}
diff --git a/engine/pebble_iter.go b/engine/pebble_iter.go
new file mode 100644
index 00000000..cbf72666
--- /dev/null
+++ b/engine/pebble_iter.go
@@ -0,0 +1,120 @@
+package engine
+
+import (
+ "github.com/cockroachdb/pebble"
+ "github.com/youzan/ZanRedisDB/common"
+)
+
+type pebbleIterator struct {
+ *pebble.Iterator
+ db *PebbleEng
+ opt *pebble.IterOptions
+ snap *pebble.Snapshot
+ removeTsType byte
+}
+
+// the lower bound is inclusive and the upper bound is exclusive
+func newPebbleIterator(db *PebbleEng, opts IteratorOpts) (*pebbleIterator, error) {
+ db.rwmutex.RLock()
+ if db.IsClosed() {
+ db.rwmutex.RUnlock()
+ return nil, errDBEngClosed
+ }
+ upperBound := opts.Max
+ lowerBound := opts.Min
+ if opts.Type&common.RangeROpen <= 0 && upperBound != nil {
+		// the range is right-closed, so the max key should be included;
+		// since upperBound is exclusive, extend the max with a zero byte
+ upperBound = append(upperBound, 0)
+ }
+
+ opt := &pebble.IterOptions{}
+ opt.LowerBound = lowerBound
+ opt.UpperBound = upperBound
+ dbit := &pebbleIterator{
+ db: db,
+ opt: opt,
+ }
+
+ if opts.WithSnap {
+ dbit.snap = db.eng.NewSnapshot()
+ dbit.Iterator = dbit.snap.NewIter(opt)
+ } else {
+ dbit.Iterator = db.eng.NewIter(opt)
+ }
+ return dbit, nil
+}
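+// Illustrative snapshot read (a sketch, not part of this change): with
+// IteratorOpts{WithSnap: true} the iterator reads from a pebble snapshot, so
+// writes issued after the iterator is created are not visible to it.
+//
+//	it, _ := pe.GetIterator(IteratorOpts{WithSnap: true})
+//	defer it.Close()
+//	for it.SeekToFirst(); it.Valid(); it.Next() {
+//		_ = it.RefKey()
+//	}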
+
+func (it *pebbleIterator) Next() {
+ it.Iterator.Next()
+}
+
+func (it *pebbleIterator) Prev() {
+ it.Iterator.Prev()
+}
+
+func (it *pebbleIterator) Seek(key []byte) {
+ it.Iterator.SeekGE(key)
+}
+
+func (it *pebbleIterator) SeekForPrev(key []byte) {
+ it.Iterator.SeekLT(key)
+}
+
+func (it *pebbleIterator) SeekToFirst() {
+ it.Iterator.First()
+}
+
+func (it *pebbleIterator) SeekToLast() {
+ it.Iterator.Last()
+}
+
+func (it *pebbleIterator) Valid() bool {
+ if it.Iterator.Error() != nil {
+ return false
+ }
+ return it.Iterator.Valid()
+}
+
+// RefKey returns the key without copying; the bytes are only valid until the
+// next call on the iterator
+func (it *pebbleIterator) RefKey() []byte {
+ return it.Iterator.Key()
+}
+
+func (it *pebbleIterator) Key() []byte {
+ v := it.Iterator.Key()
+ vv := make([]byte, len(v))
+ copy(vv, v)
+ return vv
+}
+
+// RefValue returns the value without copying; the bytes are only valid until
+// the next call on the iterator
+func (it *pebbleIterator) RefValue() []byte {
+ v := it.Iterator.Value()
+ if (it.removeTsType == KVType || it.removeTsType == HashType) && len(v) >= tsLen {
+ v = v[:len(v)-tsLen]
+ }
+ return v
+}
+
+func (it *pebbleIterator) Value() []byte {
+ v := it.RefValue()
+ vv := make([]byte, len(v))
+ copy(vv, v)
+ return vv
+}
+
+func (it *pebbleIterator) NoTimestamp(vt byte) {
+ it.removeTsType = vt
+}
+
+func (it *pebbleIterator) Close() {
+ if it.Iterator != nil {
+ it.Iterator.Close()
+ }
+ if it.snap != nil {
+ it.snap.Close()
+ }
+ it.db.rwmutex.RUnlock()
+}
diff --git a/engine/radix_iter.go b/engine/radix_iter.go
new file mode 100644
index 00000000..dabe52b2
--- /dev/null
+++ b/engine/radix_iter.go
@@ -0,0 +1,123 @@
+package engine
+
+import (
+ memdb "github.com/youzan/ZanRedisDB/engine/radixdb"
+)
+
+type radixIterator struct {
+ miTxn *memdb.Txn
+ cursor interface{}
+ cursorKey []byte
+ resIter memdb.ResultIterator
+ isReverser bool
+ err error
+}
+
+// Valid returns false when the iterator has moved past either the first or the
+// last key in the database, or when the iterator is in an error state.
+func (iter *radixIterator) Valid() bool {
+ if iter.err != nil {
+ return false
+ }
+ return iter.cursor != nil
+}
+
+// Key returns the key the iterator currently holds.
+func (iter *radixIterator) Key() []byte {
+ if iter.cursorKey != nil {
+ return iter.cursorKey
+ }
+ dbk, _, _ := memdb.KVFromObject(iter.cursor)
+ return dbk
+}
+
+// Value returns the value in the database the iterator currently holds.
+func (iter *radixIterator) Value() []byte {
+ _, dbv, _ := memdb.KVFromObject(iter.cursor)
+ return dbv
+}
+
+// Next moves the iterator to the next sequential key in the database.
+func (iter *radixIterator) Next() {
+ if iter.resIter == nil {
+ return
+ }
+ if iter.isReverser {
+		// convert the iterator back to forward (non-reverse) order before stepping
+ iter.Seek(iter.Key())
+ if iter.err != nil {
+ return
+ }
+ }
+ iter.cursorKey, iter.cursor = iter.resIter.Next()
+}
+
+// Prev moves the iterator to the previous sequential key in the database.
+func (iter *radixIterator) Prev() {
+ if iter.resIter == nil {
+ return
+ }
+ if !iter.isReverser {
+ iter.SeekForPrev(iter.Key())
+ if iter.err != nil {
+ return
+ }
+ }
+ // for reverse iterator, prev is just next
+ iter.cursorKey, iter.cursor = iter.resIter.Next()
+}
+
+// First moves the iterator to the first key in the database.
+func (iter *radixIterator) First() {
+ resIter, err := iter.miTxn.Get(nil)
+ if err != nil {
+ iter.err = err
+ return
+ }
+ iter.resIter = resIter
+ iter.isReverser = false
+ iter.Next()
+}
+
+// Last moves the iterator to the last key in the database.
+func (iter *radixIterator) Last() {
+ resIter, err := iter.miTxn.GetReverse(nil)
+ if err != nil {
+ iter.err = err
+ return
+ }
+ iter.resIter = resIter
+ iter.isReverser = true
+ iter.Prev()
+}
+
+// Seek moves the iterator to the position greater than or equal to the key.
+func (iter *radixIterator) Seek(key []byte) {
+ resIter, err := iter.miTxn.LowerBound(key)
+ if err != nil {
+ iter.err = err
+ return
+ }
+ iter.resIter = resIter
+ iter.isReverser = false
+ iter.Next()
+}
+
+// SeekForPrev moves the iterator to the last key that is less than or equal to
+// the target key. With a prefix extractor enabled, Seek() followed by Prev()
+// does not work when the seek lands at the end of the prefix range, so use
+// SeekForPrev instead.
+func (iter *radixIterator) SeekForPrev(key []byte) {
+ resIter, err := iter.miTxn.ReverseLowerBound(key)
+ if err != nil {
+ iter.err = err
+ return
+ }
+ iter.resIter = resIter
+ iter.isReverser = true
+ iter.Prev()
+}
+
+// Close closes the iterator.
+func (iter *radixIterator) Close() {
+ iter.miTxn.Abort()
+}
diff --git a/engine/radix_mem.go b/engine/radix_mem.go
new file mode 100644
index 00000000..365d6282
--- /dev/null
+++ b/engine/radix_mem.go
@@ -0,0 +1,79 @@
+package engine
+
+import (
+ "sync/atomic"
+
+ memdb "github.com/youzan/ZanRedisDB/engine/radixdb"
+)
+
+const (
+ defaultTableName = "default"
+)
+
+type radixMemIndex struct {
+ memkv *memdb.MemDB
+ closed int32
+}
+
+func NewRadix() (*radixMemIndex, error) {
+ memkv, err := memdb.NewMemDB()
+ return &radixMemIndex{
+ memkv: memkv,
+ }, err
+}
+
+func (mi *radixMemIndex) Destroy() {
+ atomic.StoreInt32(&mi.closed, 1)
+}
+
+func (mi *radixMemIndex) IsClosed() bool {
+ return atomic.LoadInt32(&mi.closed) == 1
+}
+
+func (mi *radixMemIndex) Len() int64 {
+ cs := mi.memkv.Size()
+ return int64(cs)
+}
+
+func (mi *radixMemIndex) NewIterator() (*radixIterator, error) {
+ txn := mi.memkv.Snapshot().Txn(false)
+ return &radixIterator{
+ miTxn: txn.Snapshot(),
+ }, nil
+}
+
+func (mi *radixMemIndex) Get(key []byte) ([]byte, error) {
+ sn := mi.memkv.Snapshot()
+ txn := sn.Txn(false)
+ defer txn.Abort()
+
+	// First does an exact-match lookup when the key is not nil
+ _, v, err := txn.First(key)
+ if err != nil {
+ return nil, err
+ }
+ if v == nil {
+ return nil, nil
+ }
+ _, dbv, err := memdb.KVFromObject(v)
+ if err != nil {
+ return nil, err
+ }
+ return dbv, nil
+}
+
+func (mi *radixMemIndex) Put(txn *memdb.Txn, key []byte, value []byte) error {
+ nk := make([]byte, len(key))
+ nv := make([]byte, len(value))
+ copy(nk, key)
+ copy(nv, value)
+ return txn.Insert(nk, nv)
+}
+
+func (mi *radixMemIndex) Delete(txn *memdb.Txn, key []byte) error {
+ err := txn.Delete(key)
+ if err == memdb.ErrNotFound {
+ return nil
+ }
+ return err
+}
diff --git a/engine/radixdb/memdb.go b/engine/radixdb/memdb.go
new file mode 100644
index 00000000..a45623bb
--- /dev/null
+++ b/engine/radixdb/memdb.go
@@ -0,0 +1,94 @@
+package radixdb
+
+import (
+ "sync"
+ "sync/atomic"
+ "unsafe"
+
+ iradix "github.com/hashicorp/go-immutable-radix"
+)
+
+// MemDB is an in-memory key/value database.
+//
+// This is a trimmed-down variant that keeps items in a single default table
+// with an id index, and makes use of immutable radix trees to provide
+// transactions and MVCC.
+type MemDB struct {
+ root unsafe.Pointer // *iradix.Tree underneath
+ primary bool
+
+ // There can only be a single writer at once
+ writer sync.Mutex
+}
+
+// NewMemDB creates a new, empty MemDB
+func NewMemDB() (*MemDB, error) {
+ // Create the MemDB
+ db := &MemDB{
+ root: unsafe.Pointer(iradix.New()),
+ primary: true,
+ }
+ if err := db.initialize(); err != nil {
+ return nil, err
+ }
+
+ return db, nil
+}
+
+// getRoot is used to do an atomic load of the root pointer
+func (db *MemDB) getRoot() *iradix.Tree {
+ root := (*iradix.Tree)(atomic.LoadPointer(&db.root))
+ return root
+}
+
+func (db *MemDB) Size() int {
+ root := db.getRoot()
+ path := indexPath(defTable, id)
+ raw, _ := root.Get(path)
+ indexTree, ok := raw.(*iradix.Tree)
+ if ok {
+ return indexTree.Len()
+ }
+ return 0
+}
+
+// Txn is used to start a new transaction, in either read or write mode.
+// There can only be a single concurrent writer, but any number of readers.
+func (db *MemDB) Txn(write bool) *Txn {
+ if write {
+ db.writer.Lock()
+ }
+ txn := &Txn{
+ db: db,
+ write: write,
+ rootTxn: db.getRoot().Txn(),
+ }
+ return txn
+}
+
+// Snapshot is used to capture a point-in-time snapshot
+// of the database that will not be affected by any write
+// operations to the existing DB.
+func (db *MemDB) Snapshot() *MemDB {
+ clone := &MemDB{
+ root: unsafe.Pointer(db.getRoot()),
+ primary: false,
+ }
+ return clone
+}
+
+// initialize is used to set up the DB for use after creation. It should
+// be called only once, right after allocating a MemDB.
+func (db *MemDB) initialize() error {
+ root := db.getRoot()
+ index := iradix.New()
+ path := indexPath(defTable, id)
+ root, _, _ = root.Insert(path, index)
+ db.root = unsafe.Pointer(root)
+ return nil
+}
+
+// indexPath returns the path from the root to the given table index
+func indexPath(table, index string) []byte {
+ return []byte(table + "." + index)
+}
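+// Illustrative transaction usage (a sketch, not part of this change; it mirrors
+// how engine/radix_mem.go drives this package):
+//
+//	db, _ := NewMemDB()
+//	txn := db.Txn(true)
+//	_ = txn.Insert([]byte("k"), []byte("v"))
+//	txn.Commit()
+//
+//	read := db.Snapshot().Txn(false)
+//	_, obj, _ := read.First([]byte("k"))
+//	_, val, _ := KVFromObject(obj) // val == []byte("v")
+//	read.Abort()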
diff --git a/engine/radixdb/txn.go b/engine/radixdb/txn.go
new file mode 100644
index 00000000..16c960c7
--- /dev/null
+++ b/engine/radixdb/txn.go
@@ -0,0 +1,535 @@
+package radixdb
+
+import (
+ "errors"
+ "fmt"
+ "sync/atomic"
+ "unsafe"
+
+ iradix "github.com/hashicorp/go-immutable-radix"
+)
+
+const (
+ id = "id"
+ defTable = "default"
+)
+
+var (
+ // ErrNotFound is returned when the requested item is not found
+ ErrNotFound = fmt.Errorf("not found")
+)
+
+type dbItem struct {
+ Key []byte
+ Value []byte
+}
+
+func KVFromObject(obj interface{}) ([]byte, []byte, error) {
+ item, ok := obj.(*dbItem)
+ if !ok {
+ return nil, nil, errors.New("unknown data type")
+ }
+ return item.Key, item.Value, nil
+}
+
+// return the key from obj
+func FromObject(obj interface{}) ([]byte, error) {
+ item, ok := obj.(*dbItem)
+ if !ok {
+ return nil, errors.New("unknown data")
+ }
+ return item.Key, nil
+}
+
+func toIndexKey(key []byte) []byte {
+ if key == nil {
+ return nil
+ }
+ key = append(key, '\x00')
+ return key
+}
+
+func extractFromIndexKey(key []byte) []byte {
+ if len(key) == 0 {
+ return key
+ }
+ return key[:len(key)-1]
+}
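+// Illustrative effect of the helpers above (a sketch, not part of this change):
+// index keys carry a trailing null terminator, presumably so that a stored key
+// is never a strict prefix of another stored key inside the radix tree.
+//
+//	toIndexKey([]byte("ab"))              // -> "ab\x00"
+//	extractFromIndexKey([]byte("ab\x00")) // -> "ab"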
+
+// Txn is a transaction against a MemDB.
+// This can be a read or write transaction.
+type Txn struct {
+ db *MemDB
+ write bool
+ rootTxn *iradix.Txn
+ after []func()
+
+ modified *iradix.Txn
+}
+
+// readableIndex returns a transaction usable for reading the given index in a
+// table. If the transaction is a write transaction with modifications, a clone of the
+// modified index will be returned.
+func (txn *Txn) readableIndex(table, index string) *iradix.Txn {
+ // Look for existing transaction
+ if txn.write && txn.modified != nil {
+ return txn.modified.Clone()
+ }
+
+ // Create a read transaction
+ path := indexPath(table, index)
+ raw, _ := txn.rootTxn.Get(path)
+ indexTxn := raw.(*iradix.Tree).Txn()
+ return indexTxn
+}
+
+// writableIndex returns a transaction usable for modifying the
+// given index in a table.
+func (txn *Txn) writableIndex(table, index string) *iradix.Txn {
+ if txn.modified != nil {
+ return txn.modified
+ }
+
+ // Start a new transaction
+ path := indexPath(table, index)
+ raw, _ := txn.rootTxn.Get(path)
+ indexTxn := raw.(*iradix.Tree).Txn()
+
+ // Keep this open for the duration of the txn
+ txn.modified = indexTxn
+ return indexTxn
+}
+
+// Abort is used to cancel this transaction.
+// This is a noop for read transactions.
+func (txn *Txn) Abort() {
+ // Noop for a read transaction
+ if !txn.write {
+ return
+ }
+
+ // Check if already aborted or committed
+ if txn.rootTxn == nil {
+ return
+ }
+
+ // Clear the txn
+ txn.rootTxn = nil
+ txn.modified = nil
+
+ // Release the writer lock since this is invalid
+ txn.db.writer.Unlock()
+}
+
+// Commit is used to finalize this transaction.
+// This is a noop for read transactions.
+func (txn *Txn) Commit() {
+ // Noop for a read transaction
+ if !txn.write {
+ return
+ }
+
+ // Check if already aborted or committed
+ if txn.rootTxn == nil {
+ return
+ }
+
+ // Commit each sub-transaction scoped to (table, index)
+ if txn.modified != nil {
+ path := indexPath(defTable, id)
+ final := txn.modified.CommitOnly()
+ txn.rootTxn.Insert(path, final)
+ }
+
+ // Update the root of the DB
+ newRoot := txn.rootTxn.CommitOnly()
+ atomic.StorePointer(&txn.db.root, unsafe.Pointer(newRoot))
+
+ // Now issue all of the mutation updates (this is safe to call
+ // even if mutation tracking isn't enabled); we do this after
+ // the root pointer is swapped so that waking responders will
+ // see the new state.
+ if txn.modified != nil {
+ txn.modified.Notify()
+ }
+ txn.rootTxn.Notify()
+
+ // Clear the txn
+ txn.rootTxn = nil
+ txn.modified = nil
+
+ // Release the writer lock since this is invalid
+ txn.db.writer.Unlock()
+
+ // Run the deferred functions, if any
+ for i := len(txn.after); i > 0; i-- {
+ fn := txn.after[i-1]
+ fn()
+ }
+}
+
+// Insert is used to add or update an object into the given table
+func (txn *Txn) Insert(key []byte, value []byte) error {
+ if !txn.write {
+ return fmt.Errorf("cannot insert in read-only transaction")
+ }
+
+ idVal := toIndexKey(key)
+	// get the writable id index and insert (or overwrite) the value
+	idTxn := txn.writableIndex(defTable, id)
+	// Key is stored as nil: the key can be recovered from the radix index itself,
+	// so there is no need to duplicate it inside the stored value object
+ obj := &dbItem{Key: nil, Value: value}
+ idTxn.Insert(idVal, obj)
+ return nil
+}
+
+// Delete is used to delete a single key from the default table; deleting a
+// missing key is a no-op
+func (txn *Txn) Delete(key []byte) error {
+ if !txn.write {
+ return fmt.Errorf("cannot delete in read-only transaction")
+ }
+
+	// remove the key from the id index
+ idTxn := txn.writableIndex(defTable, id)
+ idVal := toIndexKey(key)
+ idTxn.Delete(idVal)
+
+ return nil
+}
+
+// DeletePrefix is used to delete an entire subtree of keys sharing the given
+// prefix, using an efficient radix subtree delete on the id index.
+func (txn *Txn) DeletePrefix(prefix []byte) (bool, error) {
+ if !txn.write {
+ return false, fmt.Errorf("cannot delete in read-only transaction")
+ }
+ indexTxn := txn.writableIndex(defTable, id)
+ ok := indexTxn.DeletePrefix(prefix)
+ return ok, nil
+}
+
+// DeleteAll is used to delete all the objects in a given table
+// matching the constraints on the index
+func (txn *Txn) DeleteAll() (int, error) {
+ if !txn.write {
+ return 0, fmt.Errorf("cannot delete in read-only transaction")
+ }
+
+ // Get all the objects
+ iter, err := txn.Get(nil)
+ if err != nil {
+ return 0, err
+ }
+
+ // Put them into a slice so there are no safety concerns while actually
+ // performing the deletes
+ var keys [][]byte
+ for {
+ k, _ := iter.Next()
+ if k == nil {
+ break
+ }
+
+ keys = append(keys, k)
+ }
+
+ // Do the deletes
+ num := 0
+ for _, key := range keys {
+		// the key from ResultIterator.Next() has no trailing \x00
+ if err := txn.Delete(key); err != nil {
+ return num, err
+ }
+ num++
+ }
+ return num, nil
+}
+
+// FirstWatch is used to return the first matching object for
+// the given constraints on the index along with the watch channel
+func (txn *Txn) FirstWatch(key []byte) (<-chan struct{}, []byte, interface{}, error) {
+ // Get the index itself
+ indexTxn := txn.readableIndex(defTable, id)
+
+ key = toIndexKey(key)
+ // Do an exact lookup
+ if key != nil {
+ watch, obj, ok := indexTxn.GetWatch(key)
+ if !ok {
+ return watch, nil, nil, nil
+ }
+ return watch, nil, obj, nil
+ }
+
+	// nil key: iterate from the start of the index and return the first value
+ iter := indexTxn.Root().Iterator()
+ watch := iter.SeekPrefixWatch(key)
+ k, value, _ := iter.Next()
+ return watch, extractFromIndexKey(k), value, nil
+}
+
+// LastWatch is used to return the last matching object for
+// the given constraints on the index along with the watch channel
+func (txn *Txn) LastWatch(key []byte) (<-chan struct{}, []byte, interface{}, error) {
+ // Get the index itself
+ indexTxn := txn.readableIndex(defTable, id)
+
+ key = toIndexKey(key)
+ // Do an exact lookup
+ if key != nil {
+ watch, obj, ok := indexTxn.GetWatch(key)
+ if !ok {
+ return watch, nil, nil, nil
+ }
+ return watch, nil, obj, nil
+ }
+
+ // No exact key was given: iterate in reverse and return the last value
+ iter := indexTxn.Root().ReverseIterator()
+ watch := iter.SeekPrefixWatch(key)
+ k, value, _ := iter.Previous()
+ return watch, extractFromIndexKey(k), value, nil
+}
+
+// First is used to return the first matching object for
+// the given constraints on the index
+func (txn *Txn) First(key []byte) ([]byte, interface{}, error) {
+ _, k, val, err := txn.FirstWatch(key)
+ return k, val, err
+}
+
+// Last is used to return the last matching object for
+// the given constraints on the index
+func (txn *Txn) Last(key []byte) ([]byte, interface{}, error) {
+ _, k, val, err := txn.LastWatch(key)
+ return k, val, err
+}
+
+// LongestPrefix is used to fetch the longest prefix match for the given
+// constraints on the index. Note that this will not work with the memdb
+// StringFieldIndex because it adds null terminators which prevent the
+// algorithm from correctly finding a match (it will get to right before the
+// null and fail to find a leaf node). This should only be used where the prefix
+// given is capable of matching indexed entries directly, which typically only
+// applies to a custom indexer. See the unit test for an example.
+func (txn *Txn) LongestPrefix(key []byte) (interface{}, error) {
+ // note prefix should use the key without the trailing \x00
+ // Find the longest prefix match with the given index.
+ indexTxn := txn.readableIndex(defTable, id)
+ if _, value, ok := indexTxn.Root().LongestPrefix(key); ok {
+ return value, nil
+ }
+ return nil, nil
+}
+
+// ResultIterator is used to iterate over a list of results from a query on a table.
+//
+// When a ResultIterator is created from a write transaction, the results from
+// Next will reflect a snapshot of the table at the time the ResultIterator is
+// created.
+// This means that calling Insert or Delete on a transaction while iterating is
+// allowed, but the changes made by Insert or Delete will not be observed in the
+// results returned from subsequent calls to Next. For example if an item is deleted
+// from the index used by the iterator it will still be returned by Next. If an
+// item is inserted into the index used by the iterator, it will not be returned
+// by Next. However, an iterator created after a call to Insert or Delete will
+// reflect the modifications.
+//
+// When a ResultIterator is created from a write transaction, and there are already
+// modifications to the index used by the iterator, the modification cache of the
+// index will be invalidated. This may result in some additional allocations if
+// the same node in the index is modified again.
+type ResultIterator interface {
+ WatchCh() <-chan struct{}
+ // Next returns the next result from the iterator. If there are no more results
+ // nil is returned.
+ Next() ([]byte, interface{})
+}
+
+// Get is used to construct a ResultIterator over all the rows that match the
+// given constraints of an index.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) Get(key []byte) (ResultIterator, error) {
+ indexIter, val, err := txn.getIndexIterator(key)
+ if err != nil {
+ return nil, err
+ }
+
+ // Seek the iterator to the appropriate sub-set
+ watchCh := indexIter.SeekPrefixWatch(val)
+
+ // Create an iterator
+ iter := &radixIterator{
+ iter: indexIter,
+ watchCh: watchCh,
+ }
+ return iter, nil
+}
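+
+// Example iteration (illustrative sketch; the prefix is hypothetical). This
+// mirrors the loop used in DeleteAll above: Next returns a nil key once the
+// iterator is exhausted.
+//
+//	iter, err := txn.Get([]byte("user:"))
+//	if err == nil {
+//		for {
+//			k, v := iter.Next()
+//			if k == nil {
+//				break
+//			}
+//			_ = v // k has the index terminator already stripped
+//		}
+//	}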
+
+// GetReverse is used to construct a Reverse ResultIterator over all the
+// rows that match the given constraints of an index.
+// The returned ResultIterator's Next() iterates in reverse, returning the previous value on each call.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) GetReverse(key []byte) (ResultIterator, error) {
+ indexIter, val, err := txn.getIndexIteratorReverse(key)
+ if err != nil {
+ return nil, err
+ }
+
+ // Seek the iterator to the appropriate sub-set
+ watchCh := indexIter.SeekPrefixWatch(val)
+
+ // Create an iterator
+ iter := &radixReverseIterator{
+ iter: indexIter,
+ watchCh: watchCh,
+ }
+ return iter, nil
+}
+
+// LowerBound is used to construct a ResultIterator over the range of
+// rows that have an index value greater than or equal to the provided key.
+// Calling this and then iterating until the rows are larger than required
+// allows range scans within an index. It is not possible to watch the resulting
+// iterator since the radix tree doesn't efficiently allow watching on lower
+// bound changes. The returned WatchCh will be nil and so will block forever.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) LowerBound(key []byte) (ResultIterator, error) {
+ indexIter, val, err := txn.getIndexIterator(key)
+ if err != nil {
+ return nil, err
+ }
+
+ // Seek the iterator to the appropriate sub-set
+ indexIter.SeekLowerBound(val)
+
+ // Create an iterator
+ iter := &radixIterator{
+ iter: indexIter,
+ }
+ return iter, nil
+}
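+
+// Example range scan (illustrative sketch; the bound keys are hypothetical).
+// ReverseLowerBound below works analogously in the other direction.
+//
+//	iter, _ := txn.LowerBound([]byte("key100"))
+//	for k, v := iter.Next(); k != nil; k, v = iter.Next() {
+//		if bytes.Compare(k, []byte("key200")) >= 0 {
+//			break // past the end of the desired range
+//		}
+//		_ = v
+//	}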
+
+// ReverseLowerBound is used to construct a Reverse ResultIterator over the
+// range of rows that have an index value less than or equal to the
+// provided key. Calling this and then iterating until the rows are lower than
+// required allows range scans within an index. It is not possible to watch the
+// resulting iterator since the radix tree doesn't efficiently allow watching
+// on lower bound changes. The returned WatchCh will be nil and so will block
+// forever.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) ReverseLowerBound(key []byte) (ResultIterator, error) {
+ indexIter, val, err := txn.getIndexIteratorReverse(key)
+ if err != nil {
+ return nil, err
+ }
+
+ // Seek the iterator to the appropriate sub-set
+ indexIter.SeekReverseLowerBound(val)
+
+ // Create an iterator
+ iter := &radixReverseIterator{
+ iter: indexIter,
+ }
+ return iter, nil
+}
+
+func (txn *Txn) getIndexIterator(key []byte) (*iradix.Iterator, []byte, error) {
+ // Get the index itself
+ indexTxn := txn.readableIndex(defTable, id)
+ indexRoot := indexTxn.Root()
+
+ // Get an iterator over the index
+ indexIter := indexRoot.Iterator()
+ return indexIter, toIndexKey(key), nil
+}
+
+func (txn *Txn) getIndexIteratorReverse(key []byte) (*iradix.ReverseIterator, []byte, error) {
+ // Get the index itself
+ indexTxn := txn.readableIndex(defTable, id)
+ indexRoot := indexTxn.Root()
+
+ // Get an iterator over the index
+ indexIter := indexRoot.ReverseIterator()
+ return indexIter, toIndexKey(key), nil
+}
+
+// Defer is used to push a new arbitrary function onto a stack which
+// gets called when a transaction is committed and finished. Deferred
+// functions are called in LIFO order, and only invoked at the end of
+// write transactions.
+func (txn *Txn) Defer(fn func()) {
+ txn.after = append(txn.after, fn)
+}
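+
+// Example (illustrative sketch; the notify funcs are hypothetical): deferred
+// functions run after the transaction commits, in LIFO order.
+//
+//	txn.Defer(func() { notifyA() }) // runs second
+//	txn.Defer(func() { notifyB() }) // runs first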
+
+// radixIterator is used to wrap an underlying iradix iterator.
+// This is much more efficient than a sliceIterator as we are not
+// materializing the entire view.
+type radixIterator struct {
+ iter *iradix.Iterator
+ watchCh <-chan struct{}
+}
+
+func (r *radixIterator) WatchCh() <-chan struct{} {
+ return r.watchCh
+}
+
+func (r *radixIterator) Next() ([]byte, interface{}) {
+ k, value, ok := r.iter.Next()
+ if !ok {
+ return nil, nil
+ }
+ return extractFromIndexKey(k), value
+}
+
+type radixReverseIterator struct {
+ iter *iradix.ReverseIterator
+ watchCh <-chan struct{}
+}
+
+func (r *radixReverseIterator) Next() ([]byte, interface{}) {
+ k, value, ok := r.iter.Previous()
+ if !ok {
+ return nil, nil
+ }
+ return extractFromIndexKey(k), value
+}
+
+func (r *radixReverseIterator) WatchCh() <-chan struct{} {
+ return r.watchCh
+}
+
+// Snapshot creates a snapshot of the current state of the transaction.
+// Returns a new read-only transaction or nil if the transaction is already
+// aborted or committed.
+func (txn *Txn) Snapshot() *Txn {
+ if txn.rootTxn == nil {
+ return nil
+ }
+
+ snapshot := &Txn{
+ db: txn.db,
+ rootTxn: txn.rootTxn.Clone(),
+ }
+
+ // Commit sub-transactions into the snapshot
+ if txn.modified != nil {
+ path := indexPath(defTable, id)
+ final := txn.modified.CommitOnly()
+ snapshot.rootTxn.Insert(path, final)
+ }
+
+ return snapshot
+}
diff --git a/engine/rock_iter.go b/engine/rock_iter.go
new file mode 100644
index 00000000..62d3f9d6
--- /dev/null
+++ b/engine/rock_iter.go
@@ -0,0 +1,120 @@
+package engine
+
+import (
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/gorocksdb"
+)
+
+type rockIterator struct {
+ *gorocksdb.Iterator
+ snap *gorocksdb.Snapshot
+ ro *gorocksdb.ReadOptions
+ db *gorocksdb.DB
+ upperBound *gorocksdb.IterBound
+ lowerBound *gorocksdb.IterBound
+ removeTsType byte
+}
+
+// the lower bound is inclusive
+// the upper bound is exclusive
+func newRockIterator(db *gorocksdb.DB,
+ prefixSame bool, opts IteratorOpts) (*rockIterator, error) {
+ db.RLock()
+ if !db.IsOpened() {
+ db.RUnlock()
+ return nil, common.ErrStopped
+ }
+ upperBound := opts.Max
+ lowerBound := opts.Min
+ if opts.Type&common.RangeROpen <= 0 && upperBound != nil {
+ // the range is closed on the right, so the max should be included;
+ // since upperBound is exclusive, append a zero byte to cover it
+ upperBound = append(upperBound, 0)
+ }
+ dbit := &rockIterator{
+ db: db,
+ }
+ readOpts := gorocksdb.NewDefaultReadOptions()
+ readOpts.SetFillCache(false)
+ readOpts.SetVerifyChecksums(false)
+ if prefixSame {
+ readOpts.SetPrefixSameAsStart(true)
+ }
+ if lowerBound != nil {
+ dbit.lowerBound = gorocksdb.NewIterBound(lowerBound)
+ readOpts.SetIterLowerBound(dbit.lowerBound)
+ }
+ if upperBound != nil {
+ dbit.upperBound = gorocksdb.NewIterBound(upperBound)
+ readOpts.SetIterUpperBound(dbit.upperBound)
+ }
+ if opts.IgnoreDel {
+ // may iterate over some deleted keys that have not been compacted yet
+ readOpts.SetIgnoreRangeDeletions(true)
+ }
+ dbit.ro = readOpts
+ var err error
+ if opts.WithSnap {
+ dbit.snap, err = db.NewSnapshot()
+ if err != nil {
+ dbit.Close()
+ return nil, err
+ }
+ readOpts.SetSnapshot(dbit.snap)
+ }
+ dbit.Iterator, err = db.NewIterator(readOpts)
+ if err != nil {
+ dbit.Close()
+ return nil, err
+ }
+ return dbit, nil
+}
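+
+// Example (illustrative sketch; the field values are hypothetical, and
+// common.RangeClose is assumed to mean "closed on both sides"): requesting a
+// closed range [min, max] leaves the RangeROpen bit unset, so a trailing zero
+// byte is appended to max to turn the exclusive upper bound into an inclusive one.
+//
+//	var opts IteratorOpts
+//	opts.Min = []byte("a")
+//	opts.Max = []byte("b")
+//	opts.Type = common.RangeClose
+//	it, err := newRockIterator(db, false, opts)
+//	if err == nil {
+//		defer it.Close()
+//	}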
+
+// the returned bytes are only valid until the next iterator move and will be freed afterwards
+func (it *rockIterator) RefKey() []byte {
+ return it.Iterator.Key().Data()
+}
+
+func (it *rockIterator) Key() []byte {
+ return it.Iterator.Key().Bytes()
+}
+
+// the returned bytes are only valid until the next iterator move and will be freed afterwards
+func (it *rockIterator) RefValue() []byte {
+ v := it.Iterator.Value().Data()
+ if (it.removeTsType == KVType || it.removeTsType == HashType) && len(v) >= tsLen {
+ v = v[:len(v)-tsLen]
+ }
+ return v
+}
+
+func (it *rockIterator) Value() []byte {
+ v := it.Iterator.Value().Bytes()
+ if (it.removeTsType == KVType || it.removeTsType == HashType) && len(v) >= tsLen {
+ v = v[:len(v)-tsLen]
+ }
+ return v
+}
+
+func (it *rockIterator) NoTimestamp(vt byte) {
+ it.removeTsType = vt
+}
+
+func (it *rockIterator) Close() {
+ if it.Iterator != nil {
+ it.Iterator.Close()
+ }
+ if it.ro != nil {
+ it.ro.Destroy()
+ }
+ if it.snap != nil {
+ it.snap.Release()
+ }
+ if it.upperBound != nil {
+ it.upperBound.Destroy()
+ }
+ if it.lowerBound != nil {
+ it.lowerBound.Destroy()
+ }
+ it.db.RUnlock()
+}
diff --git a/engine/rockeng.go b/engine/rockeng.go
new file mode 100644
index 00000000..0732e000
--- /dev/null
+++ b/engine/rockeng.go
@@ -0,0 +1,634 @@
+package engine
+
+import (
+ "errors"
+ "math"
+ "os"
+ "path"
+ "strconv"
+ "sync/atomic"
+ "time"
+
+ "github.com/shirou/gopsutil/mem"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/gorocksdb"
+)
+
+type rockRefSlice struct {
+ v *gorocksdb.Slice
+}
+
+func (rs *rockRefSlice) Free() {
+ rs.v.Free()
+}
+
+func (rs *rockRefSlice) Bytes() []byte {
+ return rs.v.Bytes()
+}
+
+func (rs *rockRefSlice) Data() []byte {
+ return rs.v.Data()
+}
+
+type sharedRockConfig struct {
+ SharedCache *gorocksdb.Cache
+ SharedEnv *gorocksdb.Env
+ SharedRateLimiter *gorocksdb.RateLimiter
+}
+
+func newSharedRockConfig(opt RockOptions) *sharedRockConfig {
+ rc := &sharedRockConfig{}
+ if opt.UseSharedCache {
+ if opt.BlockCache <= 0 {
+ v, err := mem.VirtualMemory()
+ if err != nil {
+ opt.BlockCache = 1024 * 1024 * 128 * 10
+ } else {
+ opt.BlockCache = int64(v.Total / 10)
+ if opt.CacheIndexAndFilterBlocks || opt.EnablePartitionedIndexFilter {
+ opt.BlockCache *= 2
+ }
+ }
+ }
+ rc.SharedCache = gorocksdb.NewLRUCache(opt.BlockCache)
+ }
+ if opt.AdjustThreadPool {
+ rc.SharedEnv = gorocksdb.NewDefaultEnv()
+ if opt.BackgroundHighThread <= 0 {
+ opt.BackgroundHighThread = 2
+ }
+ if opt.BackgroundLowThread <= 0 {
+ opt.BackgroundLowThread = 16
+ }
+ rc.SharedEnv.SetBackgroundThreads(opt.BackgroundLowThread)
+ rc.SharedEnv.SetHighPriorityBackgroundThreads(opt.BackgroundHighThread)
+ }
+ if opt.UseSharedRateLimiter && opt.RateBytesPerSec > 0 {
+ rc.SharedRateLimiter = gorocksdb.NewGenericRateLimiter(opt.RateBytesPerSec, 100*1000, 10)
+ }
+ return rc
+}
+
+func (src *sharedRockConfig) ChangeLimiter(bytesPerSec int64) {
+ limiter := src.SharedRateLimiter
+ if limiter == nil {
+ return
+ }
+ limiter.SetBytesPerSecond(bytesPerSec)
+}
+
+func (src *sharedRockConfig) Destroy() {
+ if src.SharedCache != nil {
+ src.SharedCache.Destroy()
+ }
+ if src.SharedEnv != nil {
+ src.SharedEnv.Destroy()
+ }
+ if src.SharedRateLimiter != nil {
+ src.SharedRateLimiter.Destroy()
+ }
+}
+
+type RockEng struct {
+ cfg *RockEngConfig
+ eng *gorocksdb.DB
+ dbOpts *gorocksdb.Options
+ defaultWriteOpts *gorocksdb.WriteOptions
+ defaultReadOpts *gorocksdb.ReadOptions
+ wb *rocksWriteBatch
+ lruCache *gorocksdb.Cache
+ rl *gorocksdb.RateLimiter
+ engOpened int32
+ lastCompact int64
+ deletedCnt int64
+ quit chan struct{}
+}
+
+func NewRockEng(cfg *RockEngConfig) (*RockEng, error) {
+ if len(cfg.DataDir) == 0 {
+ return nil, errors.New("config error")
+ }
+
+ //if cfg.DisableWAL {
+ // cfg.DefaultWriteOpts.DisableWAL(true)
+ //}
+ // options need to be adjusted depending on whether HDD or SSD is used; please refer to
+ // https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide
+ bbto := gorocksdb.NewDefaultBlockBasedTableOptions()
+ // use a large block size to reduce the index block size on HDD;
+ // if using SSD, the default value should be used
+ bbto.SetBlockSize(cfg.BlockSize)
+ // the block cache should be about 20% less than host RAM
+ // http://smalldatum.blogspot.com/2016/09/tuning-rocksdb-block-cache.html
+ var lru *gorocksdb.Cache
+ sharedConfig, _ := cfg.SharedConfig.(*sharedRockConfig)
+ if cfg.RockOptions.UseSharedCache {
+ if sharedConfig == nil || sharedConfig.SharedCache == nil {
+ return nil, errors.New("missing shared cache instance")
+ }
+ bbto.SetBlockCache(sharedConfig.SharedCache)
+ dbLog.Infof("use shared cache: %v", sharedConfig.SharedCache)
+ } else {
+ lru = gorocksdb.NewLRUCache(cfg.BlockCache)
+ bbto.SetBlockCache(lru)
+ }
+ // caching index and filter blocks can save some memory;
+ // if they are not cached, the index and filter blocks will be pre-loaded into memory
+ bbto.SetCacheIndexAndFilterBlocks(cfg.CacheIndexAndFilterBlocks)
+ // set pin_l0_filter_and_index_blocks_in_cache = true when caching index blocks to improve read performance,
+ // and see https://github.com/facebook/rocksdb/pull/3692 if the partitioned filter is on
+ // TODO: no need to set this after 6.15
+ bbto.SetPinL0FilterAndIndexBlocksInCache(true)
+
+ if cfg.EnablePartitionedIndexFilter {
+ //enable partitioned indexes and partitioned filters
+ bbto.SetCacheIndexAndFilterBlocksWithHighPriority(true)
+ bbto.SetIndexType(gorocksdb.IndexTypeTwoLevelIndexSearch)
+ bbto.SetPartitionFilters(true)
+ bbto.SetMetaDataBlockSize(1024 * 8)
+ bbto.SetCacheIndexAndFilterBlocks(true)
+ }
+ // TODO: set to 5 for version after 6.6
+ bbto.SetFormatVersion(4)
+ bbto.SetIndexBlockRestartInterval(16)
+
+ // the filter should not be block-based; use the SST-based (full) filter to reduce CPU
+ filter := gorocksdb.NewBloomFilter(10, false)
+ bbto.SetFilterPolicy(filter)
+ opts := gorocksdb.NewDefaultOptions()
+ // optimize filters for hits: this uses less memory since the last level will have no bloom filter
+ // If you're certain that Get() will mostly find a key you're looking for, you can set options.optimize_filters_for_hits = true
+ // to save memory usage for bloom filters
+ if cfg.OptimizeFiltersForHits {
+ opts.OptimizeFilterForHits(true)
+ }
+ opts.SetBlockBasedTableFactory(bbto)
+ if cfg.RockOptions.AdjustThreadPool {
+ if cfg.SharedConfig == nil || sharedConfig.SharedEnv == nil {
+ return nil, errors.New("missing shared env instance")
+ }
+ opts.SetEnv(sharedConfig.SharedEnv)
+ dbLog.Infof("use shared env: %v", sharedConfig.SharedEnv)
+ }
+
+ var rl *gorocksdb.RateLimiter
+ if cfg.RateBytesPerSec > 0 {
+ if cfg.UseSharedRateLimiter {
+ if cfg.SharedConfig == nil {
+ return nil, errors.New("missing shared instance")
+ }
+ opts.SetRateLimiter(sharedConfig.SharedRateLimiter)
+ dbLog.Infof("use shared rate limiter: %v", sharedConfig.SharedRateLimiter)
+ } else {
+ rl = gorocksdb.NewGenericRateLimiter(cfg.RateBytesPerSec, 100*1000, 10)
+ opts.SetRateLimiter(rl)
+ }
+ }
+
+ if cfg.InsertHintFixedLen > 0 {
+ opts.SetMemtableInsertWithHintFixedLengthPrefixExtractor(cfg.InsertHintFixedLen)
+ }
+ opts.SetCreateIfMissing(true)
+ opts.SetMaxOpenFiles(-1)
+ // keep level0_file_num_compaction_trigger * write_buffer_size * min_write_buffer_number_to_merge = max_bytes_for_level_base to minimize write amplification
+ opts.SetWriteBufferSize(cfg.WriteBufferSize)
+ opts.SetMaxWriteBufferNumber(cfg.MaxWriteBufferNumber)
+ opts.SetMinWriteBufferNumberToMerge(cfg.MinWriteBufferNumberToMerge)
+ opts.SetLevel0FileNumCompactionTrigger(cfg.Level0FileNumCompactionTrigger)
+ opts.SetMaxBytesForLevelBase(cfg.MaxBytesForLevelBase)
+ opts.SetTargetFileSizeBase(cfg.TargetFileSizeBase)
+ opts.SetMaxBackgroundFlushes(cfg.MaxBackgroundFlushes)
+ opts.SetMaxBackgroundCompactions(cfg.MaxBackgroundCompactions)
+ opts.SetMinLevelToCompress(cfg.MinLevelToCompress)
+ // keys are grouped by table prefix, so we use the prefix seek feature
+ opts.SetPrefixExtractor(gorocksdb.NewFixedPrefixTransform(3))
+ opts.SetMemtablePrefixBloomSizeRatio(0.1)
+ opts.EnableStatistics()
+ opts.SetMaxLogFileSize(1024 * 1024 * 64)
+ opts.SetLogFileTimeToRoll(3600 * 24 * 15)
+ opts.SetKeepLogFileNum(200)
+ opts.SetMaxManifestFileSize(cfg.MaxMainifestFileSize)
+ opts.SetMaxSuccessiveMerges(1000)
+ // https://github.com/facebook/mysql-5.6/wiki/my.cnf-tuning
+ // a rate limiter is needed to reduce the compaction IO
+ if !cfg.DisableMergeCounter {
+ if cfg.EnableTableCounter {
+ opts.SetUint64AddMergeOperator()
+ }
+ } else {
+ cfg.EnableTableCounter = false
+ }
+ // TODO: add avoid_unnecessary_blocking_io option for db after 6.14
+
+ // See http://smalldatum.blogspot.com/2018/09/5-things-to-set-when-configuring.html
+ // level_compaction_dynamic_level_bytes
+ if cfg.LevelCompactionDynamicLevelBytes {
+ opts.SetLevelCompactionDynamicLevelBytes(true)
+ }
+
+ if !cfg.ReadOnly {
+ err := os.MkdirAll(cfg.DataDir, common.DIR_PERM)
+ if err != nil {
+ return nil, err
+ }
+ }
+ db := &RockEng{
+ cfg: cfg,
+ dbOpts: opts,
+ lruCache: lru,
+ rl: rl,
+ defaultWriteOpts: gorocksdb.NewDefaultWriteOptions(),
+ defaultReadOpts: gorocksdb.NewDefaultReadOptions(),
+ quit: make(chan struct{}),
+ }
+ db.defaultReadOpts.SetVerifyChecksums(false)
+ if cfg.DisableWAL {
+ db.defaultWriteOpts.DisableWAL(true)
+ }
+ if cfg.AutoCompacted {
+ go db.compactLoop()
+ }
+ return db, nil
+}
+
+func (r *RockEng) SetCompactionFilter(filter ICompactFilter) {
+ r.dbOpts.SetCompactionFilter(filter)
+}
+
+func (r *RockEng) SetMaxBackgroundOptions(maxCompact int, maxBackJobs int) error {
+ /*
+ all options we can change is in MutableDBOptions
+ struct MutableDBOptions {
+ int max_background_jobs;
+ int base_background_compactions;
+ int max_background_compactions;
+ bool avoid_flush_during_shutdown;
+ size_t writable_file_max_buffer_size;
+ uint64_t delayed_write_rate;
+ uint64_t max_total_wal_size;
+ uint64_t delete_obsolete_files_period_micros;
+ unsigned int stats_dump_period_sec;
+ int max_open_files;
+ uint64_t bytes_per_sync;
+ uint64_t wal_bytes_per_sync;
+ size_t compaction_readahead_size;
+ };
+ */
+ keys := []string{}
+ values := []string{}
+ if maxCompact > 0 {
+ r.dbOpts.SetMaxBackgroundCompactions(maxCompact)
+ keys = append(keys, "max_background_compactions")
+ values = append(values, strconv.Itoa(maxCompact))
+ }
+ if maxBackJobs > 0 {
+ r.dbOpts.SetMaxBackgroundFlushes(maxBackJobs)
+ keys = append(keys, "max_background_jobs")
+ values = append(values, strconv.Itoa(maxBackJobs))
+ }
+ if len(keys) == 0 {
+ return nil
+ }
+ return r.eng.SetDBOptions(keys, values)
+}
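+
+// Example (illustrative sketch; the values are hypothetical): raise compaction
+// parallelism at runtime; the two arguments map onto max_background_compactions
+// and max_background_jobs respectively.
+//
+//	_ = r.SetMaxBackgroundOptions(8, 12)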
+
+func (r *RockEng) compactLoop() {
+ ticker := time.NewTicker(time.Hour)
+ // one hour expressed in seconds: time.Hour/time.Second == 3600
+ interval := (time.Hour / time.Second).Nanoseconds()
+ dbLog.Infof("start auto compact loop : %v", interval)
+ for {
+ select {
+ case <-r.quit:
+ return
+ case <-ticker.C:
+ if (r.DeletedBeforeCompact() > compactThreshold) &&
+ (time.Now().Unix()-r.LastCompactTime()) > interval {
+ dbLog.Infof("auto compact : %v, %v", r.DeletedBeforeCompact(), r.LastCompactTime())
+ r.CompactAllRange()
+ }
+ }
+ }
+}
+
+// NewWriteBatch creates a new write batch; it should only be called after the engine is opened
+func (r *RockEng) NewWriteBatch() WriteBatch {
+ if r.eng == nil {
+ panic("nil engine, should only get write batch after db opened")
+ }
+ return newRocksWriteBatch(r.eng, r.defaultWriteOpts)
+}
+
+// DefaultWriteBatch returns the internal default write batch; it should only be called after
+// the engine is opened and must not be used concurrently
+func (r *RockEng) DefaultWriteBatch() WriteBatch {
+ if r.wb == nil {
+ panic("nil default write batch, should only get write batch after db opened")
+ }
+ return r.wb
+}
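+
+// Example write path (illustrative sketch; keys and values are hypothetical):
+//
+//	wb := eng.NewWriteBatch()
+//	wb.Put([]byte("k"), []byte("v"))
+//	if err := eng.Write(wb); err != nil {
+//		// handle the write error
+//	}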
+
+func (r *RockEng) SetOptsForLogStorage() {
+ r.defaultReadOpts.SetVerifyChecksums(false)
+ r.defaultReadOpts.SetFillCache(false)
+ // raft log reads always hit non-deleted ranges
+ r.defaultReadOpts.SetIgnoreRangeDeletions(true)
+ r.defaultWriteOpts.DisableWAL(true)
+}
+
+func (r *RockEng) GetOpts() *gorocksdb.Options {
+ return r.dbOpts
+}
+
+func (r *RockEng) GetDataDir() string {
+ return path.Join(r.cfg.DataDir, "rocksdb")
+}
+
+func (r *RockEng) CheckDBEngForRead(fullPath string) error {
+ ro := *(r.GetOpts())
+ ro.SetCreateIfMissing(false)
+ db, err := gorocksdb.OpenDbForReadOnly(&ro, fullPath, false)
+ if err != nil {
+ return err
+ }
+ db.Close()
+ return nil
+}
+
+func (r *RockEng) OpenEng() error {
+ if !r.IsClosed() {
+ dbLog.Warningf("rocksdb engine already opened: %v, should close it before reopen", r.GetDataDir())
+ return errors.New("rocksdb open failed since not closed")
+ }
+ if r.cfg.ReadOnly {
+ ro := *(r.GetOpts())
+ ro.SetCreateIfMissing(false)
+ dfile := r.GetDataDir()
+ if r.cfg.DataTool {
+ _, err := os.Stat(dfile)
+ if os.IsNotExist(err) {
+ dfile = r.cfg.DataDir
+ }
+ }
+ dbLog.Infof("rocksdb engine open %v as read only", dfile)
+ eng, err := gorocksdb.OpenDbForReadOnly(&ro, dfile, false)
+ if err != nil {
+ return err
+ }
+ r.eng = eng
+ } else {
+ eng, err := gorocksdb.OpenDb(r.dbOpts, r.GetDataDir())
+ if err != nil {
+ return err
+ }
+ r.eng = eng
+ }
+ r.wb = newRocksWriteBatch(r.eng, r.defaultWriteOpts)
+ atomic.StoreInt32(&r.engOpened, 1)
+ dbLog.Infof("rocksdb engine opened: %v", r.GetDataDir())
+ return nil
+}
+
+func (r *RockEng) Write(wb WriteBatch) error {
+ return wb.Commit()
+}
+
+func (r *RockEng) DeletedBeforeCompact() int64 {
+ return atomic.LoadInt64(&r.deletedCnt)
+}
+
+func (r *RockEng) AddDeletedCnt(c int64) {
+ atomic.AddInt64(&r.deletedCnt, c)
+}
+
+func (r *RockEng) LastCompactTime() int64 {
+ return atomic.LoadInt64(&r.lastCompact)
+}
+
+func (r *RockEng) CompactRange(rg CRange) {
+ atomic.StoreInt64(&r.lastCompact, time.Now().Unix())
+ dbLog.Infof("compact rocksdb %v begin: %v", r.GetDataDir(), rg)
+ defer dbLog.Infof("compact rocksdb %v done", r.GetDataDir())
+ var rrg gorocksdb.Range
+ rrg.Start = rg.Start
+ rrg.Limit = rg.Limit
+ r.eng.CompactRange(rrg)
+}
+
+func (r *RockEng) CompactAllRange() {
+ atomic.StoreInt64(&r.deletedCnt, 0)
+ r.CompactRange(CRange{})
+}
+
+func (r *RockEng) DisableManualCompact(disable bool) {
+ // TODO: rocksdb 6.5 will support disabling and enabling manual compaction
+ if disable {
+ //r.eng.DisableManualCompact(disable)
+ } else {
+ //r.eng.EnableManualCompact(disable)
+ }
+}
+
+func (r *RockEng) GetApproximateTotalKeyNum() int {
+ numStr := r.eng.GetProperty("rocksdb.estimate-num-keys")
+ num, err := strconv.Atoi(numStr)
+ if err != nil {
+ dbLog.Infof("total keys num error: %v, %v", numStr, err)
+ return 0
+ }
+ return num
+}
+
+func (r *RockEng) GetApproximateKeyNum(ranges []CRange) uint64 {
+ rgs := make([]gorocksdb.Range, 0, len(ranges))
+ for _, r := range ranges {
+ rgs = append(rgs, gorocksdb.Range{Start: r.Start, Limit: r.Limit})
+ }
+ return r.eng.GetApproximateKeyNum(rgs)
+}
+
+func (r *RockEng) GetApproximateSizes(ranges []CRange, includeMem bool) []uint64 {
+ rgs := make([]gorocksdb.Range, 0, len(ranges))
+ for _, r := range ranges {
+ rgs = append(rgs, gorocksdb.Range{Start: r.Start, Limit: r.Limit})
+ }
+ return r.eng.GetApproximateSizes(rgs, includeMem)
+}
+
+func (r *RockEng) IsClosed() bool {
+ if atomic.LoadInt32(&r.engOpened) == 0 {
+ return true
+ }
+ return false
+}
+
+func (r *RockEng) CloseEng() bool {
+ if r.eng != nil {
+ if atomic.CompareAndSwapInt32(&r.engOpened, 1, 0) {
+ if r.wb != nil {
+ r.wb.Destroy()
+ }
+ r.eng.PreShutdown()
+ r.eng.Close()
+ dbLog.Infof("rocksdb engine closed: %v", r.GetDataDir())
+ return true
+ }
+ }
+ return false
+}
+
+func (r *RockEng) CloseAll() {
+ select {
+ case <-r.quit:
+ default:
+ close(r.quit)
+ }
+ r.CloseEng()
+ if r.dbOpts != nil {
+ r.dbOpts.Destroy()
+ r.dbOpts = nil
+ }
+ if r.lruCache != nil {
+ r.lruCache.Destroy()
+ r.lruCache = nil
+ }
+ if r.rl != nil {
+ r.rl.Destroy()
+ r.rl = nil
+ }
+
+ if r.defaultWriteOpts != nil {
+ r.defaultWriteOpts.Destroy()
+ }
+ if r.defaultReadOpts != nil {
+ r.defaultReadOpts.Destroy()
+ }
+}
+
+func (r *RockEng) GetStatistics() string {
+ return r.dbOpts.GetStatistics()
+}
+
+func (r *RockEng) GetInternalStatus() map[string]interface{} {
+ status := make(map[string]interface{})
+ bbt := r.dbOpts.GetBlockBasedTableFactory()
+ if bbt != nil {
+ bc := bbt.GetBlockCache()
+ if bc != nil {
+ status["block-cache-usage"] = bc.GetUsage()
+ status["block-cache-pinned-usage"] = bc.GetPinnedUsage()
+ }
+ }
+
+ memStr := r.eng.GetProperty("rocksdb.estimate-table-readers-mem")
+ status["estimate-table-readers-mem"] = memStr
+ memStr = r.eng.GetProperty("rocksdb.cur-size-all-mem-tables")
+ status["cur-size-all-mem-tables"] = memStr
+ memStr = r.eng.GetProperty("rocksdb.cur-size-active-mem-table")
+ status["cur-size-active-mem-tables"] = memStr
+ return status
+}
+
+func (r *RockEng) GetInternalPropertyStatus(p string) string {
+ return r.eng.GetProperty(p)
+}
+
+func (r *RockEng) GetBytesNoLock(key []byte) ([]byte, error) {
+ return r.eng.GetBytesNoLock(r.defaultReadOpts, key)
+}
+
+func (r *RockEng) GetBytes(key []byte) ([]byte, error) {
+ return r.eng.GetBytes(r.defaultReadOpts, key)
+}
+
+func (r *RockEng) MultiGetBytes(keyList [][]byte, values [][]byte, errs []error) {
+ r.eng.MultiGetBytes(r.defaultReadOpts, keyList, values, errs)
+}
+
+func (r *RockEng) Exist(key []byte) (bool, error) {
+ return r.eng.Exist(r.defaultReadOpts, key)
+}
+
+func (r *RockEng) ExistNoLock(key []byte) (bool, error) {
+ return r.eng.ExistNoLock(r.defaultReadOpts, key)
+}
+
+func (r *RockEng) GetRefNoLock(key []byte) (RefSlice, error) {
+ v, err := r.eng.GetNoLock(r.defaultReadOpts, key)
+ if err != nil {
+ return nil, err
+ }
+ return &rockRefSlice{v: v}, nil
+}
+
+func (r *RockEng) GetRef(key []byte) (RefSlice, error) {
+ v, err := r.eng.Get(r.defaultReadOpts, key)
+ if err != nil {
+ return nil, err
+ }
+ return &rockRefSlice{v: v}, nil
+}
+
+func (r *RockEng) GetValueWithOp(key []byte,
+ op func([]byte) error) error {
+ val, err := r.eng.Get(r.defaultReadOpts, key)
+ if err != nil {
+ return err
+ }
+ defer val.Free()
+ return op(val.Data())
+}
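+
+// Example (illustrative sketch; `key` is hypothetical): consume the value inside
+// the callback to avoid an extra copy; the slice passed to op is freed when op returns.
+//
+//	var size int
+//	_ = r.GetValueWithOp(key, func(v []byte) error {
+//		size = len(v)
+//		return nil
+//	})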
+
+func (r *RockEng) GetValueWithOpNoLock(key []byte,
+ op func([]byte) error) error {
+ val, err := r.eng.GetNoLock(r.defaultReadOpts, key)
+ if err != nil {
+ return err
+ }
+ defer val.Free()
+ return op(val.Data())
+}
+
+func (r *RockEng) GetIterator(opts IteratorOpts) (Iterator, error) {
+ dbit, err := newRockIterator(r.eng, true, opts)
+ if err != nil {
+ return nil, err
+ }
+ return dbit, nil
+}
+
+func (r *RockEng) DeleteFilesInRange(rg CRange) {
+ var rrg gorocksdb.Range
+ rrg.Start = rg.Start
+ rrg.Limit = rg.Limit
+ r.eng.DeleteFilesInRange(rrg)
+}
+
+func (r *RockEng) NewCheckpoint(printToStdoutAlso bool) (KVCheckpoint, error) {
+ ck, err := gorocksdb.NewCheckpoint(r.eng)
+ if err != nil {
+ return nil, err
+ }
+ return &rockEngCheckpoint{
+ ck: ck,
+ eng: r.eng,
+ }, nil
+}
+
+type rockEngCheckpoint struct {
+ ck *gorocksdb.Checkpoint
+ eng *gorocksdb.DB
+}
+
+func (rck *rockEngCheckpoint) Save(path string, notify chan struct{}) error {
+ rck.eng.RLock()
+ defer rck.eng.RUnlock()
+ if rck.eng.IsOpened() {
+ if notify != nil {
+ time.AfterFunc(time.Millisecond*20, func() {
+ close(notify)
+ })
+ }
+ return rck.ck.Save(path, math.MaxUint64)
+ }
+ return errDBEngClosed
+}
diff --git a/engine/rockeng_test.go b/engine/rockeng_test.go
new file mode 100644
index 00000000..7c5c33ef
--- /dev/null
+++ b/engine/rockeng_test.go
@@ -0,0 +1,30 @@
+package engine
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestRockWriteAfterClose(t *testing.T) {
+ SetLogger(0, nil)
+ cfg := NewRockConfig()
+ tmpDir, err := ioutil.TempDir("", "test")
+ assert.Nil(t, err)
+ t.Log(tmpDir)
+ defer os.RemoveAll(tmpDir)
+ cfg.DataDir = tmpDir
+ eng, err := NewRockEng(cfg)
+ assert.Nil(t, err)
+ err = eng.OpenEng()
+ assert.Nil(t, err)
+ wb := eng.DefaultWriteBatch()
+ wb.Put([]byte("test"), []byte("test"))
+ wb.Put([]byte("test2"), []byte("test2"))
+
+ eng.CloseAll()
+
+ err = eng.Write(wb)
+ assert.NotNil(t, err)
+}
diff --git a/engine/skiplist.cc b/engine/skiplist.cc
new file mode 100644
index 00000000..5e96eaa5
--- /dev/null
+++ b/engine/skiplist.cc
@@ -0,0 +1,999 @@
+/************************************************************************
+Modifications Copyright 2017-2019 eBay Inc.
+
+Original Copyright 2017 Jung-Sang Ahn
+See URL: https://github.com/greensky00/skiplist
+ (v0.2.9)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+**************************************************************************/
+
+#include "skiplist.h"
+
+#include <stdlib.h>
+
+#define __SLD_RT_INS(e, n, t, c)
+#define __SLD_NC_INS(n, nn, t, c)
+#define __SLD_RT_RMV(e, n, t, c)
+#define __SLD_NC_RMV(n, nn, t, c)
+#define __SLD_BM(n)
+#define __SLD_ASSERT(cond)
+#define __SLD_P(args...)
+#define __SLD_(b)
+
+//#define __SL_DEBUG (1)
+#ifdef __SL_DEBUG
+ #ifndef __cplusplus
+ #error "Debug mode is available with C++ compiler only."
+ #endif
+ #include "skiplist_debug.h"
+#endif
+
+#define __SL_YIELD (1)
+#ifdef __SL_YIELD
+ #ifdef __cplusplus
+ #include <thread>
+ #define YIELD() std::this_thread::yield()
+ #else
+ #include <sched.h>
+ #define YIELD() sched_yield()
+ #endif
+#else
+ #define YIELD()
+#endif
+
+#if defined(_STL_ATOMIC) && defined(__cplusplus)
+ // C++ (STL) atomic operations
+ #define MOR std::memory_order_relaxed
+ #define ATM_GET(var) (var).load(MOR)
+ #define ATM_LOAD(var, val) (val) = (var).load(MOR)
+ #define ATM_STORE(var, val) (var).store((val), MOR)
+ #define ATM_CAS(var, exp, val) (var).compare_exchange_weak((exp), (val))
+ #define ATM_FETCH_ADD(var, val) (var).fetch_add(val, MOR)
+ #define ATM_FETCH_SUB(var, val) (var).fetch_sub(val, MOR)
+ #define ALLOC_(type, var, count) (var) = new type[count]
+ #define FREE_(var) delete[] (var)
+#else
+ // C-style atomic operations
+ #ifndef __cplusplus
+ typedef uint8_t bool;
+ #ifndef true
+ #define true 1
+ #endif
+ #ifndef false
+ #define false 0
+ #endif
+ #endif
+
+ #ifndef __cplusplus
+ #define thread_local /*_Thread_local*/
+ #endif
+
+ #define MOR __ATOMIC_RELAXED
+ #define ATM_GET(var) (var)
+ #define ATM_LOAD(var, val) __atomic_load(&(var), &(val), MOR)
+ #define ATM_STORE(var, val) __atomic_store(&(var), &(val), MOR)
+ #define ATM_CAS(var, exp, val) \
+ __atomic_compare_exchange(&(var), &(exp), &(val), 1, MOR, MOR)
+ #define ATM_FETCH_ADD(var, val) __atomic_fetch_add(&(var), (val), MOR)
+ #define ATM_FETCH_SUB(var, val) __atomic_fetch_sub(&(var), (val), MOR)
+ #define ALLOC_(type, var, count) \
+ (var) = (type*)calloc(count, sizeof(type))
+ #define FREE_(var) free(var)
+#endif
+
+static inline void _sl_node_init(skiplist_node *node,
+ size_t top_layer)
+{
+ if (top_layer > UINT8_MAX) top_layer = UINT8_MAX;
+
+ __SLD_ASSERT(node->is_fully_linked == false);
+ __SLD_ASSERT(node->being_modified == false);
+
+ bool bool_val = false;
+ ATM_STORE(node->is_fully_linked, bool_val);
+ ATM_STORE(node->being_modified, bool_val);
+ ATM_STORE(node->removed, bool_val);
+
+ if (node->top_layer != top_layer ||
+ node->next == NULL) {
+
+ node->top_layer = top_layer;
+
+ if (node->next) FREE_(node->next);
+ ALLOC_(atm_node_ptr, node->next, top_layer+1);
+ }
+}
+
+void skiplist_init(skiplist_raw *slist,
+ skiplist_cmp_t *cmp_func) {
+
+ slist->cmp_func = NULL;
+ slist->aux = NULL;
+
+ // fanout 4 + layer 12: 4^12 ~= up to 17M items under O(lg n) complexity.
+ // for more than 17M items, complexity will grow linearly: O(k lg n).
+ slist->fanout = 4;
+ slist->max_layer = 12;
+ slist->num_entries = 0;
+
+ ALLOC_(atm_uint32_t, slist->layer_entries, slist->max_layer);
+ slist->top_layer = 0;
+
+ skiplist_init_node(&slist->head);
+ skiplist_init_node(&slist->tail);
+
+ _sl_node_init(&slist->head, slist->max_layer);
+ _sl_node_init(&slist->tail, slist->max_layer);
+
+ size_t layer;
+ for (layer = 0; layer < slist->max_layer; ++layer) {
+ slist->head.next[layer] = &slist->tail;
+ slist->tail.next[layer] = NULL;
+ }
+
+ bool bool_val = true;
+ ATM_STORE(slist->head.is_fully_linked, bool_val);
+ ATM_STORE(slist->tail.is_fully_linked, bool_val);
+ slist->cmp_func = cmp_func;
+}
+
+void skiplist_free(skiplist_raw *slist)
+{
+ skiplist_free_node(&slist->head);
+ skiplist_free_node(&slist->tail);
+
+ FREE_(slist->layer_entries);
+ slist->layer_entries = NULL;
+
+ slist->aux = NULL;
+ slist->cmp_func = NULL;
+}
+
+void skiplist_init_node(skiplist_node *node)
+{
+ node->next = NULL;
+
+ bool bool_false = false;
+ ATM_STORE(node->is_fully_linked, bool_false);
+ ATM_STORE(node->being_modified, bool_false);
+ ATM_STORE(node->removed, bool_false);
+
+ node->accessing_next = 0;
+ node->top_layer = 0;
+ node->ref_count = 0;
+}
+
+void skiplist_free_node(skiplist_node *node)
+{
+ FREE_(node->next);
+ node->next = NULL;
+}
+
+size_t skiplist_get_size(skiplist_raw* slist) {
+ uint32_t val;
+ ATM_LOAD(slist->num_entries, val);
+ return val;
+}
+
+skiplist_raw_config skiplist_get_default_config()
+{
+ skiplist_raw_config ret;
+ ret.fanout = 4;
+ ret.maxLayer = 12;
+ ret.aux = NULL;
+ return ret;
+}
+
+skiplist_raw_config skiplist_get_config(skiplist_raw *slist)
+{
+ skiplist_raw_config ret;
+ ret.fanout = slist->fanout;
+ ret.maxLayer = slist->max_layer;
+ ret.aux = slist->aux;
+ return ret;
+}
+
+void skiplist_set_config(skiplist_raw *slist,
+ skiplist_raw_config config)
+{
+ slist->fanout = config.fanout;
+
+ slist->max_layer = config.maxLayer;
+ if (slist->layer_entries) FREE_(slist->layer_entries);
+ ALLOC_(atm_uint32_t, slist->layer_entries, slist->max_layer);
+
+ slist->aux = config.aux;
+}
+
+static inline int _sl_cmp(skiplist_raw *slist,
+ skiplist_node *a,
+ skiplist_node *b)
+{
+ if (a == b) return 0;
+ if (a == &slist->head || b == &slist->tail) return -1;
+ if (a == &slist->tail || b == &slist->head) return 1;
+ return slist->cmp_func(a, b, slist->aux);
+}
+
+static inline bool _sl_valid_node(skiplist_node *node) {
+ bool is_fully_linked = false;
+ ATM_LOAD(node->is_fully_linked, is_fully_linked);
+ return is_fully_linked;
+}
+
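+// Layout of `accessing_next`, as used by the reader/writer helpers below
+// (derived from the masks in this file):
+//   bits 20..31 (mask 0xfff00000): writer count, normally 0 or 1
+//   bits  0..19 (mask 0x000fffff): reader count
+// A reader spins while any writer bit is set; a writer additionally waits
+// until the reader count drains to zero before it owns the node.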
+static inline void _sl_read_lock_an(skiplist_node* node) {
+ for(;;) {
+ // Wait for active writer to release the lock
+ uint32_t accessing_next = 0;
+ ATM_LOAD(node->accessing_next, accessing_next);
+ while (accessing_next & 0xfff00000) {
+ YIELD();
+ ATM_LOAD(node->accessing_next, accessing_next);
+ }
+
+ ATM_FETCH_ADD(node->accessing_next, 0x1);
+ ATM_LOAD(node->accessing_next, accessing_next);
+ if ((accessing_next & 0xfff00000) == 0) {
+ return;
+ }
+
+ ATM_FETCH_SUB(node->accessing_next, 0x1);
+ }
+}
+
+static inline void _sl_read_unlock_an(skiplist_node* node) {
+ ATM_FETCH_SUB(node->accessing_next, 0x1);
+}
+
+static inline void _sl_write_lock_an(skiplist_node* node) {
+ for(;;) {
+ // Wait for active writer to release the lock
+ uint32_t accessing_next = 0;
+ ATM_LOAD(node->accessing_next, accessing_next);
+ while (accessing_next & 0xfff00000) {
+ YIELD();
+ ATM_LOAD(node->accessing_next, accessing_next);
+ }
+
+ ATM_FETCH_ADD(node->accessing_next, 0x100000);
+ ATM_LOAD(node->accessing_next, accessing_next);
+ if((accessing_next & 0xfff00000) == 0x100000) {
+ // Wait until there's no more readers
+ while (accessing_next & 0x000fffff) {
+ YIELD();
+ ATM_LOAD(node->accessing_next, accessing_next);
+ }
+ return;
+ }
+
+ ATM_FETCH_SUB(node->accessing_next, 0x100000);
+ }
+}
+
+static inline void _sl_write_unlock_an(skiplist_node* node) {
+ ATM_FETCH_SUB(node->accessing_next, 0x100000);
+}
+
+// Note: it increases the `ref_count` of the returned node.
+// The caller is responsible for decreasing it.
+static inline skiplist_node* _sl_next(skiplist_raw* slist,
+ skiplist_node* cur_node,
+ int layer,
+ skiplist_node* node_to_find,
+ bool* found)
+{
+ skiplist_node *next_node = NULL;
+
+ // Turn on `accessing_next`:
+ // now `cur_node` is not removable from skiplist,
+ // which means that `cur_node->next` will be consistent
+ // until clearing `accessing_next`.
+ _sl_read_lock_an(cur_node); {
+ if (!_sl_valid_node(cur_node)) {
+ _sl_read_unlock_an(cur_node);
+ return NULL;
+ }
+ ATM_LOAD(cur_node->next[layer], next_node);
+ // Increase ref count of `next_node`:
+ // now `next_node` is not destroyable.
+
+ // << Remaining issue >>
+ // 1) initially: A -> B
+ // 2) T1: call _sl_next(A):
+ // A.accessing_next := true;
+ // next_node := B;
+ // ----- context switch happens here -----
+ // 3) T2: insert C:
+ // A -> C -> B
+ // 4) T2: and then erase B, and free B.
+ // A -> C B(freed)
+ // ----- context switch back again -----
+ // 5) T1: try to do something with B,
+ // but crash happens.
+ //
+ // ... maybe resolved using RW spinlock (Aug 21, 2017).
+ __SLD_ASSERT(next_node);
+ ATM_FETCH_ADD(next_node->ref_count, 1);
+ __SLD_ASSERT(next_node->top_layer >= layer);
+ } _sl_read_unlock_an(cur_node);
+
+ size_t num_nodes = 0;
+ skiplist_node* nodes[256];
+
+ while ( (next_node && !_sl_valid_node(next_node)) ||
+ next_node == node_to_find ) {
+ if (found && node_to_find == next_node) *found = true;
+
+ skiplist_node* temp = next_node;
+ _sl_read_lock_an(temp); {
+ __SLD_ASSERT(next_node);
+ if (!_sl_valid_node(temp)) {
+ _sl_read_unlock_an(temp);
+ ATM_FETCH_SUB(temp->ref_count, 1);
+ next_node = NULL;
+ break;
+ }
+ ATM_LOAD(temp->next[layer], next_node);
+ ATM_FETCH_ADD(next_node->ref_count, 1);
+ nodes[num_nodes++] = temp;
+ __SLD_ASSERT(next_node->top_layer >= layer);
+ } _sl_read_unlock_an(temp);
+ }
+
+ for (size_t ii=0; ii<num_nodes; ++ii) {
+ ATM_FETCH_SUB(nodes[ii]->ref_count, 1);
+ }
+
+ return next_node;
+}
+
+static inline size_t _sl_decide_top_layer(skiplist_raw *slist)
+{
+ size_t layer = 0;
+ while (layer+1 < slist->max_layer) {
+ // coin flip
+ if (rand() % slist->fanout == 0) {
+ // grow: 1/fanout probability
+ layer++;
+ } else {
+ // stop: 1 - 1/fanout probability
+ break;
+ }
+ }
+ return layer;
+}
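+
+// With the default fanout of 4, a node reaches layer k with probability
+// (1/4)^k, so the expected number of next-pointers per node stays close to
+// 4/3 and lookups stay O(log n) for the sizes noted in skiplist_init().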
+
+static inline void _sl_clr_flags(skiplist_node** node_arr,
+ int start_layer,
+ int top_layer)
+{
+ int layer;
+ for (layer = start_layer; layer <= top_layer; ++layer) {
+ if ( layer == top_layer ||
+ node_arr[layer] != node_arr[layer+1] ) {
+
+ bool exp = true;
+ bool bool_false = false;
+ if (!ATM_CAS(node_arr[layer]->being_modified, exp, bool_false)) {
+ __SLD_ASSERT(0);
+ }
+ }
+ }
+}
+
+static inline bool _sl_valid_prev_next(skiplist_node *prev,
+ skiplist_node *next) {
+ return _sl_valid_node(prev) && _sl_valid_node(next);
+}
+
+static inline int _skiplist_insert(skiplist_raw *slist,
+ skiplist_node *node,
+ bool no_dup)
+{
+ __SLD_(
+ thread_local std::thread::id tid = std::this_thread::get_id();
+ thread_local size_t tid_hash = std::hash<std::thread::id>{}(tid) % 256;
+ (void)tid_hash;
+ )
+
+ int top_layer = _sl_decide_top_layer(slist);
+ bool bool_true = true;
+
+ // init node before insertion
+ _sl_node_init(node, top_layer);
+ _sl_write_lock_an(node);
+
+ skiplist_node* prevs[SKIPLIST_MAX_LAYER];
+ skiplist_node* nexts[SKIPLIST_MAX_LAYER];
+
+ __SLD_P("%02x ins %p begin\n", (int)tid_hash, node);
+
+insert_retry:
+ // in pure C, a label can only be part of a stmt.
+ (void)top_layer;
+
+ int cmp = 0, cur_layer = 0, layer;
+ skiplist_node *cur_node = &slist->head;
+ ATM_FETCH_ADD(cur_node->ref_count, 1);
+
+ __SLD_(size_t nh = 0);
+ __SLD_(thread_local skiplist_node* history[1024]; (void)history);
+
+ int sl_top_layer = slist->top_layer;
+ if (top_layer > sl_top_layer) sl_top_layer = top_layer;
+ for (cur_layer = sl_top_layer; cur_layer >= 0; --cur_layer) {
+ do {
+ __SLD_( history[nh++] = cur_node );
+
+ skiplist_node *next_node = _sl_next(slist, cur_node, cur_layer,
+ NULL, NULL);
+ if (!next_node) {
+ _sl_clr_flags(prevs, cur_layer+1, top_layer);
+ ATM_FETCH_SUB(cur_node->ref_count, 1);
+ YIELD();
+ goto insert_retry;
+ }
+ cmp = _sl_cmp(slist, node, next_node);
+ if (cmp > 0) {
+ // cur_node < next_node < node
+ // => move to next node
+ skiplist_node* temp = cur_node;
+ cur_node = next_node;
+ ATM_FETCH_SUB(temp->ref_count, 1);
+ continue;
+ } else {
+ // otherwise: cur_node < node <= next_node
+ ATM_FETCH_SUB(next_node->ref_count, 1);
+ }
+
+ if (no_dup && cmp == 0) {
+ // Duplicate key is not allowed.
+ _sl_clr_flags(prevs, cur_layer+1, top_layer);
+ ATM_FETCH_SUB(cur_node->ref_count, 1);
+ return -1;
+ }
+
+ if (cur_layer <= top_layer) {
+ prevs[cur_layer] = cur_node;
+ nexts[cur_layer] = next_node;
+ // both 'prev' and 'next' should be fully linked before
+ // insertion, and no other thread should modify 'prev'
+ // at the same time.
+
+ int error_code = 0;
+ int locked_layer = cur_layer + 1;
+
+ // check if prev node is duplicated with upper layer
+ if (cur_layer < top_layer &&
+ prevs[cur_layer] == prevs[cur_layer+1]) {
+ // duplicate
+ // => which means that 'being_modified' flag is already true
+ // => do nothing
+ } else {
+ bool expected = false;
+ if (ATM_CAS(prevs[cur_layer]->being_modified,
+ expected, bool_true)) {
+ locked_layer = cur_layer;
+ } else {
+ error_code = -1;
+ }
+ }
+
+ if (error_code == 0 &&
+ !_sl_valid_prev_next(prevs[cur_layer], nexts[cur_layer])) {
+ error_code = -2;
+ }
+
+ if (error_code != 0) {
+ __SLD_RT_INS(error_code, node, top_layer, cur_layer);
+ _sl_clr_flags(prevs, locked_layer, top_layer);
+ ATM_FETCH_SUB(cur_node->ref_count, 1);
+ YIELD();
+ goto insert_retry;
+ }
+
+ // set current node's pointers
+ ATM_STORE(node->next[cur_layer], nexts[cur_layer]);
+
+ // check if `cur_node->next` has been changed from `next_node`.
+ skiplist_node* next_node_again =
+ _sl_next(slist, cur_node, cur_layer, NULL, NULL);
+ ATM_FETCH_SUB(next_node_again->ref_count, 1);
+ if (next_node_again != next_node) {
+ __SLD_NC_INS(cur_node, next_node, top_layer, cur_layer);
+ // clear including the current layer
+ // as we already set modification flag above.
+ _sl_clr_flags(prevs, cur_layer, top_layer);
+ ATM_FETCH_SUB(cur_node->ref_count, 1);
+ YIELD();
+ goto insert_retry;
+ }
+ }
+
+ if (cur_layer) {
+ // non-bottom layer => go down
+ break;
+ }
+
+ // bottom layer => insertion succeeded
+ // change prev/next nodes' prev/next pointers from 0 ~ top_layer
+ for (layer = 0; layer <= top_layer; ++layer) {
+ // `accessing_next` works as a spin-lock.
+ _sl_write_lock_an(prevs[layer]);
+ skiplist_node* exp = nexts[layer];
+ if ( !ATM_CAS(prevs[layer]->next[layer], exp, node) ) {
+ __SLD_P("%02x ASSERT ins %p[%d] -> %p (expected %p)\n",
+ (int)tid_hash, prevs[layer], cur_layer,
+ ATM_GET(prevs[layer]->next[layer]), nexts[layer] );
+ __SLD_ASSERT(0);
+ }
+ __SLD_P("%02x ins %p[%d] -> %p -> %p\n",
+ (int)tid_hash, prevs[layer], layer,
+ node, ATM_GET(node->next[layer]) );
+ _sl_write_unlock_an(prevs[layer]);
+ }
+
+ // now this node is fully linked
+ ATM_STORE(node->is_fully_linked, bool_true);
+
+ // allow removing next nodes
+ _sl_write_unlock_an(node);
+
+ __SLD_P("%02x ins %p done\n", (int)tid_hash, node);
+
+ ATM_FETCH_ADD(slist->num_entries, 1);
+ ATM_FETCH_ADD(slist->layer_entries[node->top_layer], 1);
+ for (int ii=slist->max_layer-1; ii>=0; --ii) {
+ if (slist->layer_entries[ii] > 0) {
+ slist->top_layer = ii;
+ break;
+ }
+ }
+
+ // modification is done for all layers
+ _sl_clr_flags(prevs, 0, top_layer);
+ ATM_FETCH_SUB(cur_node->ref_count, 1);
+
+ return 0;
+ } while (cur_node != &slist->tail);
+ }
+ return 0;
+}
+
+int skiplist_insert(skiplist_raw *slist,
+ skiplist_node *node)
+{
+ return _skiplist_insert(slist, node, false);
+}
+
+int skiplist_insert_nodup(skiplist_raw *slist,
+ skiplist_node *node)
+{
+ return _skiplist_insert(slist, node, true);
+}
+
+typedef enum {
+ SM = -2,
+ SMEQ = -1,
+ EQ = 0,
+ GTEQ = 1,
+ GT = 2
+} _sl_find_mode;
+
+// Note: it increases the `ref_count` of the returned node.
+// The caller is responsible for decreasing it.
+static inline skiplist_node* _sl_find(skiplist_raw *slist,
+ skiplist_node *query,
+ _sl_find_mode mode)
+{
+ // mode:
+ // SM -2: smaller
+ // SMEQ -1: smaller or equal
+ // EQ 0: equal
+ // GTEQ 1: greater or equal
+ // GT 2: greater
+find_retry:
+ (void)mode;
+ int cmp = 0;
+ int cur_layer = 0;
+ skiplist_node *cur_node = &slist->head;
+ ATM_FETCH_ADD(cur_node->ref_count, 1);
+
+ __SLD_(size_t nh = 0);
+ __SLD_(thread_local skiplist_node* history[1024]; (void)history);
+
+ uint8_t sl_top_layer = slist->top_layer;
+ for (cur_layer = sl_top_layer; cur_layer >= 0; --cur_layer) {
+ do {
+ __SLD_(history[nh++] = cur_node);
+
+ skiplist_node *next_node = _sl_next(slist, cur_node, cur_layer,
+ NULL, NULL);
+ if (!next_node) {
+ ATM_FETCH_SUB(cur_node->ref_count, 1);
+ YIELD();
+ goto find_retry;
+ }
+ cmp = _sl_cmp(slist, query, next_node);
+ if (cmp > 0) {
+ // cur_node < next_node < query
+ // => move to next node
+ skiplist_node* temp = cur_node;
+ cur_node = next_node;
+ ATM_FETCH_SUB(temp->ref_count, 1);
+ continue;
+ } else if (-1 <= mode && mode <= 1 && cmp == 0) {
+ // cur_node < query == next_node .. return
+ ATM_FETCH_SUB(cur_node->ref_count, 1);
+ return next_node;
+ }
+
+ // otherwise: cur_node < query < next_node
+ if (cur_layer) {
+ // non-bottom layer => go down
+ ATM_FETCH_SUB(next_node->ref_count, 1);
+ break;
+ }
+
+ // bottom layer
+ if (mode < 0 && cur_node != &slist->head) {
+ // smaller mode
+ ATM_FETCH_SUB(next_node->ref_count, 1);
+ return cur_node;
+ } else if (mode > 0 && next_node != &slist->tail) {
+ // greater mode
+ ATM_FETCH_SUB(cur_node->ref_count, 1);
+ return next_node;
+ }
+ // otherwise: exact match mode OR not found
+ ATM_FETCH_SUB(cur_node->ref_count, 1);
+ ATM_FETCH_SUB(next_node->ref_count, 1);
+ return NULL;
+ } while (cur_node != &slist->tail);
+ }
+
+ return NULL;
+}
+
+skiplist_node* skiplist_find(skiplist_raw *slist,
+ skiplist_node *query)
+{
+ return _sl_find(slist, query, EQ);
+}
+
+skiplist_node* skiplist_find_smaller_or_equal(skiplist_raw *slist,
+ skiplist_node *query)
+{
+ return _sl_find(slist, query, SMEQ);
+}
+
+skiplist_node* skiplist_find_greater_or_equal(skiplist_raw *slist,
+ skiplist_node *query)
+{
+ return _sl_find(slist, query, GTEQ);
+}
+
+int skiplist_erase_node_passive(skiplist_raw *slist,
+ skiplist_node *node)
+{
+ __SLD_(
+ thread_local std::thread::id tid = std::this_thread::get_id();
+ thread_local size_t tid_hash = std::hash<std::thread::id>{}(tid) % 256;
+ (void)tid_hash;
+ )
+
+ int top_layer = node->top_layer;
+ bool bool_true = true, bool_false = false;
+ bool removed = false;
+ bool is_fully_linked = false;
+
+ ATM_LOAD(node->removed, removed);
+ if (removed) {
+ // already removed
+ return -1;
+ }
+
+ skiplist_node* prevs[SKIPLIST_MAX_LAYER];
+ skiplist_node* nexts[SKIPLIST_MAX_LAYER];
+
+ bool expected = false;
+ if (!ATM_CAS(node->being_modified, expected, bool_true)) {
+ // already being modified .. cannot work on this node for now.
+ __SLD_BM(node);
+ return -2;
+ }
+
+ // set removed flag first, so that reader cannot read this node.
+ ATM_STORE(node->removed, bool_true);
+
+ __SLD_P("%02x rmv %p begin\n", (int)tid_hash, node);
+
+erase_node_retry:
+ ATM_LOAD(node->is_fully_linked, is_fully_linked);
+ if (!is_fully_linked) {
+ // already unlinked; removal was done by another thread
+ ATM_STORE(node->removed, bool_false);
+ ATM_STORE(node->being_modified, bool_false);
+ return -3;
+ }
+
+ int cmp = 0;
+ bool found_node_to_erase = false;
+ (void)found_node_to_erase;
+ skiplist_node *cur_node = &slist->head;
+ ATM_FETCH_ADD(cur_node->ref_count, 1);
+
+ __SLD_(size_t nh = 0);
+ __SLD_(thread_local skiplist_node* history[1024]; (void)history);
+
+ int cur_layer = slist->top_layer;
+ for (; cur_layer >= 0; --cur_layer) {
+ do {
+ __SLD_( history[nh++] = cur_node );
+
+ bool node_found = false;
+ skiplist_node *next_node = _sl_next(slist, cur_node, cur_layer,
+ node, &node_found);
+ if (!next_node) {
+ _sl_clr_flags(prevs, cur_layer+1, top_layer);
+ ATM_FETCH_SUB(cur_node->ref_count, 1);
+ YIELD();
+ goto erase_node_retry;
+ }
+
+ // Note: unlike insert(), we should find exact position of `node`.
+ cmp = _sl_cmp(slist, node, next_node);
+ if (cmp > 0 || (cur_layer <= top_layer && !node_found) ) {
+ // cur_node <= next_node < node
+ // => move to next node
+ skiplist_node* temp = cur_node;
+ cur_node = next_node;
+ __SLD_( if (cmp > 0) {
+ int cmp2 = _sl_cmp(slist, cur_node, node);
+ if (cmp2 > 0) {
+ // node < cur_node <= next_node: not found.
+ _sl_clr_flags(prevs, cur_layer+1, top_layer);
+ ATM_FETCH_SUB(temp->ref_count, 1);
+ ATM_FETCH_SUB(next_node->ref_count, 1);
+ __SLD_ASSERT(0);
+ }
+ } )
+ ATM_FETCH_SUB(temp->ref_count, 1);
+ continue;
+ } else {
+ // otherwise: cur_node <= node <= next_node
+ ATM_FETCH_SUB(next_node->ref_count, 1);
+ }
+
+ if (cur_layer <= top_layer) {
+ prevs[cur_layer] = cur_node;
+ // note: 'next_node' and 'node' should not be the same,
+ // as 'removed' flag is already set.
+ __SLD_ASSERT(next_node != node);
+ nexts[cur_layer] = next_node;
+
+ // check if prev node duplicates with upper layer
+ int error_code = 0;
+ int locked_layer = cur_layer + 1;
+ if (cur_layer < top_layer &&
+ prevs[cur_layer] == prevs[cur_layer+1]) {
+ // duplicate with upper layer
+ // => which means that 'being_modified' flag is already true
+ // => do nothing.
+ } else {
+ expected = false;
+ if (ATM_CAS(prevs[cur_layer]->being_modified,
+ expected, bool_true)) {
+ locked_layer = cur_layer;
+ } else {
+ error_code = -1;
+ }
+ }
+
+ if (error_code == 0 &&
+ !_sl_valid_prev_next(prevs[cur_layer], nexts[cur_layer])) {
+ error_code = -2;
+ }
+
+ if (error_code != 0) {
+ __SLD_RT_RMV(error_code, node, top_layer, cur_layer);
+ _sl_clr_flags(prevs, locked_layer, top_layer);
+ ATM_FETCH_SUB(cur_node->ref_count, 1);
+ YIELD();
+ goto erase_node_retry;
+ }
+
+ skiplist_node* next_node_again =
+ _sl_next(slist, cur_node, cur_layer, node, NULL);
+ ATM_FETCH_SUB(next_node_again->ref_count, 1);
+ if (next_node_again != nexts[cur_layer]) {
+ // `next` pointer has been changed, retry.
+ __SLD_NC_RMV(cur_node, nexts[cur_layer], top_layer, cur_layer);
+ _sl_clr_flags(prevs, cur_layer, top_layer);
+ ATM_FETCH_SUB(cur_node->ref_count, 1);
+ YIELD();
+ goto erase_node_retry;
+ }
+ }
+ if (cur_layer == 0) found_node_to_erase = true;
+ // go down
+ break;
+ } while (cur_node != &slist->tail);
+ }
+ // Does not exist in the skiplist; should not happen.
+ __SLD_ASSERT(found_node_to_erase);
+ // bottom layer => removal succeeded.
+ // mark this node unlinked
+ _sl_write_lock_an(node); {
+ ATM_STORE(node->is_fully_linked, bool_false);
+ } _sl_write_unlock_an(node);
+
+ // change prev nodes' next pointer from 0 ~ top_layer
+ for (cur_layer = 0; cur_layer <= top_layer; ++cur_layer) {
+ _sl_write_lock_an(prevs[cur_layer]);
+ skiplist_node* exp = node;
+ __SLD_ASSERT(exp != nexts[cur_layer]);
+ __SLD_ASSERT(nexts[cur_layer]->is_fully_linked);
+ if ( !ATM_CAS(prevs[cur_layer]->next[cur_layer],
+ exp, nexts[cur_layer]) ) {
+ __SLD_P("%02x ASSERT rmv %p[%d] -> %p (node %p)\n",
+ (int)tid_hash, prevs[cur_layer], cur_layer,
+ ATM_GET(prevs[cur_layer]->next[cur_layer]), node );
+ __SLD_ASSERT(0);
+ }
+ __SLD_ASSERT(nexts[cur_layer]->top_layer >= cur_layer);
+ __SLD_P("%02x rmv %p[%d] -> %p (node %p)\n",
+ (int)tid_hash, prevs[cur_layer], cur_layer,
+ nexts[cur_layer], node);
+ _sl_write_unlock_an(prevs[cur_layer]);
+ }
+
+ __SLD_P("%02x rmv %p done\n", (int)tid_hash, node);
+
+ ATM_FETCH_SUB(slist->num_entries, 1);
+ ATM_FETCH_SUB(slist->layer_entries[node->top_layer], 1);
+ for (int ii=slist->max_layer-1; ii>=0; --ii) {
+ if (slist->layer_entries[ii] > 0) {
+ slist->top_layer = ii;
+ break;
+ }
+ }
+
+ // modification is done for all layers
+ _sl_clr_flags(prevs, 0, top_layer);
+ ATM_FETCH_SUB(cur_node->ref_count, 1);
+
+ ATM_STORE(node->being_modified, bool_false);
+
+ return 0;
+}
+
+int skiplist_erase_node(skiplist_raw *slist,
+ skiplist_node *node)
+{
+ int ret = 0;
+ do {
+ ret = skiplist_erase_node_passive(slist, node);
+ // if ret == -2, another thread is accessing the same node
+ // at the same time; try again.
+ } while (ret == -2);
+ return ret;
+}
+
+int skiplist_erase(skiplist_raw *slist,
+ skiplist_node *query)
+{
+ skiplist_node *found = skiplist_find(slist, query);
+ if (!found) {
+ // key not found
+ return -4;
+ }
+
+ int ret = 0;
+ do {
+ ret = skiplist_erase_node_passive(slist, found);
+ // if ret == -2, another thread is accessing the same node
+ // at the same time; try again.
+ } while (ret == -2);
+
+ ATM_FETCH_SUB(found->ref_count, 1);
+ return ret;
+}
+
+int skiplist_is_valid_node(skiplist_node* node) {
+ return _sl_valid_node(node);
+}
+
+int skiplist_is_safe_to_free(skiplist_node* node) {
+ if (node->accessing_next) return 0;
+ if (node->being_modified) return 0;
+ if (!node->removed) return 0;
+
+ uint16_t ref_count = 0;
+ ATM_LOAD(node->ref_count, ref_count);
+ if (ref_count) return 0;
+ return 1;
+}
+
+void skiplist_wait_for_free(skiplist_node* node) {
+ while (!skiplist_is_safe_to_free(node)) {
+ YIELD();
+ }
+}
+
+void skiplist_grab_node(skiplist_node* node) {
+ ATM_FETCH_ADD(node->ref_count, 1);
+}
+
+void skiplist_release_node(skiplist_node* node) {
+ __SLD_ASSERT(node->ref_count);
+ ATM_FETCH_SUB(node->ref_count, 1);
+}
+
+skiplist_node* skiplist_next(skiplist_raw *slist,
+ skiplist_node *node) {
+ // << Issue >>
+ // If `node` is already removed and its next node is also removed
+ // and then released, the link update will not be applied to `node`
+ // as it is already unreachable from the skiplist. `node` still points to
+ // the released node, so `_sl_next(node)` may return a corrupted
+ // memory region.
+ //
+ // 0) initial:
+ // A -> B -> C -> D
+ //
+ // 1) B is `node`, which is removed but not yet released:
+ // B --+-> C -> D
+ // |
+ // A --+
+ //
+ // 2) remove C, and then release:
+ // B -> !C! +-> D
+ // |
+ // A --------+
+ //
+ // 3) skiplist_next(B):
+ // will fetch C, which is already released and so
+ // may contain garbage data.
+ //
+ // In this case, start over from the top layer,
+ // to find valid link (same as in prev()).
+
+ skiplist_node *next = _sl_next(slist, node, 0, NULL, NULL);
+ if (!next) next = _sl_find(slist, node, GT);
+
+ if (next == &slist->tail) return NULL;
+ return next;
+}
+
+skiplist_node* skiplist_prev(skiplist_raw *slist,
+ skiplist_node *node) {
+ skiplist_node *prev = _sl_find(slist, node, SM);
+ if (prev == &slist->head) return NULL;
+ return prev;
+}
+
+skiplist_node* skiplist_begin(skiplist_raw *slist) {
+ skiplist_node *next = NULL;
+ while (!next) {
+ next = _sl_next(slist, &slist->head, 0, NULL, NULL);
+ }
+ if (next == &slist->tail) return NULL;
+ return next;
+}
+
+skiplist_node* skiplist_end(skiplist_raw *slist) {
+ return skiplist_prev(slist, &slist->tail);
+}
+
diff --git a/engine/skiplist.go b/engine/skiplist.go
new file mode 100644
index 00000000..2bb72b54
--- /dev/null
+++ b/engine/skiplist.go
@@ -0,0 +1,87 @@
+package engine
+
+//#cgo CXXFLAGS: -std=c++11
+// #include <stdlib.h>
+// #include "skiplist.h"
+// #include "kv_skiplist.h"
+import "C"
+import (
+ "sync"
+ "sync/atomic"
+ "unsafe"
+)
+
+// byteToChar returns *C.char from byte slice.
+func byteToChar(b []byte) *C.char {
+ var c *C.char
+ if len(b) > 0 {
+ c = (*C.char)(unsafe.Pointer(&b[0]))
+ }
+ return c
+}
+
+type skipList struct {
+ csl *C.skiplist_raw
+ wl sync.Mutex
+ closed int32
+}
+
+func NewSkipList() *skipList {
+ return &skipList{
+ csl: C.kv_skiplist_create(),
+ }
+}
+
+func (sl *skipList) Destroy() {
+ atomic.StoreInt32(&sl.closed, 1)
+ C.kv_skiplist_destroy(sl.csl)
+}
+
+func (sl *skipList) IsClosed() bool {
+ return atomic.LoadInt32(&sl.closed) == 1
+}
+
+func (sl *skipList) Len() int64 {
+ cs := C.skiplist_get_size(sl.csl)
+ return int64(cs)
+}
+
+func (sl *skipList) NewIterator() *SkipListIterator {
+ // the mutex RUnlock must be called when the iterator is closed
+ return &SkipListIterator{
+ sl: sl,
+ cursor: nil,
+ }
+}
+
+func (sl *skipList) Get(key []byte) ([]byte, error) {
+ var (
+ cvsz C.size_t
+ cKey = byteToChar(key)
+ )
+ cv := C.kv_skiplist_get(sl.csl, cKey, C.size_t(len(key)), &cvsz)
+ if cv == nil {
+ return nil, nil
+ }
+ defer C.free(unsafe.Pointer(cv))
+ return C.GoBytes(unsafe.Pointer(cv), C.int(cvsz)), nil
+}
+
+func (sl *skipList) Set(key []byte, value []byte) error {
+ var (
+ cKey = byteToChar(key)
+ cValue = byteToChar(value)
+ )
+ sl.wl.Lock()
+ defer sl.wl.Unlock()
+ C.kv_skiplist_update(sl.csl, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
+ return nil
+}
+
+func (sl *skipList) Delete(key []byte) error {
+ sl.wl.Lock()
+ defer sl.wl.Unlock()
+ cKey := byteToChar(key)
+ C.kv_skiplist_del(sl.csl, cKey, C.size_t(len(key)))
+ return nil
+}
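Note: the wrapper above exposes the C skiplist as a small key/value map guarded by a write mutex. A minimal usage sketch, assuming only the API added in this file (the helper name exampleSkipListUsage is hypothetical and would have to live inside package engine, since the type is unexported):

    func exampleSkipListUsage() error {
        sl := NewSkipList()
        defer sl.Destroy()

        if err := sl.Set([]byte("k1"), []byte("v1")); err != nil {
            return err
        }
        // Get returns (nil, nil) when the key is missing; the value is copied
        // out of the C-side skiplist into Go memory.
        v, err := sl.Get([]byte("k1"))
        if err != nil {
            return err
        }
        _ = v
        return sl.Delete([]byte("k1"))
    }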
diff --git a/engine/skiplist.h b/engine/skiplist.h
new file mode 100644
index 00000000..15f5fb7c
--- /dev/null
+++ b/engine/skiplist.h
@@ -0,0 +1,144 @@
+/************************************************************************
+Modifications Copyright 2017-2019 eBay Inc.
+
+Original Copyright 2017 Jung-Sang Ahn
+See URL: https://github.com/greensky00/skiplist
+ (v0.2.9)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+**************************************************************************/
+
+#ifndef _JSAHN_SKIPLIST_H
+#define _JSAHN_SKIPLIST_H (1)
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define SKIPLIST_MAX_LAYER (64)
+
+struct _skiplist_node;
+
+//#define _STL_ATOMIC (1)
+#ifdef __APPLE__
+ #define _STL_ATOMIC (1)
+#endif
+#if defined(_STL_ATOMIC) && defined(__cplusplus)
+ #include <atomic>
+ typedef std::atomic<_skiplist_node*> atm_node_ptr;
+ typedef std::atomic<bool> atm_bool;
+ typedef std::atomic<uint8_t> atm_uint8_t;
+ typedef std::atomic<uint16_t> atm_uint16_t;
+ typedef std::atomic<uint32_t> atm_uint32_t;
+#else
+ typedef struct _skiplist_node* atm_node_ptr;
+ typedef uint8_t atm_bool;
+ typedef uint8_t atm_uint8_t;
+ typedef uint16_t atm_uint16_t;
+ typedef uint32_t atm_uint32_t;
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct _skiplist_node {
+ atm_node_ptr *next;
+ atm_bool is_fully_linked;
+ atm_bool being_modified;
+ atm_bool removed;
+ uint8_t top_layer; // 0: bottom
+ atm_uint16_t ref_count;
+ atm_uint32_t accessing_next;
+} skiplist_node;
+
+// *a < *b : return neg
+// *a == *b : return 0
+// *a > *b : return pos
+typedef int skiplist_cmp_t(skiplist_node *a, skiplist_node *b, void *aux);
+
+typedef struct {
+ size_t fanout;
+ size_t maxLayer;
+ void *aux;
+} skiplist_raw_config;
+
+typedef struct {
+ skiplist_node head;
+ skiplist_node tail;
+ skiplist_cmp_t *cmp_func;
+ void *aux;
+ atm_uint32_t num_entries;
+ atm_uint32_t* layer_entries;
+ atm_uint8_t top_layer;
+ uint8_t fanout;
+ uint8_t max_layer;
+} skiplist_raw;
+
+#ifndef _get_entry
+#define _get_entry(ELEM, STRUCT, MEMBER) \
+ ((STRUCT *) ((uint8_t *) (ELEM) - offsetof (STRUCT, MEMBER)))
+#endif
+
+void skiplist_init(skiplist_raw* slist,
+ skiplist_cmp_t* cmp_func);
+void skiplist_free(skiplist_raw* slist);
+
+void skiplist_init_node(skiplist_node* node);
+void skiplist_free_node(skiplist_node* node);
+
+size_t skiplist_get_size(skiplist_raw* slist);
+
+skiplist_raw_config skiplist_get_default_config();
+skiplist_raw_config skiplist_get_config(skiplist_raw* slist);
+
+void skiplist_set_config(skiplist_raw* slist,
+ skiplist_raw_config config);
+
+int skiplist_insert(skiplist_raw* slist,
+ skiplist_node* node);
+int skiplist_insert_nodup(skiplist_raw *slist,
+ skiplist_node *node);
+
+skiplist_node* skiplist_find(skiplist_raw* slist,
+ skiplist_node* query);
+skiplist_node* skiplist_find_smaller_or_equal(skiplist_raw* slist,
+ skiplist_node* query);
+skiplist_node* skiplist_find_greater_or_equal(skiplist_raw* slist,
+ skiplist_node* query);
+
+int skiplist_erase_node_passive(skiplist_raw* slist,
+ skiplist_node* node);
+int skiplist_erase_node(skiplist_raw *slist,
+ skiplist_node *node);
+int skiplist_erase(skiplist_raw* slist,
+ skiplist_node* query);
+
+int skiplist_is_valid_node(skiplist_node* node);
+int skiplist_is_safe_to_free(skiplist_node* node);
+void skiplist_wait_for_free(skiplist_node* node);
+
+void skiplist_grab_node(skiplist_node* node);
+void skiplist_release_node(skiplist_node* node);
+
+skiplist_node* skiplist_next(skiplist_raw* slist,
+ skiplist_node* node);
+skiplist_node* skiplist_prev(skiplist_raw* slist,
+ skiplist_node* node);
+skiplist_node* skiplist_begin(skiplist_raw* slist);
+skiplist_node* skiplist_end(skiplist_raw* slist);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // _JSAHN_SKIPLIST_H
diff --git a/engine/skiplist_iterator.go b/engine/skiplist_iterator.go
new file mode 100644
index 00000000..604a3c31
--- /dev/null
+++ b/engine/skiplist_iterator.go
@@ -0,0 +1,105 @@
+package engine
+
+// #include <stdlib.h>
+// #include "kv_skiplist.h"
+import "C"
+import "unsafe"
+
+type SkipListIterator struct {
+ sl *skipList
+ cursor *C.skiplist_node
+}
+
+// Valid returns false only when an Iterator has iterated past either the
+// first or the last key in the database.
+func (iter *SkipListIterator) Valid() bool {
+ if iter.cursor == nil {
+ return false
+ }
+ return C.skiplist_is_valid_node(iter.cursor) != 0
+}
+
+// Key returns the key the iterator currently holds.
+func (iter *SkipListIterator) Key() []byte {
+ var cLen C.size_t
+ cKey := C.kv_skiplist_get_node_key(iter.cursor, &cLen)
+ if cKey == nil {
+ return nil
+ }
+ defer C.free(unsafe.Pointer(cKey))
+ return C.GoBytes(unsafe.Pointer(cKey), C.int(cLen))
+}
+
+// Value returns the value in the database the iterator currently holds.
+func (iter *SkipListIterator) Value() []byte {
+ var cLen C.size_t
+ cVal := C.kv_skiplist_get_node_value(iter.cursor, &cLen)
+ if cVal == nil {
+ return nil
+ }
+ defer C.free(unsafe.Pointer(cVal))
+ return C.GoBytes(unsafe.Pointer(cVal), C.int(cLen))
+}
+
+// Next moves the iterator to the next sequential key in the database.
+func (iter *SkipListIterator) Next() {
+ old := iter.cursor
+ iter.cursor = C.skiplist_next(iter.sl.csl, iter.cursor)
+ if old != nil {
+ C.skiplist_release_node(old)
+ }
+}
+
+// Prev moves the iterator to the previous sequential key in the database.
+func (iter *SkipListIterator) Prev() {
+ old := iter.cursor
+ iter.cursor = C.skiplist_prev(iter.sl.csl, iter.cursor)
+ if old != nil {
+ C.skiplist_release_node(old)
+ }
+}
+
+// First moves the iterator to the first key in the database.
+func (iter *SkipListIterator) First() {
+ if iter.cursor != nil {
+ C.skiplist_release_node(iter.cursor)
+ }
+ iter.cursor = C.skiplist_begin(iter.sl.csl)
+}
+
+// Last moves the iterator to the last key in the database.
+func (iter *SkipListIterator) Last() {
+ if iter.cursor != nil {
+ C.skiplist_release_node(iter.cursor)
+ }
+ iter.cursor = C.skiplist_end(iter.sl.csl)
+}
+
+// Seek moves the iterator to the position greater than or equal to the key.
+func (iter *SkipListIterator) Seek(key []byte) {
+ if iter.cursor != nil {
+ C.skiplist_release_node(iter.cursor)
+ }
+ cKey := byteToChar(key)
+ iter.cursor = C.kv_skiplist_find_ge(iter.sl.csl, cKey, C.size_t(len(key)))
+}
+
+// SeekForPrev moves the iterator to the last key that is less than or equal to the target key.
+// With a prefix extractor enabled, Seek() followed by Prev() does not work when seeking to the end
+// of the prefix range; use SeekForPrev instead.
+func (iter *SkipListIterator) SeekForPrev(key []byte) {
+ if iter.cursor != nil {
+ C.skiplist_release_node(iter.cursor)
+ }
+ cKey := byteToChar(key)
+ iter.cursor = C.kv_skiplist_find_le(iter.sl.csl, cKey, C.size_t(len(key)))
+}
+
+// Close closes the iterator.
+func (iter *SkipListIterator) Close() {
+ if iter.cursor == nil {
+ return
+ }
+ C.skiplist_release_node(iter.cursor)
+ iter.cursor = nil
+}
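The iterator keeps a reference on its current node, so Close must be called to release it. A minimal forward-scan sketch, assuming the API above (scanAllKeys is a hypothetical helper inside package engine):

    func scanAllKeys(sl *skipList) [][]byte {
        it := sl.NewIterator()
        defer it.Close() // releases the node reference held by the cursor

        var keys [][]byte
        for it.First(); it.Valid(); it.Next() {
            keys = append(keys, it.Key())
        }
        return keys
    }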
diff --git a/engine/skiplist_test.go b/engine/skiplist_test.go
new file mode 100644
index 00000000..898481db
--- /dev/null
+++ b/engine/skiplist_test.go
@@ -0,0 +1,153 @@
+package engine
+
+import (
+ "strconv"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSkipListOp(t *testing.T) {
+ sl := NewSkipList()
+ defer sl.Destroy()
+ key := []byte("test")
+ value := key
+ v, err := sl.Get(key)
+ assert.Nil(t, err)
+ assert.Nil(t, v)
+ sl.Set(key, value)
+ n := sl.Len()
+ assert.Equal(t, int64(1), n)
+ v, err = sl.Get(key)
+ assert.Nil(t, err)
+ assert.Equal(t, key, v)
+
+ v[0] = 'd'
+ sl.Set(key, v)
+ v2, err := sl.Get(key)
+ assert.Nil(t, err)
+ assert.Equal(t, v, v2)
+ v2[0] = 'e'
+ sl.Set(key, v2)
+ v3, err := sl.Get(key)
+ assert.Nil(t, err)
+ assert.Equal(t, v2, v3)
+
+ sl.Delete(key)
+ v, err = sl.Get(key)
+ assert.Nil(t, err)
+ assert.Nil(t, v)
+}
+
+func TestSkipListIterator(t *testing.T) {
+ sl := NewSkipList()
+ defer sl.Destroy()
+ key := []byte("test")
+ sl.Set(key, key)
+ key2 := []byte("test2")
+ sl.Set(key2, key2)
+ key3 := []byte("test3")
+ sl.Set(key3, key3)
+ key4 := []byte("test4")
+ sl.Set(key4, key4)
+ n := sl.Len()
+ assert.Equal(t, int64(4), n)
+ it := sl.NewIterator()
+ defer it.Close()
+ it.Seek(key3)
+ assert.True(t, it.Valid())
+ assert.Equal(t, key3, it.Key())
+ assert.Equal(t, key3, it.Value())
+ it.Seek([]byte("test1"))
+ assert.True(t, it.Valid())
+ assert.Equal(t, key2, it.Key())
+ assert.Equal(t, key2, it.Value())
+
+ it.First()
+ assert.True(t, it.Valid())
+ assert.Equal(t, key, it.Key())
+ assert.Equal(t, key, it.Value())
+ it.Next()
+ assert.True(t, it.Valid())
+ assert.Equal(t, key2, it.Key())
+ assert.Equal(t, key2, it.Value())
+ it.Next()
+ assert.True(t, it.Valid())
+ assert.Equal(t, key3, it.Key())
+ assert.Equal(t, key3, it.Value())
+ it.Prev()
+ assert.True(t, it.Valid())
+ assert.Equal(t, key2, it.Key())
+ assert.Equal(t, key2, it.Value())
+ it.Last()
+ assert.True(t, it.Valid())
+ assert.Equal(t, key4, it.Key())
+ assert.Equal(t, key4, it.Value())
+ it.Prev()
+ assert.True(t, it.Valid())
+ if !it.Valid() {
+ return
+ }
+ assert.Equal(t, key3, it.Key())
+ assert.Equal(t, key3, it.Value())
+ it.SeekForPrev(key3)
+ assert.True(t, it.Valid())
+ assert.Equal(t, key3, it.Key())
+ assert.Equal(t, key3, it.Value())
+ it.SeekForPrev([]byte("test1"))
+ assert.True(t, it.Valid())
+ assert.Equal(t, key, it.Key())
+ assert.Equal(t, key, it.Value())
+ it.Prev()
+ assert.True(t, !it.Valid())
+}
+
+func TestSkipListIteratorAll(t *testing.T) {
+ sl := NewSkipList()
+ defer sl.Destroy()
+ key := []byte("0")
+ sl.Set(key, key)
+ key1 := []byte("1")
+ sl.Set(key1, key1)
+ key2 := []byte("2")
+ sl.Set(key2, key2)
+ key3 := []byte("3")
+ sl.Set(key3, key3)
+ n := sl.Len()
+ assert.Equal(t, int64(4), n)
+ it := sl.NewIterator()
+ defer it.Close()
+ it.First()
+ cnt := 0
+ for ; it.Valid(); it.Next() {
+ assert.Equal(t, strconv.Itoa(cnt), string(it.Key()))
+ assert.Equal(t, strconv.Itoa(cnt), string(it.Value()))
+ cnt++
+ }
+ assert.Equal(t, n, int64(cnt))
+}
+
+func TestSkipListReverseIteratorAll(t *testing.T) {
+ sl := NewSkipList()
+ defer sl.Destroy()
+ key := []byte("0")
+ sl.Set(key, key)
+ key1 := []byte("1")
+ sl.Set(key1, key1)
+ key2 := []byte("2")
+ sl.Set(key2, key2)
+ key3 := []byte("3")
+ sl.Set(key3, key3)
+ n := sl.Len()
+ assert.Equal(t, int64(4), n)
+ it := sl.NewIterator()
+ defer it.Close()
+ it.Last()
+ cnt := 0
+ for ; it.Valid(); it.Prev() {
+ assert.Equal(t, strconv.Itoa(int(n)-cnt-1), string(it.Key()))
+ assert.Equal(t, strconv.Itoa(int(n)-cnt-1), string(it.Value()))
+ cnt++
+ }
+ assert.Equal(t, n, int64(cnt))
+}
diff --git a/engine/writebatch.go b/engine/writebatch.go
new file mode 100644
index 00000000..9073419f
--- /dev/null
+++ b/engine/writebatch.go
@@ -0,0 +1,111 @@
+package engine
+
+import (
+ "errors"
+
+ "github.com/cockroachdb/pebble"
+ "github.com/youzan/gorocksdb"
+)
+
+type WriteBatch interface {
+ Destroy()
+ Clear()
+ DeleteRange(start, end []byte)
+ Delete(key []byte)
+ Put(key []byte, value []byte)
+ Merge(key []byte, value []byte)
+ Commit() error
+}
+
+type rocksWriteBatch struct {
+ wb *gorocksdb.WriteBatch
+ wo *gorocksdb.WriteOptions
+ db *gorocksdb.DB
+}
+
+func newRocksWriteBatch(db *gorocksdb.DB, wo *gorocksdb.WriteOptions) *rocksWriteBatch {
+ return &rocksWriteBatch{
+ wb: gorocksdb.NewWriteBatch(),
+ wo: wo,
+ db: db,
+ }
+}
+
+func (wb *rocksWriteBatch) Destroy() {
+ wb.wb.Destroy()
+}
+
+func (wb *rocksWriteBatch) Clear() {
+ wb.wb.Clear()
+}
+
+func (wb *rocksWriteBatch) DeleteRange(start, end []byte) {
+ wb.wb.DeleteRange(start, end)
+}
+
+func (wb *rocksWriteBatch) Delete(key []byte) {
+ wb.wb.Delete(key)
+}
+
+func (wb *rocksWriteBatch) Put(key []byte, value []byte) {
+ wb.wb.Put(key, value)
+}
+
+func (wb *rocksWriteBatch) Merge(key []byte, value []byte) {
+ wb.wb.Merge(key, value)
+}
+
+func (wb *rocksWriteBatch) Commit() error {
+ if wb.db == nil || wb.wo == nil {
+ return errors.New("nil db or options")
+ }
+ return wb.db.Write(wb.wo, wb.wb)
+}
+
+type pebbleWriteBatch struct {
+ wb *pebble.Batch
+ wo *pebble.WriteOptions
+ db *pebble.DB
+}
+
+func newPebbleWriteBatch(db *pebble.DB, wo *pebble.WriteOptions) *pebbleWriteBatch {
+ return &pebbleWriteBatch{
+ wb: db.NewBatch(),
+ wo: wo,
+ db: db,
+ }
+}
+
+func (wb *pebbleWriteBatch) Destroy() {
+ wb.wb.Close()
+}
+
+func (wb *pebbleWriteBatch) Clear() {
+ wb.wb.Close()
+ wb.wb = wb.db.NewBatch()
+ // TODO: reuse it
+ //wb.wb.Reset()
+}
+
+func (wb *pebbleWriteBatch) DeleteRange(start, end []byte) {
+ wb.wb.DeleteRange(start, end, wb.wo)
+}
+
+func (wb *pebbleWriteBatch) Delete(key []byte) {
+ wb.wb.Delete(key, wb.wo)
+}
+
+func (wb *pebbleWriteBatch) Put(key []byte, value []byte) {
+ wb.wb.Set(key, value, wb.wo)
+}
+
+func (wb *pebbleWriteBatch) Merge(key []byte, value []byte) {
+ wb.wb.Merge(key, value, wb.wo)
+}
+
+func (wb *pebbleWriteBatch) Commit() error {
+ if wb.db == nil || wb.wo == nil {
+ return errors.New("nil db or options")
+ }
+ return wb.db.Apply(wb.wb, wb.wo)
+}
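The WriteBatch interface lets callers batch mutations without knowing whether the backing store is RocksDB or Pebble. A minimal sketch, assuming a batch obtained from newRocksWriteBatch or newPebbleWriteBatch (applyBatch is a hypothetical helper):

    func applyBatch(wb WriteBatch, puts map[string][]byte, dels [][]byte) error {
        defer wb.Destroy()
        for k, v := range puts {
            wb.Put([]byte(k), v)
        }
        for _, k := range dels {
            wb.Delete(k)
        }
        // Commit writes the whole batch through the underlying engine.
        return wb.Commit()
    }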
diff --git a/go.mod b/go.mod
new file mode 100644
index 00000000..cc9f7ee1
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,59 @@
+module github.com/youzan/ZanRedisDB
+
+go 1.13
+
+require (
+ github.com/BurntSushi/toml v0.3.1
+ github.com/Jeffail/gabs v1.4.0 // indirect
+ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
+ github.com/absolute8511/glog v0.3.1
+ github.com/absolute8511/go-hll v0.0.0-20190228064837-043118556d83
+ github.com/absolute8511/hyperloglog v0.0.0-20171127080255-5259284545fc
+ github.com/absolute8511/hyperloglog2 v0.1.1
+ github.com/absolute8511/redcon v0.9.3
+ github.com/absolute8511/redigo v1.4.6
+ github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 // indirect
+ github.com/alicebob/miniredis v2.5.0+incompatible // indirect
+ github.com/cockroachdb/pebble v0.0.0-20200616214509-8de6baeca713
+ github.com/coreos/etcd v3.1.15+incompatible
+ github.com/coreos/go-semver v0.2.0
+ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 // indirect
+ github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea
+ github.com/dgryski/go-bits v0.0.0-20180113010104-bd8a69a71dc2 // indirect
+ github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect
+ github.com/emirpasic/gods v1.12.0
+ github.com/go-ole/go-ole v1.2.1 // indirect
+ github.com/gobwas/glob v0.2.3
+ github.com/gogo/protobuf v1.3.1
+ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
+ github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf
+ github.com/gomodule/redigo v2.0.0+incompatible // indirect
+ github.com/hashicorp/go-immutable-radix v1.3.0
+ github.com/hashicorp/golang-lru v0.5.4
+ github.com/judwhite/go-svc v1.0.0
+ github.com/julienschmidt/httprouter v1.2.0
+ github.com/mailru/easyjson v0.7.0 // indirect
+ github.com/mreiferson/go-options v0.0.0-20161229190002-77551d20752b
+ github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7 // indirect
+ github.com/prometheus/client_golang v1.3.0
+ github.com/prometheus/client_model v0.1.0
+ github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7
+ github.com/siddontang/goredis v0.0.0-20180423163523-0b4019cbd7b7
+ github.com/stretchr/testify v1.4.0
+ github.com/tidwall/gjson v1.1.0
+ github.com/tidwall/match v1.0.1 // indirect
+ github.com/tidwall/sjson v1.0.0
+ github.com/twmb/murmur3 v1.1.5
+ github.com/ugorji/go v0.0.0-20170107133203-ded73eae5db7 // indirect
+ github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18
+ github.com/youzan/go-zanredisdb v0.6.3
+ github.com/youzan/gorocksdb v0.0.0-20201201080653-1a9b5c65c962
+ github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036 // indirect
+ go.uber.org/zap v1.16.0
+ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553
+ google.golang.org/genproto v0.0.0-20180518175338-11a468237815 // indirect
+ google.golang.org/grpc v1.9.2
+ gopkg.in/natefinch/lumberjack.v2 v2.0.0
+)
+
+replace github.com/hashicorp/go-immutable-radix v1.3.0 => github.com/absolute8511/go-immutable-radix v1.3.1-0.20210225131658-3dcbbb786587
diff --git a/go.sum b/go.sum
new file mode 100644
index 00000000..d162b98c
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,274 @@
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo=
+github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
+github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
+github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/absolute8511/glog v0.3.1 h1:oTuACTGSouUxi7CWuya5azGQ9s7WnsAxrAxAZPKevRk=
+github.com/absolute8511/glog v0.3.1/go.mod h1:T44AH/EtS/Grr3Ujq6U0Lhk1y4960FC9i06pxXblZ2I=
+github.com/absolute8511/go-hll v0.0.0-20190228064837-043118556d83 h1:+pnNO4ZB8LTIUuOSw+n//LPHp1y6etaPxKclS4nCbIY=
+github.com/absolute8511/go-hll v0.0.0-20190228064837-043118556d83/go.mod h1:QMvi1rdckY14QqBq5tccirZ1v/aDGOt2cS1bjccZLnU=
+github.com/absolute8511/go-immutable-radix v1.3.1-0.20210225131658-3dcbbb786587 h1:CPf6PGw3dRaudxu9jYB7LWsXk1VC8JJcCkLSjiYYhpw=
+github.com/absolute8511/go-immutable-radix v1.3.1-0.20210225131658-3dcbbb786587/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/absolute8511/hyperloglog v0.0.0-20171127080255-5259284545fc h1:i/ihfp9MP/tpc9SP8sLPof1On9INZcMWrUbylsTbkb0=
+github.com/absolute8511/hyperloglog v0.0.0-20171127080255-5259284545fc/go.mod h1:AGYMD7u0pQKOyShXxW8RTje9BgU+2bXruQPL6NDSAZE=
+github.com/absolute8511/hyperloglog2 v0.1.1 h1:ZFAXGBcLsbXTtfMVqOJkaJe0Z1GNaeNFEtt3LQVB2GY=
+github.com/absolute8511/hyperloglog2 v0.1.1/go.mod h1:4ZpNQu/xhWhgXqUKLPaqJlbcyBvKsqbVNsxYcv/cqVo=
+github.com/absolute8511/redcon v0.9.3 h1:na5mBQ1OcLxhIP9omakpeWH1/d52UdJmPlTksQEjI0E=
+github.com/absolute8511/redcon v0.9.3/go.mod h1:pcezn7cyNCl6Bbics6NRddlZw1GwLqpNFapwKI14N4A=
+github.com/absolute8511/redigo v1.4.6 h1:22Zgx7rKi/veL2xs7PEOvQS5fy2E5BBW6yRhId9WsTo=
+github.com/absolute8511/redigo v1.4.6/go.mod h1:ncnpDtZTl7oMnWRkyfo8hfwC8+u3HlzK3XEzJ7W/M08=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U=
+github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
+github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI=
+github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894 h1:JLaf/iINcLyjwbtTsCJjc6rtlASgHeIJPrB6QmwURnA=
+github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
+github.com/cockroachdb/pebble v0.0.0-20200616214509-8de6baeca713 h1:SK0BUmdsXF7Ammqw+AmaHOlzFgkmtyDM/WRCTwDpS6I=
+github.com/cockroachdb/pebble v0.0.0-20200616214509-8de6baeca713/go.mod h1:crLnbSFbwAcQNs9FPfI1avHb5BqVgqZcr4r+IzpJ5FM=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/coreos/etcd v3.1.15+incompatible h1:vGoGbT5fUqm0BHb+BKOtdu0rGF3vzDyEv7zbWU3FY+w=
+github.com/coreos/etcd v3.1.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 h1:u9SHYsPQNyt5tgDm3YN7+9dYrpK96E5wFilTFWIDZOM=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea h1:n2Ltr3SrfQlf/9nOna1DoGKxLx3qTSI8Ttl6Xrqp6mw=
+github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-bits v0.0.0-20180113010104-bd8a69a71dc2 h1:2+yip7nN/auel0PDwY7SIaTOxQPI2NwdkZkvpgtc3Pk=
+github.com/dgryski/go-bits v0.0.0-20180113010104-bd8a69a71dc2/go.mod h1:/9UYwwvZuEgp+mQ4960SHWCU1FS+FgdFX+m5ExFByNs=
+github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8=
+github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
+github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
+github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
+github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
+github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y=
+github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
+github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
+github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
+github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws=
+github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=
+github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
+github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/judwhite/go-svc v1.0.0 h1:W447kYhZsqC14hkfNG8XLy9wbYibeMW75g5DtAIpFGw=
+github.com/judwhite/go-svc v1.0.0/go.mod h1:EeMSAFO3mLgEQfcvnZ50JDG0O1uQlagpAbMS6talrXE=
+github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mreiferson/go-options v0.0.0-20161229190002-77551d20752b h1:xjKomx939vefURtocD1uaKvcvAp1dNYX05i0TIpnfVI=
+github.com/mreiferson/go-options v0.0.0-20161229190002-77551d20752b/go.mod h1:A0JOgZNsj9V+npbgxH0Ib75PvrHS6Ezri/4HdcTp/DI=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7 h1:xoIK0ctDddBMnc74udxJYBqlo9Ylnsp1waqjLsnef20=
+github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc=
+github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.1.0 h1:ElTg5tNp4DqfV7UQjDqv2+RJlNzsDtvNAWccbItceIE=
+github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
+github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7 h1:80VN+vGkqM773Br/uNNTSheo3KatTgV8IpjIKjvVLng=
+github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/siddontang/goredis v0.0.0-20180423163523-0b4019cbd7b7 h1:du8SEcI377y8J+zBs7CMOrCZTplYNTJx9ePUIN0/Cfw=
+github.com/siddontang/goredis v0.0.0-20180423163523-0b4019cbd7b7/go.mod h1:DDcKzU3qCuvj/tPnimWSsZZzvk9qvkvrIL5naVBPh5s=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/tidwall/gjson v1.1.0 h1:/7OBSUzFP8NhuzLlHg0vETJrRL02C++0ql5uSY3DITs=
+github.com/tidwall/gjson v1.1.0/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1HtCkPA=
+github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
+github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
+github.com/tidwall/sjson v1.0.0 h1:hOrzQPtGKlKAudQVmU43GkxEgG8TOgKyiKUyb7sE0rs=
+github.com/tidwall/sjson v1.0.0/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y=
+github.com/twmb/murmur3 v1.1.5 h1:i9OLS9fkuLzBXjt6dptlAEyk58fJsSTXbRg3SgVyqgk=
+github.com/twmb/murmur3 v1.1.5/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
+github.com/ugorji/go v0.0.0-20170107133203-ded73eae5db7 h1:BPPUhSq7uU6E9lFzyb81vjwVOhiWwMXp0EpKL75NX+8=
+github.com/ugorji/go v0.0.0-20170107133203-ded73eae5db7/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
+github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 h1:MPPkRncZLN9Kh4MEFmbnK4h3BD7AUmskWv2+EeZJCCs=
+github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/youzan/go-zanredisdb v0.6.3 h1:68jFQk0FNWs6E8dbAF3rWnOGBEHyOagbX1fNS45YZ1w=
+github.com/youzan/go-zanredisdb v0.6.3/go.mod h1:P0v1euhkNAHXWvtP4wvIHqRhvLrfdwSMbVqfdgExMJM=
+github.com/youzan/gorocksdb v0.0.0-20201201080653-1a9b5c65c962 h1:RY0MvaEHer3MjdpAkGXNbIypGkfL5BF3ekzAThOF7Q4=
+github.com/youzan/gorocksdb v0.0.0-20201201080653-1a9b5c65c962/go.mod h1:Ztkk3nnoEJnQDM1iSS/2oNOj1qCZIUWJHY2lHL5YRJg=
+github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036 h1:1b6PAtenNyhsmo/NKXVe34h7JEZKva1YB/ne7K7mqKM=
+github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
+go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM=
+go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc=
+golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b h1:GgiSbuUyC0BlbUmHQBgFqu32eiRR/CEYdjOjOd4zE6Y=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190204203706-41f3e6584952 h1:FDfvYgoVsA7TTZSbgiqjAbfPbK47CNHdWl3h/PJtii0=
+golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191220142924-d4481acd189f h1:68K/z8GLUxV76xGSqwTWw2gyk/jwn79LUL43rES2g8o=
+golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa h1:5E4dL8+NgFOgjwbTKz+OOEGGhP+ectTmF842l6KjupQ=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/genproto v0.0.0-20180518175338-11a468237815 h1:p3qKkjcSW6m32Lr1CInA3jW53vG29/JB6QOvQWie5WI=
+google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/grpc v1.9.2 h1:roJ2Fad4PmV4LRO8LF7CFuMU23BAliEqJHQXv2BW+ng=
+google.golang.org/grpc v1.9.2/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
diff --git a/internal/flume_log/client.go b/internal/flume_log/client.go
new file mode 100644
index 00000000..16f07d27
--- /dev/null
+++ b/internal/flume_log/client.go
@@ -0,0 +1,75 @@
+package flume_log
+
+import (
+ "errors"
+)
+
+const (
+ typeInfo = "info"
+ typeWarn = "warn"
+ typeDebug = "debug"
+ typeError = "error"
+)
+
+// RemoteLogClient describes the remote log client
+type RemoteLogClient struct {
+ AppName string
+ LogIndex string
+ ServerAddress string
+ client *FlumeClient
+}
+
+// NewClient returns a RemoteLogClient
+func NewClient(address string, appName string, logIndex string) *RemoteLogClient {
+ c := NewFlumeClient(address)
+ return &RemoteLogClient{
+ AppName: appName,
+ LogIndex: logIndex,
+ ServerAddress: address,
+ client: c,
+ }
+}
+
+// Info sends a log entry at info level
+func (rlc *RemoteLogClient) Info(msg string, extra interface{}) error {
+ return rlc.log(typeInfo, msg, extra)
+}
+
+// Warn sends a log entry at warn level
+func (rlc *RemoteLogClient) Warn(msg string, extra interface{}) error {
+ return rlc.log(typeWarn, msg, extra)
+}
+
+// Debug sends a log entry at debug level
+func (rlc *RemoteLogClient) Debug(msg string, extra interface{}) error {
+ return rlc.log(typeDebug, msg, extra)
+}
+
+// Error sends a log entry at error level
+func (rlc *RemoteLogClient) Error(msg string, extra interface{}) error {
+ return rlc.log(typeError, msg, extra)
+}
+
+func (rlc *RemoteLogClient) log(level string, msg string, extra interface{}) error {
+ logindex := rlc.LogIndex
+ logItem := NewLogItem(level, rlc.AppName, logindex)
+ logItem.SetTag(msg)
+
+ if extra != nil {
+ detail := make(map[string]interface{})
+ detail["extra"] = []interface{}{extra}
+ logItem.SetDetail(detail)
+ }
+
+ if rlc.client != nil {
+ return rlc.client.SendLog(logItem.Bytes())
+ }
+ return errors.New("no client")
+}
+
+// Stop stops the underlying client connection
+func (rlc *RemoteLogClient) Stop() {
+ if rlc.client != nil {
+ rlc.client.Stop()
+ }
+}
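A minimal usage sketch of the remote log client, assuming the API above (the address and field values are hypothetical); entries are queued and flushed asynchronously by the underlying FlumeClient:

    func exampleRemoteLog() {
        client := NewClient("127.0.0.1:5140", "zankv", "zankv_slowlog")
        defer client.Stop()

        detail := map[string]interface{}{
            "Scope": "youzan:test",
            "Key":   "somekey",
        }
        // Info queues the entry; the send is non-blocking and returns an error
        // if the client is stopped or its buffer is full.
        _ = client.Info("slow write detected", detail)
    }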
diff --git a/internal/flume_log/client_test.go b/internal/flume_log/client_test.go
new file mode 100644
index 00000000..94810b88
--- /dev/null
+++ b/internal/flume_log/client_test.go
@@ -0,0 +1,33 @@
+package flume_log
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestClient_Info(t *testing.T) {
+ client := NewClient("127.0.0.1:5140", "zankv", "zankv_slowlog")
+ detail := make(map[string]interface{})
+
+ detail["Scope"] = "youzan:test"
+ detail["Key"] = "testkey"
+ detail["Note"] = "note_info"
+ err := client.Info("test Info", detail)
+ assert.Nil(t, err)
+
+ detail["Note"] = "note_warn"
+ err = client.Warn("test Warn", detail)
+ assert.Nil(t, err)
+
+ detail["Note"] = "note_debug"
+ err = client.Debug("test Debug", detail)
+ assert.Nil(t, err)
+
+ detail["Note"] = "note_err"
+ err = client.Error("test error", detail)
+ assert.Nil(t, err)
+ // wait for the remote logs to be flushed
+ time.Sleep(time.Second * 10)
+}
diff --git a/internal/flume_log/flume_client.go b/internal/flume_log/flume_client.go
new file mode 100644
index 00000000..5962de0b
--- /dev/null
+++ b/internal/flume_log/flume_client.go
@@ -0,0 +1,134 @@
+package flume_log
+
+import (
+ "bufio"
+ "errors"
+ "log"
+ "net"
+ "time"
+)
+
+type FlumeClient struct {
+ remoteAddr string
+ conn net.Conn
+ bw *bufio.Writer
+ stopChan chan bool
+ bufList chan []byte
+ loopDone chan bool
+}
+
+func NewFlumeClient(agentIP string) *FlumeClient {
+ client := &FlumeClient{
+ bufList: make(chan []byte, 1000),
+ stopChan: make(chan bool),
+ remoteAddr: agentIP,
+ loopDone: make(chan bool),
+ }
+
+ go client.writeLoop()
+ return client
+}
+
+func readLoop(conn net.Conn) {
+ buf := make([]byte, 1024)
+ for {
+ _, err := conn.Read(buf)
+ if err != nil {
+ conn.Close()
+ return
+ }
+ }
+}
+
+func (c *FlumeClient) writeLoop() {
+ defer func() {
+ select {
+ case buf := <-c.bufList:
+ _, err := c.bw.Write(buf)
+ if err != nil {
+ log.Printf("write log %v failed: %v, left data: %v", string(buf), err, len(c.bufList))
+ break
+ }
+ default:
+ }
+ if c.conn != nil {
+ c.bw.Flush()
+ c.conn.Close()
+ c.conn = nil
+ }
+ close(c.loopDone)
+ }()
+ c.reconnect()
+ ticker := time.NewTicker(time.Second * 3)
+ for {
+ if c.conn == nil {
+ err := c.reconnect()
+ if err != nil {
+ select {
+ case <-time.After(time.Second):
+ case <-c.stopChan:
+ return
+ }
+ continue
+ }
+ }
+
+ select {
+ case buf := <-c.bufList:
+ _, err := c.bw.Write(buf)
+ if err != nil {
+ log.Printf("write log %v failed: %v", string(buf), err)
+ c.reconnect()
+ }
+ case <-ticker.C:
+ err := c.bw.Flush()
+ if err != nil {
+ log.Printf("flush write log failed: %v", err)
+ c.reconnect()
+ }
+ case <-c.stopChan:
+ return
+ }
+
+ }
+}
+
+func (c *FlumeClient) SendLog(d []byte) error {
+ select {
+ case <-c.stopChan:
+ return errors.New("flume client stopped")
+ case c.bufList <- d:
+ default:
+ return errors.New("flume client buffer overflowed")
+ }
+ return nil
+}
+
+func (c *FlumeClient) reconnect() (err error) {
+ if c.conn != nil {
+ c.bw.Flush()
+ c.conn.Close()
+ c.conn = nil
+ c.bw = nil
+ }
+ // log.Printf("reconnect flumelogger to %v ", c.remoteAddr)
+ conn, err := net.DialTimeout("tcp", c.remoteAddr, time.Second*5)
+ if err != nil {
+ log.Printf("connect to %v failed: %v", c.remoteAddr, err)
+ return err
+ } else {
+ c.conn = conn
+ c.bw = bufio.NewWriterSize(conn, 1024*8)
+ go readLoop(conn)
+ }
+
+ return nil
+}
+
+// Stop signals the write loop to stop, then waits for it to flush and close the connection
+func (c *FlumeClient) Stop() {
+ if c.stopChan != nil {
+ close(c.stopChan)
+ }
+ <-c.loopDone
+}
diff --git a/internal/flume_log/log_item.go b/internal/flume_log/log_item.go
new file mode 100644
index 00000000..fb86746d
--- /dev/null
+++ b/internal/flume_log/log_item.go
@@ -0,0 +1,98 @@
+package flume_log
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net"
+ "os"
+ "runtime"
+ "time"
+)
+
+var (
+ hostName string
+ internalIp string
+ platform string
+ pid int
+)
+
+func init() {
+ hostName, _ = os.Hostname()
+ ips, _ := net.LookupHost(hostName)
+
+ for _, ip := range ips {
+ if ip != "127.0.0.1" {
+ internalIp = ip
+ break
+ }
+ }
+ platform = runtime.Version()
+ pid = os.Getpid()
+}
+
+func NewLogItem(level string, appName string, logIndex string) *LogItem {
+ return &LogItem{
+ LogHeader{
+ level,
+ appName,
+ logIndex,
+ },
+ LogBody{
+ App: appName,
+ Level: level,
+ Module: logIndex,
+ Platform: platform,
+ },
+ }
+}
+
+type LogItem struct {
+ Header LogHeader
+ Body LogBody
+}
+
+func (self *LogItem) SetDetail(detail map[string]interface{}) {
+ self.Body.Detail = detail
+}
+
+func (self *LogItem) SetTag(tag string) {
+ self.Body.Tag = tag
+}
+
+func (self *LogItem) Bytes() []byte {
+ var tmpBuf bytes.Buffer
+ tmpBuf.WriteString(self.Header.String())
+ tmpBuf.WriteString(" ")
+ tmpBuf.Write(self.Body.Bytes())
+ tmpBuf.WriteString("\n")
+ return tmpBuf.Bytes()
+}
+
+type LogHeader struct {
+ Level string
+ AppName string
+ LogIndex string
+}
+
+func (header *LogHeader) String() string {
+ return fmt.Sprintf(
+ "<158>%s %s/%s %s[%d]: topic=log.%s.%s",
+ time.Now().Format("2006-01-02 15:04:05"), hostName, internalIp, header.Level, pid,
+ header.AppName, header.LogIndex,
+ )
+}
+
+type LogBody struct {
+ App string `json:"app"`
+ Level string `json:"level"`
+ Module string `json:"module"`
+ Tag string `json:"tag"`
+ Detail map[string]interface{} `json:"detail,omitempty"`
+ Platform string `json:"platform"`
+}
+
+func (body *LogBody) Bytes() []byte {
+ jsonb, _ := json.Marshal(body)
+ return jsonb
+}
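For reference, each serialized entry is a syslog-style header, a space, and then the JSON body, terminated by a newline. A minimal sketch, assuming the API above (values are hypothetical; the commented output only approximates the real host, IP, pid, and Go version):

    func exampleLogItemWire() []byte {
        item := NewLogItem("info", "zankv", "zankv_slowlog")
        item.SetTag("slow write")
        item.SetDetail(map[string]interface{}{"key": "somekey"})
        // Roughly:
        // <158>2006-01-02 15:04:05 host/10.0.0.1 info[1234]: topic=log.zankv.zankv_slowlog {"app":"zankv","level":"info","module":"zankv_slowlog","tag":"slow write","detail":{"key":"somekey"},"platform":"go1.13"}
        return item.Bytes()
    }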
diff --git a/jenkins-ci-deploy.sh b/jenkins-ci-deploy.sh
index 384d8383..04976d16 100755
--- a/jenkins-ci-deploy.sh
+++ b/jenkins-ci-deploy.sh
@@ -7,25 +7,8 @@
#sudo yum install devtoolset-3-gcc devtoolset-3-gcc-c++ devtoolset-3-gdb
echo `pwd`
-GoDep=`go env GOPATH`/src/golang.org/x
-mkdir -p $GoDep
-if [ ! -d "$GoDep/net" ]; then
- pushd $GoDep && git clone https://github.com/golang/net.git && popd
-fi
-if [ ! -d "$GoDep/sys" ]; then
- pushd $GoDep && git clone https://github.com/golang/sys.git && popd
-fi
-googleDep=`go env GOPATH`/src/google.golang.org
-mkdir -p $googleDep
-if [ ! -d "$googleDep/grpc" ]; then
- pushd $googleDep && git clone https://github.com/grpc/grpc-go.git grpc && popd
-fi
-if [ ! -d "$googleDep/genproto" ]; then
- pushd $googleDep && git https://github.com/google/go-genproto clone genproto && popd
-fi
-
-go get -d github.com/absolute8511/ZanRedisDB/...
+go get -d github.com/youzan/ZanRedisDB/...
arch=$(go env GOARCH)
os=$(go env GOOS)
goversion=$(go version | awk '{print $3}')
@@ -35,30 +18,29 @@ etcdurl=$ETCD_URL
scl enable devtoolset-3 bash
-rocksdb=`pwd`/rocksdb
+rocksdb=$(pwd)/rocksdb
if [ ! -f "$rocksdb/Makefile" ]; then
rm -rf $rocksdb
git clone https://github.com/absolute8511/rocksdb.git $rocksdb
fi
pushd $rocksdb
git pull
-git checkout v5.8.8-share-rate-limiter
-CC=/opt/rh/devtoolset-3/root/usr/bin/gcc CXX=/opt/rh/devtoolset-3/root/usr/bin/g++ LD=/opt/rh/devtoolset-3/root/usr/bin/ld USE_SSE=1 make static_lib
+git checkout v6.4.6-patched
+PORTABLE=1 WITH_JEMALLOC_FLAG=1 JEMALLOC=1 make static_lib
popd
-LD=/opt/rh/devtoolset-3/root/usr/bin/ld CGO_CFLAGS="-I$rocksdb/include" CGO_LDFLAGS="-L/opt/rh/devtoolset-3/root/usr/lib/gcc/x86_64-redhat-linux/4.9.2 -L$rocksdb -lrocksdb -lstdc++ -lm -lsnappy -lrt -lz -lbz2" go get -u github.com/absolute8511/gorocksdb
-
-wget -c https://raw.githubusercontent.com/pote/gpm/v1.4.0/bin/gpm && chmod +x gpm
-export PATH=`pwd`:$PATH
+export PATH=$(pwd):$PATH
-echo `pwd`
-pushd `go env GOPATH`/src/github.com/absolute8511/ZanRedisDB/
+echo $(pwd)
+pushd $(go env GOPATH)/src/github.com/youzan/ZanRedisDB/
git pull
./pre-dist.sh || true
-./dist.sh
+## we also use gpm in CI because some dependencies cannot be pulled due to the GFW.
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+ROCKSDB=$rocksdb ./dist.sh
popd
-if [ ! -f "`pwd`/etcd-v2.3.8-linux-amd64/etcd" ] && [ -z "$etcdurl" ]; then
+if [ ! -f "$(pwd)/etcd-v2.3.8-linux-amd64/etcd" ] && [ -z "$etcdurl" ]; then
rm -rf etcd-v2.3.8-linux-amd64
wget -c https://github.com/coreos/etcd/releases/download/v2.3.8/etcd-v2.3.8-linux-amd64.tar.gz
tar -xvzf etcd-v2.3.8-linux-amd64.tar.gz
@@ -71,7 +53,7 @@ fi
echo $etcdurl
echo $ETCD_URL
-cp -fp `go env GOPATH`/src/github.com/absolute8511/ZanRedisDB/dist/$LATEST.tar.gz .
+cp -fp $(go env GOPATH)/src/github.com/youzan/ZanRedisDB/dist/$LATEST.tar.gz .
killall zankv || true
killall placedriver || true
killall etcd || true
diff --git a/metric/collheap.go b/metric/collheap.go
new file mode 100644
index 00000000..608dcee5
--- /dev/null
+++ b/metric/collheap.go
@@ -0,0 +1,124 @@
+package metric
+
+import (
+ "container/heap"
+ "sort"
+ "sync"
+)
+
+const (
+ minCollSizeInHeap = 32
+ DefaultHeapCapacity = 100
+)
+
+// An Item is something we manage in a priority queue.
+type Item struct {
+ value []byte // The value of the item; arbitrary.
+ priority int // The priority of the item in the queue.
+ // The index is needed by update and is maintained by the heap.Interface methods.
+ index int // The index of the item in the heap.
+}
+
+// A PriorityQueue implements heap.Interface and holds Items.
+type PriorityQueue []*Item
+
+func (pq PriorityQueue) Len() int { return len(pq) }
+
+func (pq PriorityQueue) Less(i, j int) bool {
+ return pq[i].priority < pq[j].priority
+}
+
+func (pq PriorityQueue) Swap(i, j int) {
+ pq[i], pq[j] = pq[j], pq[i]
+ pq[i].index = i
+ pq[j].index = j
+}
+
+func (pq *PriorityQueue) Push(x interface{}) {
+ n := len(*pq)
+ item := x.(*Item)
+ item.index = n
+ *pq = append(*pq, item)
+}
+
+func (pq *PriorityQueue) Pop() interface{} {
+ old := *pq
+ n := len(old)
+ item := old[n-1]
+ old[n-1] = nil // avoid memory leak
+ item.index = -1 // for safety
+ *pq = old[0 : n-1]
+ return item
+}
+
+// update modifies the priority and value of an Item in the queue.
+func (pq *PriorityQueue) update(item *Item, priority int) {
+ item.priority = priority
+ heap.Fix(pq, item.index)
+}
+
+type CollSizeHeap struct {
+ l sync.Mutex
+ pq PriorityQueue
+ items map[string]*Item
+ capacity int
+}
+
+func NewCollSizeHeap(cap int) *CollSizeHeap {
+ q := make(PriorityQueue, 0)
+ heap.Init(&q)
+ return &CollSizeHeap{
+ pq: q,
+ items: make(map[string]*Item),
+ capacity: cap,
+ }
+}
+
+func (csh *CollSizeHeap) Update(key []byte, collSize int) {
+ csh.l.Lock()
+ defer csh.l.Unlock()
+ item, ok := csh.items[string(key)]
+ if ok {
+ if collSize < minCollSizeInHeap {
+ heap.Remove(&csh.pq, item.index)
+ csh.items[string(key)] = nil
+ delete(csh.items, string(key))
+ } else {
+ csh.pq.update(item, collSize)
+ }
+ } else {
+ if collSize < minCollSizeInHeap {
+ return
+ }
+ item = &Item{
+ value: key,
+ priority: collSize,
+ }
+ heap.Push(&csh.pq, item)
+ csh.items[string(key)] = item
+ if csh.pq.Len() > csh.capacity {
+ old := heap.Pop(&csh.pq)
+ if old != nil {
+ oldItem := old.(*Item)
+ csh.items[string(oldItem.value)] = nil
+ delete(csh.items, string(oldItem.value))
+ }
+ }
+ }
+}
+
+func (csh *CollSizeHeap) keys() topnList {
+ csh.l.Lock()
+ defer csh.l.Unlock()
+ keys := make(topnList, 0, len(csh.items))
+ for _, item := range csh.items {
+ keys = append(keys, TopNInfo{Key: string(item.value), Cnt: int32(item.priority)})
+ }
+ return keys
+}
+
+func (csh *CollSizeHeap) TopKeys() []TopNInfo {
+ keys := csh.keys()
+ sort.Sort(keys)
+ return keys
+}
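
Aside (not part of the patch): a minimal sketch of how the new CollSizeHeap might be driven from caller code; the key names and sizes below are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/youzan/ZanRedisDB/metric"
)

func main() {
	// Track the largest collections, keeping at most DefaultHeapCapacity entries.
	h := metric.NewCollSizeHeap(metric.DefaultHeapCapacity)

	// Collections smaller than minCollSizeInHeap (32) are ignored by Update.
	h.Update([]byte("hash:orders"), 1024)
	h.Update([]byte("zset:rank"), 4096)
	h.Update([]byte("hash:tiny"), 8) // dropped: below the minimum tracked size

	// TopKeys returns the tracked keys sorted by collection size (ascending).
	for _, info := range h.TopKeys() {
		fmt.Printf("%s => %d\n", info.Key, info.Cnt)
	}
}
```
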
diff --git a/metric/collheap_test.go b/metric/collheap_test.go
new file mode 100644
index 00000000..7a5412ce
--- /dev/null
+++ b/metric/collheap_test.go
@@ -0,0 +1,29 @@
+package metric
+
+import (
+ "strconv"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCollSizeHeap(t *testing.T) {
+ h := NewCollSizeHeap(DefaultHeapCapacity)
+ for i := 0; i < minCollSizeInHeap*2; i++ {
+ k := []byte(strconv.Itoa(i))
+ h.Update(k, i)
+ }
+ assert.Equal(t, minCollSizeInHeap, len(h.TopKeys()))
+ keys := h.TopKeys()
+ assert.True(t, keys[0].Cnt <= keys[1].Cnt)
+ assert.True(t, keys[len(keys)-2].Cnt <= keys[len(keys)-1].Cnt)
+ assert.Equal(t, int32(minCollSizeInHeap*2-1), keys[len(keys)-1].Cnt)
+ assert.Equal(t, strconv.Itoa(int(keys[len(keys)-1].Cnt)), keys[len(keys)-1].Key)
+
+ oldCnt := len(keys)
+ h.Update([]byte(keys[len(keys)-1].Key), minCollSizeInHeap-1)
+ // should be removed since under minCollSizeInHeap
+ keys = h.TopKeys()
+ assert.Equal(t, int32(minCollSizeInHeap*2-1-1), keys[len(keys)-1].Cnt)
+ assert.Equal(t, oldCnt-1, len(keys))
+}
diff --git a/metric/prom.go b/metric/prom.go
new file mode 100644
index 00000000..e3ddbbbf
--- /dev/null
+++ b/metric/prom.go
@@ -0,0 +1,99 @@
+package metric
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+)
+
+var (
+ // unit is ms
+ ClusterWriteLatency = promauto.NewHistogramVec(prometheus.HistogramOpts{
+ Name: "cluster_write_latency",
+ Help: "cluster write request latency",
+ Buckets: prometheus.ExponentialBuckets(1, 2, 14),
+ }, []string{"namespace"})
+ // unit is ms
+ DBWriteLatency = promauto.NewHistogramVec(prometheus.HistogramOpts{
+ Name: "db_write_latency",
+ Help: "db write request latency",
+ Buckets: prometheus.ExponentialBuckets(1, 2, 14),
+ }, []string{"namespace"})
+ // unit is ms
+ RaftWriteLatency = promauto.NewHistogramVec(prometheus.HistogramOpts{
+ Name: "raft_write_latency",
+ Help: "raft write request latency",
+ Buckets: prometheus.ExponentialBuckets(1, 2, 14),
+ }, []string{"namespace", "step"})
+
+ WriteByteSize = promauto.NewHistogramVec(prometheus.HistogramOpts{
+ Name: "write_byte_size",
+ Help: "write request byte size",
+ Buckets: prometheus.ExponentialBuckets(128, 2, 12),
+ }, []string{"namespace"})
+
+ SlowWrite100msCnt = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "slow_write_100ms_cnt",
+ Help: "slow 100ms counter for slow write command",
+ }, []string{"table", "cmd"})
+ SlowWrite50msCnt = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "slow_write_50ms_cnt",
+ Help: "slow 50ms counter for slow write command",
+ }, []string{"table", "cmd"})
+ SlowWrite10msCnt = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "slow_write_10ms_cnt",
+ Help: "slow 10ms counter for slow write command",
+ }, []string{"table", "cmd"})
+ SlowLimiterRefusedCnt = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "slow_limiter_refused_cnt",
+ Help: "slow limiter refused counter for slow write command",
+ }, []string{"table", "cmd"})
+ SlowLimiterQueuedCnt = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "slow_limiter_queued_cnt",
+ Help: "queued total counter for slow wait queue",
+ }, []string{"table", "cmd", "slow_level"})
+ SlowLimiterQueuedCost = promauto.NewHistogramVec(prometheus.HistogramOpts{
+ Name: "slow_limiter_queued_cost",
+ Help: "slow limiter queued cost distribution in slow wait queue",
+ Buckets: prometheus.ExponentialBuckets(1, 2, 14),
+ }, []string{"namespace", "table", "cmd"})
+
+ QueueLen = promauto.NewGaugeVec(prometheus.GaugeOpts{
+ Name: "queue_len",
+ Help: "queue length for all kinds of queue like raft proposal/transport/apply",
+ }, []string{"namespace", "queue_name"})
+
+ ErrorCnt = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "error_cnt",
+ Help: "error counter for some useful kinds of internal error",
+ }, []string{"namespace", "error_info"})
+
+ EventCnt = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "event_cnt",
+ Help: "the important event counter for internal event",
+ }, []string{"namespace", "event_name"})
+
+ TableKeyNum = promauto.NewGaugeVec(prometheus.GaugeOpts{
+ Name: "table_key_num",
+ Help: "the key number stats for each table",
+ }, []string{"table", "group"})
+
+ TableDiskUsage = promauto.NewGaugeVec(prometheus.GaugeOpts{
+ Name: "table_disk_usage",
+ Help: "the disk usage stats for each table",
+ }, []string{"table", "group"})
+
+ ReadCmdCounter = promauto.NewCounter(prometheus.CounterOpts{
+ Name: "read_cmd_total",
+ Help: "redis read command total counter",
+ })
+ WriteCmdCounter = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "write_cmd_total",
+ Help: "redis write command total counter",
+ }, []string{"namespace"})
+
+ CollectionLenDist = promauto.NewHistogramVec(prometheus.HistogramOpts{
+ Name: "collection_length_dist",
+ Help: "the length distribute for the large collections",
+ Buckets: prometheus.ExponentialBuckets(128, 2, 12),
+ }, []string{"table"})
+)
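
Aside (not part of the patch): because these collectors are built with promauto they register themselves on the default Prometheus registry, so callers only record observations. A hedged usage sketch follows; the namespace label, port and timings are invented.

```go
package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/youzan/ZanRedisDB/metric"
)

// recordWrite shows how a write path might feed the collectors above.
// The latency histograms in this package are in milliseconds.
func recordWrite(ns string, start time.Time, bytes int) {
	elapsedMs := float64(time.Since(start)) / float64(time.Millisecond)
	metric.DBWriteLatency.WithLabelValues(ns).Observe(elapsedMs)
	metric.WriteByteSize.WithLabelValues(ns).Observe(float64(bytes))
	metric.WriteCmdCounter.WithLabelValues(ns).Inc()
}

func main() {
	recordWrite("default", time.Now().Add(-3*time.Millisecond), 256)
	// promauto registers on prometheus.DefaultRegisterer, so the stock
	// handler already exposes everything declared in this file.
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":2112", nil)
}
```
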
diff --git a/common/stats.go b/metric/stats.go
similarity index 89%
rename from common/stats.go
rename to metric/stats.go
index 6e9d3e03..c236a980 100644
--- a/common/stats.go
+++ b/metric/stats.go
@@ -1,4 +1,4 @@
-package common
+package metric
import (
"math"
@@ -73,14 +73,23 @@ type TableStats struct {
ApproximateKeyNum int64 `json:"approximate_key_num"`
}
+type CompactFilterStats struct {
+ ExpiredCleanCnt int64 `json:"expired_clean_cnt,omitempty"`
+ VersionCleanCnt int64 `json:"version_clean_cnt,omitempty"`
+ DelCleanCnt int64 `json:"del_clean_cnt,omitempty"`
+}
+
type NamespaceStats struct {
Name string `json:"name"`
TStats []TableStats `json:"table_stats"`
DBWriteStats *WriteStats `json:"db_write_stats"`
ClusterWriteStats *WriteStats `json:"cluster_write_stats"`
InternalStats map[string]interface{} `json:"internal_stats"`
+ DBCompactStats CompactFilterStats `json:"db_compact_stats,omitempty"`
EngType string `json:"eng_type"`
IsLeader bool `json:"is_leader"`
+ TopNWriteKeys []TopNInfo `json:"top_n_write_keys,omitempty"`
+ TopNLargeCollKeys []TopNInfo `json:"top_n_large_coll_keys,omitempty"`
}
type LogSyncStats struct {
diff --git a/metric/topn.go b/metric/topn.go
new file mode 100644
index 00000000..c0387a65
--- /dev/null
+++ b/metric/topn.go
@@ -0,0 +1,190 @@
+package metric
+
+// slow write logs
+// large keys
+// large collections
+// large deletion
+// large read/scan
+// use LRU for top-n hot keys, large keys and large collections
+
+import (
+ "sort"
+ "sync/atomic"
+
+ lru "github.com/hashicorp/golang-lru"
+ "github.com/twmb/murmur3"
+)
+
+type HKeyInfo struct {
+ //Value []byte
+ Cnt int32
+ //InitTime time.Time
+}
+
+func (hki *HKeyInfo) Inc() {
+ atomic.AddInt32(&hki.Cnt, 1)
+}
+
+const (
+ defaultTopnBucketSize = 2
+ maxTopnInBucket = 16
+)
+
+var TopnHotKeys = NewTopNHot()
+
+type topNBucket struct {
+ hotWriteKeys *lru.ARCCache
+ sampleCnt int64
+}
+
+func newTopNBucket() *topNBucket {
+ l, err := lru.NewARC(maxTopnInBucket)
+ if err != nil {
+ panic(err)
+ }
+ return &topNBucket{
+ hotWriteKeys: l,
+ }
+}
+
+func handleTopnHit(hotKeys *lru.ARCCache, k []byte) *HKeyInfo {
+ item, ok := hotKeys.Get(string(k))
+ var hki *HKeyInfo
+ if ok {
+ hki = item.(*HKeyInfo)
+ hki.Inc()
+ } else {
+ // if added concurrently, ignoring the duplicate is fine
+ hki = &HKeyInfo{
+ Cnt: 1,
+ //InitTime: time.Now(),
+ }
+ hotKeys.Add(string(k), hki)
+ }
+ return hki
+}
+
+func (b *topNBucket) write(k []byte) {
+ c := atomic.AddInt64(&b.sampleCnt, 1)
+ if c%3 != 0 {
+ return
+ }
+ handleTopnHit(b.hotWriteKeys, k)
+}
+
+func (b *topNBucket) read(k []byte) {
+ // reads are not tracked currently
+ return
+}
+
+func (b *topNBucket) Clear() {
+ b.hotWriteKeys.Purge()
+}
+
+func (b *topNBucket) Keys() []interface{} {
+ return b.hotWriteKeys.Keys()
+}
+
+func (b *topNBucket) Peek(key interface{}) *HKeyInfo {
+ v, ok := b.hotWriteKeys.Peek(key)
+ if !ok {
+ return nil
+ }
+ item, ok := v.(*HKeyInfo)
+ if !ok {
+ return nil
+ }
+ return item
+}
+
+type TopNHot struct {
+ hotKeys [defaultTopnBucketSize]*topNBucket
+ enabled int32
+}
+
+func NewTopNHot() *TopNHot {
+ top := &TopNHot{
+ enabled: 1,
+ }
+ for i := 0; i < len(top.hotKeys); i++ {
+ top.hotKeys[i] = newTopNBucket()
+ }
+ return top
+}
+
+// Clear removes all historical LRU data. Resetting periodically makes sure
+// new data can be refreshed into the LRU.
+func (tnh *TopNHot) Clear() {
+ for _, b := range tnh.hotKeys {
+ b.Clear()
+ }
+}
+
+func (tnh *TopNHot) isEnabled() bool {
+ return atomic.LoadInt32(&tnh.enabled) > 0
+}
+
+func (tnh *TopNHot) Enable(on bool) {
+ if on {
+ atomic.StoreInt32(&tnh.enabled, 1)
+ } else {
+ atomic.StoreInt32(&tnh.enabled, 0)
+ }
+}
+
+func (tnh *TopNHot) getBucket(k []byte) (*topNBucket, uint64) {
+ hk := murmur3.Sum64(k)
+ bi := hk % uint64(len(tnh.hotKeys))
+ b := tnh.hotKeys[bi]
+ return b, bi
+}
+
+func (tnh *TopNHot) HitWrite(k []byte) {
+ if !tnh.isEnabled() {
+ return
+ }
+ if len(k) == 0 {
+ return
+ }
+ b, _ := tnh.getBucket(k)
+ b.write(k)
+}
+
+type TopNInfo struct {
+ Key string
+ Cnt int32
+}
+
+type topnList []TopNInfo
+
+func (t topnList) Len() int {
+ return len(t)
+}
+func (t topnList) Swap(i, j int) {
+ t[i], t[j] = t[j], t[i]
+}
+func (t topnList) Less(i, j int) bool {
+ if t[i].Cnt == t[j].Cnt {
+ return t[i].Key < t[j].Key
+ }
+ return t[i].Cnt < t[j].Cnt
+}
+
+func (tnh *TopNHot) GetTopNWrites() []TopNInfo {
+ if !tnh.isEnabled() {
+ return nil
+ }
+ hks := make(topnList, 0, len(tnh.hotKeys))
+ for _, b := range tnh.hotKeys {
+ keys := b.Keys()
+ for _, key := range keys {
+ v := b.Peek(key)
+ if v == nil {
+ continue
+ }
+ hks = append(hks, TopNInfo{Key: key.(string), Cnt: atomic.LoadInt32(&v.Cnt)})
+ }
+ }
+ sort.Sort(hks)
+ return hks
+}
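
Aside (not part of the patch): a short sketch of exercising the hot-key tracker; keys and loop counts are illustrative.

```go
package main

import (
	"fmt"

	"github.com/youzan/ZanRedisDB/metric"
)

func main() {
	topn := metric.NewTopNHot()

	// Writes are sampled (roughly one in three) into per-bucket ARC caches,
	// so a key needs repeated hits before it shows up.
	for i := 0; i < 100; i++ {
		topn.HitWrite([]byte("hash:hotkey"))
		topn.HitWrite([]byte("kv:coldkey"))
	}

	// GetTopNWrites returns the sampled keys sorted by hit count (ascending).
	for _, info := range topn.GetTopNWrites() {
		fmt.Printf("%s hits=%d\n", info.Key, info.Cnt)
	}

	// Clearing periodically lets fresh keys rotate into the LRU buckets.
	topn.Clear()
}
```
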
diff --git a/metric/topn_test.go b/metric/topn_test.go
new file mode 100644
index 00000000..6456d83d
--- /dev/null
+++ b/metric/topn_test.go
@@ -0,0 +1,43 @@
+package metric
+
+import (
+ "strconv"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestTopNWrite(t *testing.T) {
+ topn := NewTopNHot()
+
+ for i := 0; i < defaultTopnBucketSize*2; i++ {
+ k := []byte(strconv.Itoa(i))
+ topn.HitWrite(k)
+ }
+ t.Logf("%v", topn.GetTopNWrites())
+ hk := []byte(strconv.Itoa(1))
+ topn.HitWrite(hk)
+ topn.HitWrite(hk)
+ topn.HitWrite(hk)
+ t.Logf("%v", topn.GetTopNWrites())
+ assert.Equal(t, defaultTopnBucketSize, len(topn.GetTopNWrites()))
+
+ for i := 0; i < defaultTopnBucketSize*2; i++ {
+ k := []byte(strconv.Itoa(i))
+ topn.HitWrite(k)
+ }
+ for l := 0; l < 100; l++ {
+ topn.HitWrite(hk)
+ topn.HitWrite(hk)
+ topn.HitWrite(hk)
+ }
+ t.Logf("%v", topn.GetTopNWrites())
+ assert.Equal(t, defaultTopnBucketSize, len(topn.GetTopNWrites()))
+ keys := topn.GetTopNWrites()
+ assert.Equal(t, string(hk), keys[len(keys)-1].Key)
+ assert.Equal(t, true, keys[0].Cnt <= keys[1].Cnt)
+ assert.Equal(t, true, keys[len(keys)-2].Cnt <= keys[len(keys)-1].Cnt)
+ topn.Clear()
+
+ assert.Equal(t, 0, len(topn.GetTopNWrites()))
+}
diff --git a/mkdocs-material/Dockerfile b/mkdocs-material/Dockerfile
index ab0af453..de4eabb4 100644
--- a/mkdocs-material/Dockerfile
+++ b/mkdocs-material/Dockerfile
@@ -19,7 +19,6 @@
# IN THE SOFTWARE.
FROM jfloff/alpine-python:2.7-slim
-MAINTAINER Martin Donath
# Set build directory
WORKDIR /tmp
diff --git a/mkdocs.yml b/mkdocs.yml
index 10aac535..82ce5277 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -20,8 +20,8 @@ edit_uri: edit/master/doc/
#theme:
# name: null
# custom_dir: 'mkdocs-material/material'
-repo_name: 'absolute8511/ZanRedisDB'
-repo_url: 'https://github.com/absolute8511/ZanRedisDB'
+repo_name: 'youzan/ZanRedisDB'
+repo_url: 'https://github.com/youzan/ZanRedisDB'
google_analytics:
- 'UA-3196917-8'
- 'auto'
@@ -32,4 +32,5 @@ pages:
- 'Overview' : 'design.md'
- 'full scan' : 'design/fullscan.md'
- 'source analyse' : 'design/source_analyse.md'
- - 'Examples' : 'examples.md'
\ No newline at end of file
+ - 'User Guide' : 'user-guide.md'
+ - 'Operation Guide' : 'operation-guide.md'
\ No newline at end of file
diff --git a/node/config.go b/node/config.go
index affc4564..3d69e6b9 100644
--- a/node/config.go
+++ b/node/config.go
@@ -1,8 +1,8 @@
package node
import (
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/rockredis"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
)
type NamespaceConfig struct {
@@ -18,17 +18,19 @@ type NamespaceConfig struct {
OptimizedFsync bool `json:"optimized_fsync"`
RaftGroupConf RaftGroupConfig `json:"raft_group_conf"`
ExpirationPolicy string `json:"expiration_policy"`
+ DataVersion string `json:"data_version"`
}
func NewNSConfig() *NamespaceConfig {
return &NamespaceConfig{
- SnapCount: 400000,
- SnapCatchup: 100000,
+ SnapCount: common.DefaultSnapCount,
+ SnapCatchup: common.DefaultSnapCatchup,
ExpirationPolicy: common.DefaultExpirationPolicy,
}
}
type NamespaceDynamicConf struct {
+ Replicator int
}
type RaftGroupConfig struct {
@@ -38,19 +40,24 @@ type RaftGroupConfig struct {
type MachineConfig struct {
// server node id
- NodeID uint64 `json:"node_id"`
- BroadcastAddr string `json:"broadcast_addr"`
- HttpAPIPort int `json:"http_api_port"`
- LocalRaftAddr string `json:"local_raft_addr"`
- DataRootDir string `json:"data_root_dir"`
- ElectionTick int `json:"election_tick"`
- TickMs int `json:"tick_ms"`
- KeepWAL int `json:"keep_wal"`
- LearnerRole string `json:"learner_role"`
- RemoteSyncCluster string `json:"remote_sync_cluster"`
- StateMachineType string `json:"state_machine_type"`
- RocksDBOpts rockredis.RockOptions `json:"rocksdb_opts"`
- RocksDBSharedConfig *rockredis.SharedRockConfig
+ NodeID uint64 `json:"node_id"`
+ BroadcastAddr string `json:"broadcast_addr"`
+ HttpAPIPort int `json:"http_api_port"`
+ LocalRaftAddr string `json:"local_raft_addr"`
+ DataRootDir string `json:"data_root_dir"`
+ ElectionTick int `json:"election_tick"`
+ TickMs int `json:"tick_ms"`
+ KeepBackup int `json:"keep_backup"`
+ KeepWAL int `json:"keep_wal"`
+ UseRocksWAL bool `json:"use_rocks_wal"`
+ SharedRocksWAL bool `json:"shared_rocks_wal"`
+ LearnerRole string `json:"learner_role"`
+ RemoteSyncCluster string `json:"remote_sync_cluster"`
+ StateMachineType string `json:"state_machine_type"`
+ RocksDBOpts engine.RockOptions `json:"rocksdb_opts"`
+ RocksDBSharedConfig engine.SharedRockConfig
+ WALRocksDBOpts engine.RockOptions `json:"wal_rocksdb_opts"`
+ WALRocksDBSharedConfig engine.SharedRockConfig
}
type ReplicaInfo struct {
@@ -70,12 +77,17 @@ type RaftConfig struct {
RaftAddr string `json:"raft_addr"`
DataDir string `json:"data_dir"`
WALDir string `json:"wal_dir"`
- KeepWAL int `json:"keep_wal"`
SnapDir string `json:"snap_dir"`
+ RaftStorageDir string `json:"raft_storage_dir"`
RaftPeers map[uint64]ReplicaInfo `json:"raft_peers"`
SnapCount int `json:"snap_count"`
SnapCatchup int `json:"snap_catchup"`
- Replicator int `json:"replicator"`
+ Replicator int32 `json:"replicator"`
OptimizedFsync bool `json:"optimized_fsync"`
+ rockEng engine.KVEngine
nodeConfig *MachineConfig
}
+
+func (rc *RaftConfig) SetEng(eng engine.KVEngine) {
+ rc.rockEng = eng
+}
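
Aside (not part of the patch): a hypothetical sketch of filling in the WAL-related fields added to MachineConfig; every value below is illustrative, not a recommended default.

```go
package node

// exampleMachineConfig is a hypothetical helper (not in this change)
// showing how the new KeepBackup/UseRocksWAL/SharedRocksWAL fields
// might be set alongside the existing ones.
func exampleMachineConfig() *MachineConfig {
	return &MachineConfig{
		NodeID:        1,
		BroadcastAddr: "127.0.0.1",
		HttpAPIPort:   13380,
		LocalRaftAddr: "http://127.0.0.1:13379",
		DataRootDir:   "/data/zanredisdb",
		ElectionTick:  30,
		TickMs:        200,
		KeepBackup:    3,
		KeepWAL:       2,
		// New in this change: keep the raft WAL in rocksdb and share one
		// WAL rocksdb instance across namespaces on this node.
		UseRocksWAL:    true,
		SharedRocksWAL: true,
	}
}
```
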
diff --git a/node/conflict_checker.go b/node/conflict_checker.go
index cdd9ea60..6d1cfb09 100644
--- a/node/conflict_checker.go
+++ b/node/conflict_checker.go
@@ -6,7 +6,15 @@ import (
"github.com/absolute8511/redcon"
)
-type ConflictCheckFunc func(redcon.Command, int64) bool
+type ConflictState int
+
+const (
+ NoConflict = iota
+ MaybeConflict
+ Conflict
+)
+
+type ConflictCheckFunc func(redcon.Command, int64) ConflictState
type conflictRouter struct {
checkCmds map[string]ConflictCheckFunc
@@ -31,18 +39,18 @@ func (r *conflictRouter) GetHandler(name string) (ConflictCheckFunc, bool) {
return v, ok
}
-func (kvsm *kvStoreSM) checkKVConflict(cmd redcon.Command, reqTs int64) bool {
+func (kvsm *kvStoreSM) checkKVConflict(cmd redcon.Command, reqTs int64) ConflictState {
oldTs, err := kvsm.store.KVGetVer(cmd.Args[1])
if err != nil {
kvsm.Infof("key %v failed to get modify version: %v", cmd.Args[1], err)
}
if oldTs < reqTs {
- return false
+ return NoConflict
}
- return true
+ return Conflict
}
-func (kvsm *kvStoreSM) checkKVKVConflict(cmd redcon.Command, reqTs int64) bool {
+func (kvsm *kvStoreSM) checkKVKVConflict(cmd redcon.Command, reqTs int64) ConflictState {
kvs := cmd.Args[1:]
for i := 0; i < len(kvs)-1; i += 2 {
oldTs, err := kvsm.store.KVGetVer(kvs[i])
@@ -50,13 +58,13 @@ func (kvsm *kvStoreSM) checkKVKVConflict(cmd redcon.Command, reqTs int64) bool {
kvsm.Infof("key %v failed to get modify version: %v", cmd.Args[1], err)
}
if oldTs >= reqTs {
- return true
+ return Conflict
}
}
- return false
+ return NoConflict
}
-func (kvsm *kvStoreSM) checkHashKFVConflict(cmd redcon.Command, reqTs int64) bool {
+func (kvsm *kvStoreSM) checkHashKFVConflict(cmd redcon.Command, reqTs int64) ConflictState {
fvs := cmd.Args[2:]
for i := 0; i < len(fvs)-1; i += 2 {
oldTs, err := kvsm.store.HGetVer(cmd.Args[1], fvs[i])
@@ -64,13 +72,13 @@ func (kvsm *kvStoreSM) checkHashKFVConflict(cmd redcon.Command, reqTs int64) boo
kvsm.Infof("key %v failed to get modify version: %v", cmd.Args[1], err)
}
if oldTs >= reqTs {
- return true
+ return Conflict
}
}
- return false
+ return NoConflict
}
-func (kvsm *kvStoreSM) checkHashKFFConflict(cmd redcon.Command, reqTs int64) bool {
+func (kvsm *kvStoreSM) checkHashKFFConflict(cmd redcon.Command, reqTs int64) ConflictState {
fvs := cmd.Args[2:]
for i := 0; i < len(fvs); i++ {
oldTs, err := kvsm.store.HGetVer(cmd.Args[1], fvs[i])
@@ -78,50 +86,81 @@ func (kvsm *kvStoreSM) checkHashKFFConflict(cmd redcon.Command, reqTs int64) boo
kvsm.Infof("key %v failed to get modify version: %v", cmd.Args[1], err)
}
if oldTs >= reqTs {
- return true
+ return Conflict
}
}
- return false
+ return NoConflict
}
-func (kvsm *kvStoreSM) checkListConflict(cmd redcon.Command, reqTs int64) bool {
+// For list, set and zset, the timestamp alone may not be enough.
+// If clusterA modified, clusterB modified later, and then clusterA
+// modified again, the second modification in clusterA would be allowed since its timestamp is newer,
+// but this would cause inconsistency. So we need to forbid write sync if both timestamps are newer
+// than the time we switched the cluster.
+
+func (kvsm *kvStoreSM) checkListConflict(cmd redcon.Command, reqTs int64) ConflictState {
oldTs, err := kvsm.store.LVer(cmd.Args[1])
if err != nil {
kvsm.Infof("key %v failed to get modify version: %v", cmd.Args[1], err)
}
+ if oldTs >= GetSyncedOnlyChangedTs() && reqTs >= GetSyncedOnlyChangedTs() {
+ return Conflict
+ }
if oldTs < reqTs {
- return false
+ return NoConflict
}
- return true
+ return Conflict
+}
+
+func (kvsm *kvStoreSM) checkBitmapConflict(cmd redcon.Command, reqTs int64) ConflictState {
+ oldTs, err := kvsm.store.BitGetVer(cmd.Args[1])
+ if err != nil {
+ kvsm.Infof("key %v failed to get modify version: %v", cmd.Args[1], err)
+ }
+ if oldTs >= GetSyncedOnlyChangedTs() && reqTs >= GetSyncedOnlyChangedTs() {
+ return MaybeConflict
+ }
+ if oldTs < reqTs {
+ return NoConflict
+ }
+ return MaybeConflict
}
-func (kvsm *kvStoreSM) checkSetConflict(cmd redcon.Command, reqTs int64) bool {
+// For set and zset it is mostly safe to resolve the conflict: the set or zset will not end up with duplicate members,
+// so keeping both add ops is fine. (A possible issue is that delete and add may be applied out of order.)
+func (kvsm *kvStoreSM) checkSetConflict(cmd redcon.Command, reqTs int64) ConflictState {
oldTs, err := kvsm.store.SGetVer(cmd.Args[1])
if err != nil {
kvsm.Infof("key %v failed to get modify version: %v", cmd.Args[1], err)
}
+ if oldTs >= GetSyncedOnlyChangedTs() && reqTs >= GetSyncedOnlyChangedTs() {
+ return MaybeConflict
+ }
if oldTs < reqTs {
- return false
+ return NoConflict
}
- return true
+ return MaybeConflict
}
-func (kvsm *kvStoreSM) checkZSetConflict(cmd redcon.Command, reqTs int64) bool {
+func (kvsm *kvStoreSM) checkZSetConflict(cmd redcon.Command, reqTs int64) ConflictState {
oldTs, err := kvsm.store.ZGetVer(cmd.Args[1])
if err != nil {
kvsm.Infof("key %v failed to get modify version: %v", cmd.Args[1], err)
}
+ if oldTs >= GetSyncedOnlyChangedTs() && reqTs >= GetSyncedOnlyChangedTs() {
+ return MaybeConflict
+ }
if oldTs < reqTs {
- return false
+ return NoConflict
}
- return true
+ return MaybeConflict
}
-func (kvsm *kvStoreSM) checkHLLConflict(cmd redcon.Command, reqTs int64) bool {
+func (kvsm *kvStoreSM) checkHLLConflict(cmd redcon.Command, reqTs int64) ConflictState {
+ // hll has no need to handle conflicts since it is not accurate anyway
- return false
+ return NoConflict
}
-func (kvsm *kvStoreSM) checkJsonConflict(cmd redcon.Command, reqTs int64) bool {
- return true
+func (kvsm *kvStoreSM) checkJsonConflict(cmd redcon.Command, reqTs int64) ConflictState {
+ return Conflict
}
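
Aside (not part of the patch): a hedged sketch of how a three-valued ConflictState could be consumed on the write-sync path. The dispatch helper and its policy for MaybeConflict are invented for illustration; the real syncer code may differ.

```go
package node

import "github.com/absolute8511/redcon"

// applySyncedWrite is a hypothetical illustration of consuming the
// ConflictState returned by the checkers above: NoConflict applies the
// remote write, Conflict drops it, and MaybeConflict is left to policy
// (here: apply it but log for inspection).
func (kvsm *kvStoreSM) applySyncedWrite(check ConflictCheckFunc, cmd redcon.Command,
	reqTs int64, apply func() error) error {
	switch check(cmd, reqTs) {
	case NoConflict:
		return apply()
	case Conflict:
		kvsm.Infof("synced %v ignored: conflicts with newer local data", string(cmd.Args[0]))
		return nil
	default: // MaybeConflict
		kvsm.Infof("synced %v may conflict, applying anyway", string(cmd.Args[0]))
		return apply()
	}
}
```
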
diff --git a/node/geo.go b/node/geo.go
index 84444be2..18f2ab85 100644
--- a/node/geo.go
+++ b/node/geo.go
@@ -5,13 +5,14 @@ package node
import (
"errors"
+ "fmt"
"sort"
"strconv"
"strings"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/common/geohash"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/common/geohash"
)
var (
@@ -36,10 +37,10 @@ const (
/* usage:
GEOADD key lon0 lat0 elem0 lon1 lat1 elem1
*/
-func (nd *KVNode) geoaddCommand(conn redcon.Conn, cmd redcon.Command) {
+func (nd *KVNode) geoaddCommand(cmd redcon.Command) (interface{}, error) {
if len(cmd.Args) < 5 || (len(cmd.Args)-2)%3 != 0 {
- conn.WriteError("ERR wrong number of arguments for 'geoadd' command")
- return
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
var zaddCmd redcon.Command
@@ -51,19 +52,16 @@ func (nd *KVNode) geoaddCommand(conn redcon.Conn, cmd redcon.Command) {
for i := 0; i < (len(cmd.Args)-2)/3; i++ {
lon, err := strconv.ParseFloat(string(cmd.Args[i*3+2]), 64)
if err != nil {
- conn.WriteError("ERR value is not a valid float")
- return
+ return nil, errors.New("ERR value is not a valid float")
}
lat, err := strconv.ParseFloat(string(cmd.Args[i*3+3]), 64)
if err != nil {
- conn.WriteError("ERR value is not a valid float")
- return
+ return nil, errors.New("ERR value is not a valid float")
}
hash, err := geohash.EncodeWGS84(lon, lat)
if err != nil {
- conn.WriteError("Err " + err.Error())
- return
+ return nil, errors.New("Err " + err.Error())
}
zaddCmd.Args[i*2+2] = strconv.AppendUint(zaddCmd.Args[i*2+2], hash, 10)
@@ -72,29 +70,23 @@ func (nd *KVNode) geoaddCommand(conn redcon.Conn, cmd redcon.Command) {
if ifGeoHashUnitTest {
/* The code used for unit test. */
- _, key, err := common.ExtractNamesapce(cmd.Args[1])
+ key, err := common.CutNamesapce(cmd.Args[1])
if err != nil {
- conn.WriteError(err.Error())
- return
- }
- if common.IsValidTableName(key) {
- conn.WriteError(common.ErrInvalidTableName.Error())
- return
+ return nil, err
}
zaddCmd.Args[1] = key
sm, ok := nd.sm.(*kvStoreSM)
if !ok {
- conn.WriteError("Err not supported state machine")
- return
+ return nil, errors.New("Err not supported state machine")
}
if _, err := sm.localZaddCommand(buildCommand(zaddCmd.Args), -1); err != nil {
- conn.WriteError("Err " + err.Error())
+ return nil, errors.New("Err " + err.Error())
}
-
} else {
/* The code actually execute. */
- nd.zaddCommand(conn, buildCommand(zaddCmd.Args))
+ return nd.zaddCommand(buildCommand(zaddCmd.Args))
}
+ return 0, nil
}
/* usage:
@@ -204,7 +196,11 @@ func (nd *KVNode) geoRadiusGeneric(conn redcon.Conn, cmd redcon.Command, stype s
var err error
if card, err := nd.store.ZCard(cmd.Args[1]); err != nil || card == 0 {
- conn.WriteError("(empty list or set)")
+ if err != nil {
+ conn.WriteError(err.Error())
+ } else {
+ conn.WriteArray(0)
+ }
return
}
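
Aside (not part of the patch): the refactored geoaddCommand still maps each (lon, lat, member) triple to a ZADD with an integer geohash score. A standalone sketch of that conversion, assuming EncodeWGS84's signature matches its use above; coordinates are illustrative.

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/youzan/ZanRedisDB/common/geohash"
)

// geoScore mirrors the per-member conversion done by geoaddCommand:
// WGS84 coordinates are encoded into an integer geohash, which becomes
// the member's zset score (appended as a decimal string argument).
func geoScore(lon, lat float64) (string, error) {
	hash, err := geohash.EncodeWGS84(lon, lat)
	if err != nil {
		return "", err
	}
	return strconv.FormatUint(hash, 10), nil
}

func main() {
	score, err := geoScore(120.15, 30.28) // roughly Hangzhou
	if err != nil {
		panic(err)
	}
	fmt.Println("ZADD geo:places", score, "hangzhou")
}
```
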
diff --git a/node/geo_test.go b/node/geo_test.go
index f07138e1..52697a66 100644
--- a/node/geo_test.go
+++ b/node/geo_test.go
@@ -152,9 +152,31 @@ func TestKVNode_GeoCommand(t *testing.T) {
},
}
+ /* Test georadius with empty. */
+ testCmd := "georadius"
+ cmdArgs := make([][]byte, 7)
+ cmdArgs[0] = []byte(testCmd)
+ cmdArgs[1] = testKey
+ cmdArgs[2] = []byte("0")
+ cmdArgs[3] = []byte("31")
+ cmdArgs[4] = []byte("100000")
+ cmdArgs[5] = []byte("km")
+ cmdArgs[6] = []byte("asc")
+ cmdArgs = cmdArgs[:7]
+
+ handlerCmd := buildCommand(cmdArgs)
+ c := &fakeRedisConn{}
+ handler, _ := nd.router.GetCmdHandler(testCmd)
+ handler(c, handlerCmd)
+
+ assert.Nil(t, c.GetError(), "command: georadius executed failed, %v", c.GetError())
+ assert.Equal(t, 0, c.rsp[0])
+ assert.Equal(t, 1, len(c.rsp))
+ c.Reset()
+
/* Test geoadd. */
- testCmd := "geoadd"
- cmdArgs := make([][]byte, len(tCases)*3+2)
+ testCmd = "geoadd"
+ cmdArgs = make([][]byte, len(tCases)*3+2)
cmdArgs[0] = []byte(testCmd)
cmdArgs[1] = testKey
@@ -165,11 +187,13 @@ func TestKVNode_GeoCommand(t *testing.T) {
j = j + 3
}
- handlerCmd := buildCommand(cmdArgs)
- c := &fakeRedisConn{}
- handler, _, _ := nd.router.GetCmdHandler(testCmd)
- handler(c, handlerCmd)
- assert.Nil(t, c.GetError())
+ handlerCmd = buildCommand(cmdArgs)
+ whandler, _ := nd.router.GetWCmdHandler(testCmd)
+ rsp, err := whandler(handlerCmd)
+ assert.Nil(t, err)
+ _, ok := rsp.(error)
+ assert.True(t, !ok)
+
c.Reset()
/* Test geohash. */
@@ -185,7 +209,7 @@ func TestKVNode_GeoCommand(t *testing.T) {
}
}
handlerCmd = buildCommand(cmdArgs)
- handler, _, _ = nd.router.GetCmdHandler(testCmd)
+ handler, _ = nd.router.GetCmdHandler(testCmd)
handler(c, handlerCmd)
assert.Equal(t, len(tCases), c.rsp[0],
@@ -221,7 +245,7 @@ func TestKVNode_GeoCommand(t *testing.T) {
cmdArgs = cmdArgs[:5]
handlerCmd = buildCommand(cmdArgs)
- handler, _, _ = nd.router.GetCmdHandler(testCmd)
+ handler, _ = nd.router.GetCmdHandler(testCmd)
handler(c, handlerCmd)
assert.Nil(t, c.GetError(), "test command: geodist failed")
@@ -251,7 +275,7 @@ func TestKVNode_GeoCommand(t *testing.T) {
cmdArgs[3] = []byte("NoneExsitPlace")
cmdArgs[4] = []byte("m")
handlerCmd = buildCommand(cmdArgs)
- handler, _, _ = nd.router.GetCmdHandler(testCmd)
+ handler, _ = nd.router.GetCmdHandler(testCmd)
handler(c, handlerCmd)
assert.Nil(t, c.rsp[0], "geodist with nonexistent should return nil")
c.Reset()
@@ -269,7 +293,7 @@ func TestKVNode_GeoCommand(t *testing.T) {
}
}
handlerCmd = buildCommand(cmdArgs)
- handler, _, _ = nd.router.GetCmdHandler(testCmd)
+ handler, _ = nd.router.GetCmdHandler(testCmd)
handler(c, handlerCmd)
assert.Nil(t, c.GetError(), "test command: geopos failed")
@@ -317,7 +341,7 @@ func TestKVNode_GeoCommand(t *testing.T) {
cmdArgs = cmdArgs[:11]
handlerCmd = buildCommand(cmdArgs)
- handler, _, _ = nd.router.GetCmdHandler(testCmd)
+ handler, _ = nd.router.GetCmdHandler(testCmd)
handler(c, handlerCmd)
sortedResult := tCases
@@ -365,7 +389,7 @@ func TestKVNode_GeoCommand(t *testing.T) {
c.Reset()
cmdArgs[10] = []byte("DESC")
handlerCmd = buildCommand(cmdArgs)
- handler, _, _ = nd.router.GetCmdHandler(testCmd)
+ handler, _ = nd.router.GetCmdHandler(testCmd)
handler(c, handlerCmd)
assert.Nil(t, c.GetError(), "test command: georadiusbymember desc failed")
@@ -426,7 +450,7 @@ func TestKVNode_GeoCommand(t *testing.T) {
cmdArgs = cmdArgs[:7]
handlerCmd = buildCommand(cmdArgs)
- handler, _, _ = nd.router.GetCmdHandler(testCmd)
+ handler, _ = nd.router.GetCmdHandler(testCmd)
handler(c, handlerCmd)
assert.Nil(t, c.GetError(), "command: georadius executed failed, %v", c.GetError())
@@ -445,9 +469,11 @@ func TestKVNode_GeoCommand(t *testing.T) {
cmdArgs[4] = []byte(member + strconv.FormatInt(k, 36))
cmdArgs = cmdArgs[0:5]
handlerCmd = buildCommand(cmdArgs)
- handler, _, _ = nd.router.GetCmdHandler(testCmd)
- handler(c, handlerCmd)
- assert.Nil(t, c.GetError(), "command: geoadd executed failed, %v", c.GetError())
+ whandler, _ = nd.router.GetWCmdHandler(testCmd)
+ rsp, err := whandler(handlerCmd)
+ assert.Nil(t, err, "command: geoadd executed failed, %v", err)
+ _, ok := rsp.(error)
+ assert.True(t, !ok)
c.Reset()
k += 1
}
@@ -463,7 +489,7 @@ func TestKVNode_GeoCommand(t *testing.T) {
cmdArgs[5] = []byte("km")
handlerCmd = buildCommand(cmdArgs)
- handler, _, _ = nd.router.GetCmdHandler(testCmd)
+ handler, _ = nd.router.GetCmdHandler(testCmd)
handler(c, handlerCmd)
assert.Equal(t, c.GetError(), errTooMuchBatchSize, "command: georadius executed failed, %v", c.GetError())
diff --git a/node/hash.go b/node/hash.go
index 5b515ddc..5c0b3cfe 100644
--- a/node/hash.go
+++ b/node/hash.go
@@ -3,106 +3,125 @@ package node
import (
"strconv"
- "github.com/absolute8511/ZanRedisDB/common"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
)
func (nd *KVNode) hgetCommand(conn redcon.Conn, cmd redcon.Command) {
- val, err := nd.store.HGet(cmd.Args[1], cmd.Args[2])
- if err != nil || val == nil {
- conn.WriteNull()
- } else {
- conn.WriteBulk(val)
+ err := nd.store.HGetWithOp(cmd.Args[1], cmd.Args[2], func(val []byte) error {
+ if val == nil {
+ conn.WriteNull()
+ } else {
+ conn.WriteBulk(val)
+ // since val will be freed, we need to flush before returning
+ conn.Flush()
+ }
+ return nil
+ })
+ if err != nil {
+ conn.WriteError(err.Error())
}
+ return
}
-func (nd *KVNode) hgetallCommand(conn redcon.Conn, cmd redcon.Command) {
- n, valCh, err := nd.store.HGetAll(cmd.Args[1])
+func (nd *KVNode) hgetVerCommand(conn redcon.Conn, cmd redcon.Command) {
+ val, err := nd.store.HGetVer(cmd.Args[1], cmd.Args[2])
+ if err != nil {
+ conn.WriteError("ERR for " + string(cmd.Args[0]) + " command: " + err.Error())
+ return
+ }
+ conn.WriteInt64(val)
+}
+
+func (nd *KVNode) hgetallExpiredCommand(conn redcon.Conn, cmd redcon.Command) {
+ _, vals, err := nd.store.HGetAllExpired(cmd.Args[1])
if err != nil {
conn.WriteError("ERR for " + string(cmd.Args[0]) + " command: " + err.Error())
+ return
}
- conn.WriteArray(int(n) * 2)
- for v := range valCh {
+ conn.WriteArray(len(vals) * 2)
+ for _, v := range vals {
conn.WriteBulk(v.Rec.Key)
conn.WriteBulk(v.Rec.Value)
}
}
-func (nd *KVNode) hkeysCommand(conn redcon.Conn, cmd redcon.Command) {
- n, valCh, _ := nd.store.HKeys(cmd.Args[1])
- conn.WriteArray(int(n))
- for v := range valCh {
+func (nd *KVNode) hgetallCommand(conn redcon.Conn, cmd redcon.Command) {
+ _, vals, err := nd.store.HGetAll(cmd.Args[1])
+ if err != nil {
+ conn.WriteError("ERR for " + string(cmd.Args[0]) + " command: " + err.Error())
+ return
+ }
+ conn.WriteArray(len(vals) * 2)
+ for _, v := range vals {
conn.WriteBulk(v.Rec.Key)
+ conn.WriteBulk(v.Rec.Value)
}
}
-func (nd *KVNode) hexistsCommand(conn redcon.Conn, cmd redcon.Command) {
- val, err := nd.store.HGet(cmd.Args[1], cmd.Args[2])
- if err != nil || val == nil {
- conn.WriteInt(0)
- } else {
- conn.WriteInt(1)
+func (nd *KVNode) hkeysCommand(conn redcon.Conn, cmd redcon.Command) {
+ _, vals, err := nd.store.HKeys(cmd.Args[1])
+ if err != nil {
+ conn.WriteError("ERR for " + string(cmd.Args[0]) + " command: " + err.Error())
+ return
}
-}
-
-func (nd *KVNode) hmgetCommand(conn redcon.Conn, cmd redcon.Command) {
- vals, _ := nd.store.HMget(cmd.Args[1], cmd.Args[2:]...)
conn.WriteArray(len(vals))
for _, v := range vals {
- conn.WriteBulk(v)
+ conn.WriteBulk(v.Rec.Key)
}
}
-func (nd *KVNode) hlenCommand(conn redcon.Conn, cmd redcon.Command) {
- val, err := nd.store.HLen(cmd.Args[1])
+func (nd *KVNode) hvalsCommand(conn redcon.Conn, cmd redcon.Command) {
+ _, vals, err := nd.store.HValues(cmd.Args[1])
if err != nil {
- conn.WriteInt(0)
- } else {
- conn.WriteInt64(val)
+ conn.WriteError("ERR for " + string(cmd.Args[0]) + " command: " + err.Error())
+ return
}
-}
-
-func (nd *KVNode) hsetCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
+ conn.WriteArray(len(vals))
+ for _, v := range vals {
+ conn.WriteBulk(v.Rec.Value)
}
}
-func (nd *KVNode) hsetnxCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
+func (nd *KVNode) hexistsCommand(conn redcon.Conn, cmd redcon.Command) {
+ val, err := nd.store.HExist(cmd.Args[1], cmd.Args[2])
+ if err != nil || !val {
+ conn.WriteInt(0)
} else {
- conn.WriteError(errInvalidResponse.Error())
+ conn.WriteInt(1)
}
}
-func (nd *KVNode) hmsetCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- conn.WriteString("OK")
-}
-
-func (nd *KVNode) hdelCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
+func (nd *KVNode) hmgetExpiredCommand(conn redcon.Conn, cmd redcon.Command) {
+ vals, _ := nd.store.HMgetExpired(cmd.Args[1], cmd.Args[2:]...)
+ conn.WriteArray(len(vals))
+ for _, v := range vals {
+ if v == nil {
+ conn.WriteNull()
+ } else {
+ conn.WriteBulk(v)
+ }
}
}
-func (nd *KVNode) hincrbyCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
+func (nd *KVNode) hmgetCommand(conn redcon.Conn, cmd redcon.Command) {
+ vals, _ := nd.store.HMget(cmd.Args[1], cmd.Args[2:]...)
+ conn.WriteArray(len(vals))
+ for _, v := range vals {
+ if v == nil {
+ conn.WriteNull()
+ } else {
+ conn.WriteBulk(v)
+ }
}
}
-func (nd *KVNode) hclearCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
+func (nd *KVNode) hlenCommand(conn redcon.Conn, cmd redcon.Command) {
+ val, err := nd.store.HLen(cmd.Args[1])
+ if err != nil {
+ conn.WriteInt(0)
} else {
- conn.WriteError(errInvalidResponse.Error())
+ conn.WriteInt64(val)
}
}
@@ -142,9 +161,7 @@ func (kvsm *kvStoreSM) localHIncrbyCommand(cmd redcon.Command, ts int64) (interf
}
func (kvsm *kvStoreSM) localHDelCommand(cmd redcon.Command, ts int64) (interface{}, error) {
- // TODO: delete should only handled on the old value, if the value is newer than the timestamp proposal
- // we should ignore delete
- n, err := kvsm.store.HDel(cmd.Args[1], cmd.Args[2:]...)
+ n, err := kvsm.store.HDel(ts, cmd.Args[1], cmd.Args[2:]...)
if err != nil {
// leader write need response
return int64(0), err
@@ -153,13 +170,13 @@ func (kvsm *kvStoreSM) localHDelCommand(cmd redcon.Command, ts int64) (interface
}
func (kvsm *kvStoreSM) localHclearCommand(cmd redcon.Command, ts int64) (interface{}, error) {
- return kvsm.store.HClear(cmd.Args[1])
+ return kvsm.store.HClear(ts, cmd.Args[1])
}
func (kvsm *kvStoreSM) localHMClearCommand(cmd redcon.Command, ts int64) (interface{}, error) {
var count int64
for _, hkey := range cmd.Args[1:] {
- if _, err := kvsm.store.HClear(hkey); err == nil {
+ if _, err := kvsm.store.HClear(ts, hkey); err == nil {
count++
} else {
return count, err
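
Aside (not part of the patch): the hash read path now hands the value to a callback (HGetWithOp) instead of returning it, so the underlying buffer can be released after use. A hedged sketch of that pattern, assuming the store method's signature matches the call site in hgetCommand above; the helper itself is invented.

```go
package node

import "github.com/absolute8511/redcon"

// writeHashFieldWithOp is a hypothetical helper mirroring hgetCommand:
// val is only valid inside the callback, so any reply built from it
// must be flushed to the connection before the callback returns.
func writeHashFieldWithOp(nd *KVNode, conn redcon.Conn, key, field []byte) {
	err := nd.store.HGetWithOp(key, field, func(val []byte) error {
		if val == nil {
			conn.WriteNull()
			return nil
		}
		conn.WriteBulk(val)
		// The buffer backing val may be reused once we return, so flush
		// while it is still valid.
		return conn.Flush()
	})
	if err != nil {
		conn.WriteError(err.Error())
	}
}
```
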
diff --git a/node/hash_test.go b/node/hash_test.go
index 0d562f91..1ed142b7 100644
--- a/node/hash_test.go
+++ b/node/hash_test.go
@@ -36,6 +36,10 @@ func TestKVNode_hashCommand(t *testing.T) {
{"hkeys", buildCommand([][]byte{[]byte("hkeys"), testKey})},
{"hexists", buildCommand([][]byte{[]byte("hexists"), testKey, testField})},
{"hlen", buildCommand([][]byte{[]byte("hlen"), testKey})},
+ {"httl", buildCommand([][]byte{[]byte("httl"), testKey})},
+ {"hkeyexist", buildCommand([][]byte{[]byte("hkeyexist"), testKey})},
+ {"hexpire", buildCommand([][]byte{[]byte("hexpire"), testKey, []byte("10")})},
+ {"hpersist", buildCommand([][]byte{[]byte("hpersist"), testKey})},
{"hclear", buildCommand([][]byte{[]byte("hclear"), testKey})},
}
defer os.RemoveAll(dataDir)
@@ -44,8 +48,18 @@ func TestKVNode_hashCommand(t *testing.T) {
c := &fakeRedisConn{}
for _, cmd := range tests {
c.Reset()
- handler, _, _ := nd.router.GetCmdHandler(cmd.name)
- handler(c, cmd.args)
- assert.Nil(t, c.GetError())
+ origCmd := append([]byte{}, cmd.args.Raw...)
+ handler, ok := nd.router.GetCmdHandler(cmd.name)
+ if ok {
+ handler(c, cmd.args)
+ assert.Nil(t, c.GetError())
+ } else {
+ whandler, _ := nd.router.GetWCmdHandler(cmd.name)
+ rsp, err := whandler(cmd.args)
+ assert.Nil(t, err)
+ _, ok := rsp.(error)
+ assert.True(t, !ok)
+ }
+ assert.Equal(t, origCmd, cmd.args.Raw)
}
}
diff --git a/node/json.go b/node/json.go
index 3434330f..996415fc 100644
--- a/node/json.go
+++ b/node/json.go
@@ -1,8 +1,8 @@
package node
import (
- "github.com/absolute8511/ZanRedisDB/common"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
)
func (nd *KVNode) jsonGetCommand(conn redcon.Conn, cmd redcon.Command) {
@@ -45,14 +45,14 @@ func (nd *KVNode) jsonmkGetCommand(conn redcon.Conn, cmd redcon.Command) {
conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
return
}
- if len(cmd.Args[1:]) >= common.MAX_BATCH_NUM {
+ if len(cmd.Args[1:]) > common.MAX_BATCH_NUM {
conn.WriteError(errTooMuchBatchSize.Error())
return
}
keys := cmd.Args[1 : len(cmd.Args)-1]
path := cmd.Args[len(cmd.Args)-1]
for i := 0; i < len(keys); i++ {
- _, key, err := common.ExtractNamesapce(keys[i])
+ key, err := common.CutNamesapce(keys[i])
if err != nil {
conn.WriteError(err.Error())
return
@@ -129,42 +129,17 @@ func (nd *KVNode) jsonObjKeysCommand(conn redcon.Conn, cmd redcon.Command) {
}
}
-func (nd *KVNode) jsonSetCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- conn.WriteString("OK")
-}
-
-func (nd *KVNode) jsonDelCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
- }
-}
-
-func (nd *KVNode) jsonArrayAppendCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
- }
-}
-
-func (nd *KVNode) jsonArrayPopCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- rsp, ok := v.(string)
- if !ok {
- conn.WriteError(errInvalidResponse.Error())
- return
- }
- conn.WriteBulkString(rsp)
-}
-
func (kvsm *kvStoreSM) localJSONSetCommand(cmd redcon.Command, ts int64) (interface{}, error) {
v, err := kvsm.store.JSet(ts, cmd.Args[1], cmd.Args[2], cmd.Args[3])
return v, err
}
func (kvsm *kvStoreSM) localJSONDelCommand(cmd redcon.Command, ts int64) (interface{}, error) {
- n, err := kvsm.store.JDel(ts, cmd.Args[1], cmd.Args[2])
+ path := []byte("")
+ if len(cmd.Args) > 2 {
+ path = cmd.Args[2]
+ }
+ n, err := kvsm.store.JDel(ts, cmd.Args[1], path)
if err != nil {
return int64(0), err
}
@@ -189,5 +164,5 @@ func (kvsm *kvStoreSM) localJSONArrayPopCommand(cmd redcon.Command, ts int64) (i
if err != nil {
return nil, err
}
- return elem, nil
+ return []byte(elem), nil
}
diff --git a/node/json_test.go b/node/json_test.go
index 6ab36576..8459144d 100644
--- a/node/json_test.go
+++ b/node/json_test.go
@@ -19,6 +19,7 @@ func TestKVNode_jsonCommand(t *testing.T) {
args redcon.Command
}{
{"json.get", buildCommand([][]byte{[]byte("json.get"), testKey, testJSONField})},
+ {"json.get", buildCommand([][]byte{[]byte("json.get"), testKey})},
{"json.keyexists", buildCommand([][]byte{[]byte("json.keyexists"), testKey})},
{"json.mkget", buildCommand([][]byte{[]byte("json.mkget"), testKey, testJSONField})},
{"json.type", buildCommand([][]byte{[]byte("json.type"), testKey})},
@@ -30,6 +31,7 @@ func TestKVNode_jsonCommand(t *testing.T) {
{"json.del", buildCommand([][]byte{[]byte("json.del"), testKey, testJSONField})},
{"json.arrappend", buildCommand([][]byte{[]byte("json.arrappend"), testKey, testJSONField, testJSONFieldValue})},
{"json.arrpop", buildCommand([][]byte{[]byte("json.arrpop"), testKey, testJSONField})},
+ {"json.del", buildCommand([][]byte{[]byte("json.del"), testKey})},
}
defer os.RemoveAll(dataDir)
defer nd.Stop()
@@ -37,8 +39,18 @@ func TestKVNode_jsonCommand(t *testing.T) {
c := &fakeRedisConn{}
for _, cmd := range tests {
c.Reset()
- handler, _, _ := nd.router.GetCmdHandler(cmd.name)
- handler(c, cmd.args)
- assert.Nil(t, c.GetError())
+ origCmd := append([]byte{}, cmd.args.Raw...)
+ handler, ok := nd.router.GetCmdHandler(cmd.name)
+ if ok {
+ handler(c, cmd.args)
+ assert.Nil(t, c.GetError())
+ } else {
+ whandler, _ := nd.router.GetWCmdHandler(cmd.name)
+ rsp, err := whandler(cmd.args)
+ assert.Nil(t, err)
+ _, ok := rsp.(error)
+ assert.True(t, !ok)
+ }
+ assert.Equal(t, origCmd, cmd.args.Raw)
}
}
diff --git a/node/keys.go b/node/keys.go
index 56dc0cee..851a385b 100644
--- a/node/keys.go
+++ b/node/keys.go
@@ -1,37 +1,224 @@
package node
import (
+ "bytes"
+ "fmt"
"strconv"
+ "strings"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/rockredis"
)
+func getExSecs(ex []byte, secs []byte) (int64, error) {
+ if !bytes.Equal(bytes.ToLower(ex), []byte("ex")) {
+ return 0, common.ErrInvalidArgs
+ }
+ n, err := strconv.ParseInt(string(secs), 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ if n <= 0 {
+ return 0, common.ErrInvalidTTL
+ }
+ return n, nil
+}
+
+func getExNxXXArgs(opts [][]byte) (int64, bool, bool, error) {
+ nxorxx := false
+ var duration int64
+ createOnly := false
+ updateOnly := false
+ var err error
+ for i := 0; i < len(opts); i++ {
+ op := strings.ToLower(string(opts[i]))
+ if op == "nx" {
+ if nxorxx {
+ return duration, createOnly, updateOnly, common.ErrInvalidArgs
+ }
+ createOnly = true
+ nxorxx = true
+ } else if op == "xx" {
+ if nxorxx {
+ return duration, createOnly, updateOnly, common.ErrInvalidArgs
+ }
+ updateOnly = true
+ nxorxx = true
+ } else if op == "ex" {
+ if len(opts) <= i+1 {
+ return duration, createOnly, updateOnly, common.ErrInvalidArgs
+ }
+ duration, err = strconv.ParseInt(string(opts[i+1]), 10, 64)
+ if err != nil {
+ return duration, createOnly, updateOnly, common.ErrInvalidArgs
+ }
+ if duration <= 0 {
+ return duration, createOnly, updateOnly, common.ErrInvalidTTL
+ }
+ // skip seconds arg
+ i++
+ } else {
+ return duration, createOnly, updateOnly, common.ErrInvalidArgs
+ }
+ }
+ return duration, createOnly, updateOnly, nil
+}
+
func (nd *KVNode) Lookup(key []byte) ([]byte, error) {
- _, key, err := common.ExtractNamesapce(key)
+ key, err := common.CutNamesapce(key)
if err != nil {
return nil, err
}
- v, err := nd.store.LocalLookup(key)
+ v, err := nd.store.KVGet(key)
return v, err
}
+func (nd *KVNode) getNoLockCommand(conn redcon.Conn, cmd redcon.Command) {
+ err := nd.store.GetValueWithOpNoLock(cmd.Args[1], func(val []byte) error {
+ if val == nil {
+ conn.WriteNull()
+ } else {
+ conn.WriteBulk(val)
+ // since val will be freed, we need to flush before returning
+ conn.Flush()
+ }
+ return nil
+ })
+ if err != nil {
+ conn.WriteError(err.Error())
+ }
+}
+
func (nd *KVNode) getCommand(conn redcon.Conn, cmd redcon.Command) {
- val, err := nd.store.LocalLookup(cmd.Args[1])
- if err != nil || val == nil {
+ err := nd.store.GetValueWithOp(cmd.Args[1], func(val []byte) error {
+ if val == nil {
+ conn.WriteNull()
+ } else {
+ conn.WriteBulk(val)
+ // since val will be freed, we need to flush before returning
+ conn.Flush()
+ }
+ return nil
+ })
+ if err != nil {
+ conn.WriteError(err.Error())
+ }
+}
+
+func (nd *KVNode) getVerCommand(conn redcon.Conn, cmd redcon.Command) {
+ val, err := nd.store.KVGetVer(cmd.Args[1])
+ if err != nil {
+ conn.WriteError(err.Error())
+ return
+ }
+ conn.WriteInt64(val)
+}
+
+func (nd *KVNode) getExpiredCommand(conn redcon.Conn, cmd redcon.Command) {
+ val, err := nd.store.KVGetExpired(cmd.Args[1])
+ if err != nil {
+ conn.WriteError(err.Error())
+ return
+ }
+ if val == nil {
conn.WriteNull()
} else {
conn.WriteBulk(val)
}
}
+func (nd *KVNode) getRangeCommand(conn redcon.Conn, cmd redcon.Command) {
+ if len(cmd.Args) != 4 {
+ conn.WriteError(errWrongNumberArgs.Error())
+ return
+ }
+ start, end, err := getRangeArgs(cmd)
+ if err != nil {
+ conn.WriteError(err.Error())
+ return
+ }
+ val, err := nd.store.GetRange(cmd.Args[1], start, end)
+ if err != nil {
+ conn.WriteError(err.Error())
+ return
+ }
+ if val == nil {
+ conn.WriteNull()
+ } else {
+ conn.WriteBulk(val)
+ }
+}
+
+func (nd *KVNode) strlenCommand(conn redcon.Conn, cmd redcon.Command) {
+ val, err := nd.store.StrLen(cmd.Args[1])
+ if err != nil {
+ conn.WriteError(err.Error())
+ return
+ }
+ conn.WriteInt64(val)
+}
+
func (nd *KVNode) existsCommand(cmd redcon.Command) (interface{}, error) {
val, err := nd.store.KVExists(cmd.Args[1:]...)
return val, err
}
+func (nd *KVNode) getbitCommand(conn redcon.Conn, cmd redcon.Command) {
+ if len(cmd.Args) < 3 {
+ conn.WriteError(errWrongNumberArgs.Error())
+ return
+ }
+ offset, err := strconv.ParseInt(string(cmd.Args[2]), 10, 64)
+ if err != nil {
+ conn.WriteError(err.Error())
+ return
+ }
+ val, err := nd.store.BitGetV2(cmd.Args[1], offset)
+ if err != nil {
+ conn.WriteError(err.Error())
+ return
+ }
+ conn.WriteInt64(val)
+}
+
+func getRangeArgs(cmd redcon.Command) (int64, int64, error) {
+ start, end := int64(0), int64(-1)
+ var err error
+ if len(cmd.Args) >= 4 {
+ start, err = strconv.ParseInt(string(cmd.Args[2]), 10, 64)
+ if err != nil {
+ return start, end, err
+ }
+ end, err = strconv.ParseInt(string(cmd.Args[3]), 10, 64)
+ if err != nil {
+ return start, end, err
+ }
+ }
+ return start, end, nil
+}
+
+func (nd *KVNode) bitcountCommand(conn redcon.Conn, cmd redcon.Command) {
+ if len(cmd.Args) != 2 && len(cmd.Args) != 4 {
+ conn.WriteError(errWrongNumberArgs.Error())
+ return
+ }
+ start, end, err := getRangeArgs(cmd)
+ if err != nil {
+ conn.WriteError(err.Error())
+ return
+ }
+
+ val, err := nd.store.BitCountV2(cmd.Args[1], start, end)
+ if err != nil {
+ conn.WriteError(err.Error())
+ return
+ }
+ conn.WriteInt64(val)
+}
+
func (nd *KVNode) mgetCommand(conn redcon.Conn, cmd redcon.Command) {
vals, _ := nd.store.MGet(cmd.Args[1:]...)
conn.WriteArray(len(vals))
@@ -55,36 +242,137 @@ func (nd *KVNode) pfcountCommand(conn redcon.Conn, cmd redcon.Command) {
}
}
-func (nd *KVNode) setCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- conn.WriteString("OK")
+func (nd *KVNode) setCommand(cmd redcon.Command) (interface{}, error) {
+ if len(cmd.Args) > 3 {
+ _, _, _, err := getExNxXXArgs(cmd.Args[3:])
+ if err != nil {
+ return nil, err
+ }
+ } else if len(cmd.Args) != 3 {
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
+ }
+ rsp, err := rebuildFirstKeyAndPropose(nd, cmd, func(cmd redcon.Command, rsp interface{}) (interface{}, error) {
+ if err, ok := rsp.(error); ok {
+ return nil, err
+ }
+ if v, ok := rsp.(int64); ok {
+ if v == int64(0) {
+ return nil, nil
+ }
+ }
+ return "OK", nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return rsp, nil
}
-func (nd *KVNode) setnxCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
+func (nd *KVNode) setnxCommand(cmd redcon.Command) (interface{}, error) {
+ if len(cmd.Args) != 3 {
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
+ key, err := common.CutNamesapce(cmd.Args[1])
+ if err != nil {
+ return nil, err
+ }
+ ex, _ := nd.store.KVExists(key)
+ if ex == 1 {
+ // already exists
+ return int64(0), nil
+ }
+
+ rsp, err := rebuildFirstKeyAndPropose(nd, cmd, nil)
+ if err != nil {
+ return nil, err
+ }
+ return rsp, nil
}
-func (nd *KVNode) msetCommand(cmd redcon.Command, v interface{}) (interface{}, error) {
- return nil, nil
+func (nd *KVNode) setIfEQCommand(cmd redcon.Command) (interface{}, error) {
+ // set key oldvalue newvalue [ex seconds]
+ if len(cmd.Args) != 4 && len(cmd.Args) != 6 {
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
+ }
+ if len(cmd.Args) == 6 {
+ _, err := getExSecs(cmd.Args[4], cmd.Args[5])
+ if err != nil {
+ return nil, err
+ }
+ }
+ key, err := common.CutNamesapce(cmd.Args[1])
+ if err != nil {
+ return nil, err
+ }
+ oldv, err := nd.store.KVGet(key)
+ if err != nil {
+ return int64(0), err
+ }
+ if !bytes.Equal(oldv, cmd.Args[2]) {
+ // old value not matched
+ return int64(0), nil
+ }
+
+ rsp, err := rebuildFirstKeyAndPropose(nd, cmd, nil)
+ if err != nil {
+ return nil, err
+ }
+ return rsp, nil
}
-func (nd *KVNode) incrCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
+func (nd *KVNode) delIfEQCommand(cmd redcon.Command) (interface{}, error) {
+ if len(cmd.Args) != 3 {
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
+ }
+ key, err := common.CutNamesapce(cmd.Args[1])
+ if err != nil {
+ return nil, err
+ }
+ oldv, err := nd.store.KVGet(key)
+ if err != nil {
+ return int64(0), err
+ }
+ if !bytes.Equal(oldv, cmd.Args[2]) {
+ // old value not matched
+ return int64(0), nil
+ }
+
+ rsp, err := rebuildFirstKeyAndPropose(nd, cmd, nil)
+ if err != nil {
+ return nil, err
}
+ return rsp, nil
}
-func (nd *KVNode) incrbyCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
+func (nd *KVNode) setbitCommand(cmd redcon.Command) (interface{}, error) {
+ if len(cmd.Args) != 4 {
+ return nil, errWrongNumberArgs
+ }
+
+ offset, err := strconv.ParseInt(string(cmd.Args[2]), 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ on, err := strconv.ParseInt(string(cmd.Args[3]), 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ if offset > rockredis.MaxBitOffset || offset < 0 {
+ return nil, rockredis.ErrBitOverflow
+ }
+ if (on & ^1) != 0 {
+ return nil, fmt.Errorf("bit should be 0 or 1, got %d", on)
}
+
+ v, err := rebuildFirstKeyAndPropose(nd, cmd, nil)
+ if err != nil {
+ return nil, err
+ }
+ return v, nil
}
func (nd *KVNode) delCommand(cmd redcon.Command, v interface{}) (interface{}, error) {
@@ -95,20 +383,31 @@ func (nd *KVNode) delCommand(cmd redcon.Command, v interface{}) (interface{}, er
}
}
-func (nd *KVNode) pfaddCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
- }
+func (kvsm *kvStoreSM) localNoOpWriteCommand(cmd redcon.Command, ts int64) (interface{}, error) {
+ return nil, nil
}
// local write command execute only on follower or on the local commit of leader
// the return value of follower is ignored, return value of local leader will be
// return to the future response.
func (kvsm *kvStoreSM) localSetCommand(cmd redcon.Command, ts int64) (interface{}, error) {
+ if len(cmd.Args) > 3 {
+ sec, createOnly, updateOnly, err := getExNxXXArgs(cmd.Args[3:])
+ if err != nil {
+ return nil, err
+ }
+ return kvsm.store.KVSetWithOpts(ts, cmd.Args[1], cmd.Args[2], sec, createOnly, updateOnly)
+ }
err := kvsm.store.KVSet(ts, cmd.Args[1], cmd.Args[2])
- return nil, err
+ return int64(1), err
+}
+
+func (kvsm *kvStoreSM) localGetSetCommand(cmd redcon.Command, ts int64) (interface{}, error) {
+ oldV, err := kvsm.store.KVGetSet(ts, cmd.Args[1], cmd.Args[2])
+ if oldV == nil {
+ return nil, err
+ }
+ return oldV, err
}
func (kvsm *kvStoreSM) localSetnxCommand(cmd redcon.Command, ts int64) (interface{}, error) {
@@ -116,6 +415,25 @@ func (kvsm *kvStoreSM) localSetnxCommand(cmd redcon.Command, ts int64) (interfac
return v, err
}
+func (kvsm *kvStoreSM) localSetIfEQCommand(cmd redcon.Command, ts int64) (interface{}, error) {
+ sec := int64(0)
+ if len(cmd.Args) == 6 {
+ var err error
+ sec, err = getExSecs(cmd.Args[4], cmd.Args[5])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ v, err := kvsm.store.SetIfEQ(ts, cmd.Args[1], cmd.Args[2], cmd.Args[3], sec)
+ return v, err
+}
+
+func (kvsm *kvStoreSM) localDelIfEQCommand(cmd redcon.Command, ts int64) (interface{}, error) {
+ v, err := kvsm.store.DelIfEQ(ts, cmd.Args[1], cmd.Args[2])
+ return v, err
+}
+
func (kvsm *kvStoreSM) localMSetCommand(cmd redcon.Command, ts int64) (interface{}, error) {
args := cmd.Args[1:]
kvlist := make([]common.KVRecord, 0, len(args)/2)
@@ -158,3 +476,37 @@ func (kvsm *kvStoreSM) localPFAddCommand(cmd redcon.Command, ts int64) (interfac
v, err := kvsm.store.PFAdd(ts, cmd.Args[1], cmd.Args[2:]...)
return v, err
}
+
+func (kvsm *kvStoreSM) localBitSetCommand(cmd redcon.Command, ts int64) (interface{}, error) {
+ return kvsm.localBitSetV2Command(cmd, ts)
+}
+
+func (kvsm *kvStoreSM) localBitSetV2Command(cmd redcon.Command, ts int64) (interface{}, error) {
+ offset, err := strconv.ParseInt(string(cmd.Args[2]), 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ on, err := strconv.ParseInt(string(cmd.Args[3]), 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return kvsm.store.BitSetV2(ts, cmd.Args[1], offset, int(on))
+}
+
+func (kvsm *kvStoreSM) localBitClearCommand(cmd redcon.Command, ts int64) (interface{}, error) {
+ return kvsm.store.BitClear(ts, cmd.Args[1])
+}
+
+func (kvsm *kvStoreSM) localAppendCommand(cmd redcon.Command, ts int64) (interface{}, error) {
+ ret, err := kvsm.store.Append(ts, cmd.Args[1], cmd.Args[2])
+ return ret, err
+}
+
+func (kvsm *kvStoreSM) localSetRangeCommand(cmd redcon.Command, ts int64) (interface{}, error) {
+ offset, err := strconv.ParseInt(string(cmd.Args[2]), 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ ret, err := kvsm.store.SetRange(ts, cmd.Args[1], int(offset), cmd.Args[3])
+ return ret, err
+}
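
Aside (not part of the patch): a hypothetical illustration of the new SET option parser getExNxXXArgs used by setCommand/localSetCommand above. For the trailing options of `SET key val EX 10 NX` it yields a 10-second TTL with create-only semantics.

```go
package node

import "fmt"

// exampleParseSetOpts is illustrative only; it feeds the trailing
// options of "SET key val EX 10 NX" into getExNxXXArgs.
func exampleParseSetOpts() {
	opts := [][]byte{[]byte("EX"), []byte("10"), []byte("NX")}
	ttl, createOnly, updateOnly, err := getExNxXXArgs(opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(ttl, createOnly, updateOnly) // 10 true false
}
```
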
diff --git a/node/keys_test.go b/node/keys_test.go
index dc5aa740..09b59d83 100644
--- a/node/keys_test.go
+++ b/node/keys_test.go
@@ -4,26 +4,34 @@ import (
"errors"
"fmt"
"io/ioutil"
+ "math/rand"
"net"
"net/http"
"net/url"
"os"
+ "strconv"
+ "sync"
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/rockredis"
- "github.com/absolute8511/ZanRedisDB/stats"
- "github.com/absolute8511/ZanRedisDB/transport/rafthttp"
"github.com/absolute8511/redcon"
"github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/rockredis"
+ "github.com/youzan/ZanRedisDB/stats"
+ "github.com/youzan/ZanRedisDB/transport/rafthttp"
)
func getTestKVNode(t *testing.T) (*KVNode, string, chan struct{}) {
+ return getTestKVNodeWith(t, false)
+}
+
+func getTestKVNodeWith(t *testing.T, mustNoLeader bool) (*KVNode, string, chan struct{}) {
tmpDir, err := ioutil.TempDir("", fmt.Sprintf("kvnode-test-%d", time.Now().UnixNano()))
assert.Nil(t, err)
t.Logf("dir:%v\n", tmpDir)
- raftAddr := "http://127.0.0.1:12345"
+ rport := rand.Int31n(1000) + 33333
+ raftAddr := "http://127.0.0.1:" + strconv.Itoa(int(rport))
var replica ReplicaInfo
replica.NodeID = 1
replica.ReplicaID = 1
@@ -50,9 +58,21 @@ func getTestKVNode(t *testing.T) (*KVNode, string, chan struct{}) {
nsConf.EngType = rockredis.EngType
nsConf.PartitionNum = 1
nsConf.Replicator = 1
+ if mustNoLeader {
+ nsConf.Replicator = 2
+ }
nsConf.RaftGroupConf.GroupID = 1000
nsConf.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica)
- nsConf.ExpirationPolicy = "consistency_deletion"
+ if mustNoLeader {
+ // add a node that is never started so that a leader cannot be elected
+ raftAddr := "http://127.0.0.1:" + strconv.Itoa(int(rport+1))
+ var replica2 ReplicaInfo
+ replica2.NodeID = 2
+ replica2.ReplicaID = 2
+ replica2.RaftAddr = raftAddr
+ nsConf.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica2)
+ }
+ nsConf.ExpirationPolicy = common.DefaultExpirationPolicy
mconf := &MachineConfig{
BroadcastAddr: "127.0.0.1",
@@ -76,6 +96,9 @@ func getTestKVNode(t *testing.T) (*KVNode, string, chan struct{}) {
stopC := make(chan struct{})
ln, err := common.NewStoppableListener(url.Host, stopC)
assert.Nil(t, err)
+ if ln == nil {
+ panic(err)
+ }
go func() {
(&http.Server{Handler: raftTransport.Handler()}).Serve(ln)
}()
@@ -108,7 +131,11 @@ func (c *fakeRedisConn) WriteError(msg string) { c.err = errors.New(msg) }
func (c *fakeRedisConn) WriteString(str string) { c.rsp = append(c.rsp, str) }
// WriteBulk writes bulk bytes to the client.
-func (c *fakeRedisConn) WriteBulk(bulk []byte) { c.rsp = append(c.rsp, bulk) }
+func (c *fakeRedisConn) WriteBulk(bulk []byte) {
+ tmp := make([]byte, len(bulk))
+ copy(tmp, bulk)
+ c.rsp = append(c.rsp, tmp)
+}
// WriteBulkString writes a bulk string to the client.
func (c *fakeRedisConn) WriteBulkString(bulk string) { c.rsp = append(c.rsp, bulk) }
@@ -125,7 +152,11 @@ func (c *fakeRedisConn) WriteArray(count int) { c.rsp = append(c.rsp, count) }
func (c *fakeRedisConn) WriteNull() { c.rsp = append(c.rsp, nil) }
// WriteRaw writes raw data to the client.
-func (c *fakeRedisConn) WriteRaw(data []byte) { c.rsp = append(c.rsp, data) }
+func (c *fakeRedisConn) WriteRaw(data []byte) {
+ tmp := make([]byte, len(data))
+ copy(tmp, data)
+ c.rsp = append(c.rsp, tmp)
+}
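Both WriteBulk and WriteRaw in the fake connection now copy their argument before recording it. The assumption behind the change is that the caller may reuse the underlying buffer after the write returns, so storing only the slice header would let later assertions observe mutated data. A self-contained sketch of the difference:

```go
package sketch

// capture records responses the way the fake connection does.
type capture struct{ rsp [][]byte }

// keepAlias stores the slice header only; if the caller reuses buf,
// the recorded response silently changes afterwards.
func (c *capture) keepAlias(buf []byte) { c.rsp = append(c.rsp, buf) }

// keepCopy snapshots the bytes so later buffer reuse cannot affect it.
func (c *capture) keepCopy(buf []byte) {
	tmp := make([]byte, len(buf))
	copy(tmp, buf)
	c.rsp = append(c.rsp, tmp)
}
```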
// Context returns a user-defined context
func (c *fakeRedisConn) Context() interface{} { return nil }
@@ -142,6 +173,7 @@ func (c *fakeRedisConn) ReadPipeline() []redcon.Command { return nil }
func (c *fakeRedisConn) PeekPipeline() []redcon.Command { return nil }
func (c *fakeRedisConn) NetConn() net.Conn { return nil }
+func (c *fakeRedisConn) Flush() error { return nil }
func TestKVNode_kvCommand(t *testing.T) {
nd, dataDir, stopC := getTestKVNode(t)
@@ -150,6 +182,7 @@ func TestKVNode_kvCommand(t *testing.T) {
testKey2 := []byte("default:test:2")
testKey2Value := []byte("2")
testPFKey := []byte("default:test:pf1")
+ testBitKey := []byte("default:test:bit1")
tests := []struct {
name string
args redcon.Command
@@ -158,9 +191,79 @@ func TestKVNode_kvCommand(t *testing.T) {
{"mget", buildCommand([][]byte{[]byte("mget"), testKey, testKey2})},
{"exists", buildCommand([][]byte{[]byte("exists"), testKey, testKey2})},
{"set", buildCommand([][]byte{[]byte("set"), testKey, testKeyValue})},
+ {"setex", buildCommand([][]byte{[]byte("setex"), testKey, []byte("10"), testKeyValue})},
+ {"set", buildCommand([][]byte{[]byte("set"), testKey, testKeyValue, []byte("ex"), []byte("10"), []byte("nx")})},
+ {"noopwrite", buildCommand([][]byte{[]byte("noopwrite"), testKey, testKeyValue})},
+ {"getset", buildCommand([][]byte{[]byte("getset"), testKey, testKeyValue})},
{"setnx", buildCommand([][]byte{[]byte("setnx"), testKey, testKeyValue})},
{"setnx", buildCommand([][]byte{[]byte("setnx"), testKey2, testKey2Value})},
//{"mset", buildCommand([][]byte{[]byte("mset"), testKey, testKeyValue, testKey2, testKey2Value})},
+ {"plset", buildCommand([][]byte{[]byte("mset"), testKey, testKeyValue, testKey2, testKey2Value})},
+ {"del", buildCommand([][]byte{[]byte("del"), testKey, testKey2})},
+ {"incr", buildCommand([][]byte{[]byte("incr"), testKey})},
+ {"incrby", buildCommand([][]byte{[]byte("incrby"), testKey, testKey2Value})},
+ {"get", buildCommand([][]byte{[]byte("get"), testKey})},
+ {"mget", buildCommand([][]byte{[]byte("mget"), testKey, testKey2})},
+ {"exists", buildCommand([][]byte{[]byte("exists"), testKey})},
+ {"pfadd", buildCommand([][]byte{[]byte("pfadd"), testPFKey, testKeyValue})},
+ {"pfcount", buildCommand([][]byte{[]byte("pfcount"), testPFKey})},
+ {"setbit", buildCommand([][]byte{[]byte("setbit"), testBitKey, []byte("1"), []byte("1")})},
+ {"getbit", buildCommand([][]byte{[]byte("getbit"), testBitKey, []byte("1")})},
+ {"bitcount", buildCommand([][]byte{[]byte("bitcount"), testBitKey, []byte("1"), []byte("2")})},
+ {"getrange", buildCommand([][]byte{[]byte("getrange"), testKey, []byte("1"), []byte("2")})},
+ }
+ defer os.RemoveAll(dataDir)
+ defer nd.Stop()
+ defer close(stopC)
+ c := &fakeRedisConn{}
+ defer c.Close()
+ defer c.Reset()
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c.Reset()
+ origCmd := append([]byte{}, tt.args.Raw...)
+ handler, _ := nd.router.GetCmdHandler(tt.name)
+ if handler != nil {
+ handler(c, tt.args)
+ } else {
+ whandler, _ := nd.router.GetWCmdHandler(tt.name)
+ if whandler != nil {
+ rsp, err := whandler(tt.args)
+ assert.Nil(t, err)
+ _, ok := rsp.(error)
+ assert.True(t, !ok)
+ } else {
+ handler, _, _ := nd.router.GetMergeCmdHandler(tt.name)
+ _, err := handler(tt.args)
+ assert.Nil(t, err)
+ }
+ }
+ t.Logf("handler response: %v", c.rsp)
+ assert.Nil(t, c.GetError())
+ assert.Equal(t, origCmd, tt.args.Raw)
+ })
+ }
+}
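Each case also snapshots cmd.Raw into origCmd before invoking the handler and asserts it is unchanged afterwards, guarding against handlers that mutate the shared command buffer in place. A small stand-alone helper in the same spirit (hypothetical, not part of the test file) could look like:

```go
package sketch

import (
	"bytes"
	"fmt"
)

// runUnmodified invokes fn on raw and reports an error if fn mutated
// the buffer in place, mirroring the origCmd assertion in the tests.
func runUnmodified(raw []byte, fn func([]byte)) error {
	orig := append([]byte(nil), raw...)
	fn(raw)
	if !bytes.Equal(orig, raw) {
		return fmt.Errorf("command buffer was modified in place")
	}
	return nil
}
```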
+
+func TestKVNode_kvCommandWhileNoLeader(t *testing.T) {
+ nd, dataDir, stopC := getTestKVNodeWith(t, true)
+ testKey := []byte("default:test:noleader1")
+ testKeyValue := []byte("1")
+ testKey2 := []byte("default:test:noleader2")
+ testKey2Value := []byte("2")
+ testPFKey := []byte("default:test:noleaderpf1")
+ testBitKey := []byte("default:test:noleaderbit1")
+ tests := []struct {
+ name string
+ args redcon.Command
+ }{
+ {"get", buildCommand([][]byte{[]byte("get"), testKey})},
+ {"mget", buildCommand([][]byte{[]byte("mget"), testKey, testKey2})},
+ {"exists", buildCommand([][]byte{[]byte("exists"), testKey, testKey2})},
+ {"set", buildCommand([][]byte{[]byte("set"), testKey, testKeyValue})},
+ {"getset", buildCommand([][]byte{[]byte("getset"), testKey, testKeyValue})},
+ {"setnx", buildCommand([][]byte{[]byte("setnx"), testKey, testKeyValue})},
+ {"setnx", buildCommand([][]byte{[]byte("setnx"), testKey2, testKey2Value})},
{"del", buildCommand([][]byte{[]byte("del"), testKey, testKey2})},
{"incr", buildCommand([][]byte{[]byte("incr"), testKey})},
{"incrby", buildCommand([][]byte{[]byte("incrby"), testKey, testKey2Value})},
@@ -169,21 +272,195 @@ func TestKVNode_kvCommand(t *testing.T) {
{"exists", buildCommand([][]byte{[]byte("exists"), testKey})},
{"pfadd", buildCommand([][]byte{[]byte("pfadd"), testPFKey, testKeyValue})},
{"pfcount", buildCommand([][]byte{[]byte("pfcount"), testPFKey})},
+ {"setbit", buildCommand([][]byte{[]byte("setbit"), testBitKey, []byte("1"), []byte("1")})},
+ {"getbit", buildCommand([][]byte{[]byte("getbit"), testBitKey, []byte("1")})},
+ {"bitcount", buildCommand([][]byte{[]byte("bitcount"), testBitKey, []byte("1"), []byte("2")})},
+ }
+ defer os.RemoveAll(dataDir)
+ defer nd.Stop()
+ defer close(stopC)
+ c := &fakeRedisConn{}
+ defer c.Close()
+ defer c.Reset()
+ for _, cmd := range tests {
+ c.Reset()
+ origCmd := append([]byte{}, cmd.args.Raw...)
+ handler, _ := nd.router.GetCmdHandler(cmd.name)
+ if handler != nil {
+ handler(c, cmd.args)
+ } else {
+ whandler, _ := nd.router.GetWCmdHandler(cmd.name)
+ if whandler != nil {
+ _, err := whandler(cmd.args)
+ assert.Equal(t, ErrNodeNoLeader, err)
+ } else {
+ handler, isWrite, _ := nd.router.GetMergeCmdHandler(cmd.name)
+ _, err := handler(cmd.args)
+ if isWrite {
+ assert.Equal(t, ErrNodeNoLeader, err)
+ } else {
+ assert.Nil(t, err)
+ }
+ }
+ }
+ t.Logf("handler response: %v", c.rsp)
+ assert.Nil(t, c.GetError())
+ assert.Equal(t, origCmd, cmd.args.Raw)
+ }
+}
+
+func TestKVNode_kvbatchCommand(t *testing.T) {
+ nd, dataDir, stopC := getTestKVNode(t)
+ defer os.RemoveAll(dataDir)
+ defer nd.Stop()
+ defer close(stopC)
+ var wg sync.WaitGroup
+ for i := 0; i < 50; i++ {
+ wg.Add(1)
+ go func(index int) {
+ defer wg.Done()
+ fc := &fakeRedisConn{}
+ defer fc.Close()
+ defer fc.Reset()
+ for k := 0; k < 100; k++ {
+ fc.Reset()
+ setHandler, _ := nd.router.GetWCmdHandler("set")
+ testKey := []byte(fmt.Sprintf("default:test:batch_%v_%v", index, k))
+ rsp, err := setHandler(buildCommand([][]byte{[]byte("set"), testKey, testKey}))
+ assert.Nil(t, err)
+ rsp, err = rsp.(*FutureRsp).WaitRsp()
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", rsp)
+ }
+ }(i)
+ }
+ wg.Wait()
+ fc := &fakeRedisConn{}
+ defer fc.Close()
+ defer fc.Reset()
+ for i := 0; i < 50; i++ {
+ for k := 0; k < 100; k++ {
+ fc.Reset()
+ getHandler, _ := nd.router.GetCmdHandler("get")
+ testKey := []byte(fmt.Sprintf("default:test:batch_%v_%v", i, k))
+ getHandler(fc, buildCommand([][]byte{[]byte("get"), testKey}))
+ assert.Nil(t, fc.GetError())
+ assert.Equal(t, testKey, fc.rsp[0])
+ }
+ }
+}
+
+func TestKVNode_batchWithNonBatchCommand(t *testing.T) {
+ nd, dataDir, stopC := getTestKVNode(t)
+ defer os.RemoveAll(dataDir)
+ defer nd.Stop()
+ defer close(stopC)
+ var wg sync.WaitGroup
+ for i := 0; i < 50; i++ {
+ wg.Add(2)
+ go func(index int) {
+ defer wg.Done()
+ fc := &fakeRedisConn{}
+ defer fc.Close()
+ defer fc.Reset()
+ for k := 0; k < 100; k++ {
+ fc.Reset()
+ setHandler, _ := nd.router.GetWCmdHandler("set")
+ testKey := []byte(fmt.Sprintf("default:test:batchset_%v_%v", index, k))
+ rsp, err := setHandler(buildCommand([][]byte{[]byte("set"), testKey, testKey}))
+ assert.Nil(t, err)
+ rsp, err = rsp.(*FutureRsp).WaitRsp()
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", rsp)
+ }
+ }(i)
+ go func(index int) {
+ defer wg.Done()
+ fc := &fakeRedisConn{}
+ defer fc.Close()
+ defer fc.Reset()
+ for k := 0; k < 100; k++ {
+ fc.Reset()
+ setHandler, _ := nd.router.GetWCmdHandler("incr")
+ testKey := []byte(fmt.Sprintf("default:test:nonbatch_%v_%v", index, k))
+ rsp, err := setHandler(buildCommand([][]byte{[]byte("incr"), testKey}))
+ assert.Nil(t, err)
+ rsp, err = rsp.(*FutureRsp).WaitRsp()
+ assert.Nil(t, err)
+ _, ok := rsp.(error)
+ assert.True(t, !ok)
+ }
+ }(i)
+ }
+ wg.Wait()
+ fc := &fakeRedisConn{}
+ defer fc.Close()
+ defer fc.Reset()
+ for i := 0; i < 50; i++ {
+ for k := 0; k < 100; k++ {
+ fc.Reset()
+ getHandler, _ := nd.router.GetCmdHandler("get")
+ testKey := []byte(fmt.Sprintf("default:test:batchset_%v_%v", i, k))
+ getHandler(fc, buildCommand([][]byte{[]byte("get"), testKey}))
+ assert.Nil(t, fc.GetError())
+ assert.Equal(t, testKey, fc.rsp[0])
+ }
+ }
+ for i := 0; i < 50; i++ {
+ for k := 0; k < 100; k++ {
+ fc.Reset()
+ getHandler, _ := nd.router.GetCmdHandler("get")
+ testKey := []byte(fmt.Sprintf("default:test:nonbatch_%v_%v", i, k))
+ getHandler(fc, buildCommand([][]byte{[]byte("get"), testKey}))
+ assert.Nil(t, fc.GetError())
+ assert.Equal(t, []byte("1"), fc.rsp[0])
+ }
+ }
+}
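The batch tests block on rsp.(*FutureRsp).WaitRsp(), i.e. a write handler returns a future that resolves once the raft proposal has been applied. FutureRsp itself is internal to the node package; a minimal channel-based future, shown only to illustrate the pattern, might be:

```go
package sketch

// future resolves exactly once with a value or an error.
type future struct {
	done chan struct{}
	val  interface{}
	err  error
}

func newFuture() *future { return &future{done: make(chan struct{})} }

// resolve is called by the apply path once the proposal has been committed.
func (f *future) resolve(v interface{}, err error) {
	f.val, f.err = v, err
	close(f.done)
}

// WaitRsp blocks until the proposal result is available.
func (f *future) WaitRsp() (interface{}, error) {
	<-f.done
	return f.val, f.err
}
```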
+
+func TestKVNode_bitV2Command(t *testing.T) {
+ nd, dataDir, stopC := getTestKVNode(t)
+ testBitKey := []byte("default:test:bitv2_1")
+ tests := []struct {
+ name string
+ args redcon.Command
+ }{
+ {"setbitv2", buildCommand([][]byte{[]byte("setbitv2"), testBitKey, []byte("1"), []byte("1")})},
+ {"getbit", buildCommand([][]byte{[]byte("getbit"), testBitKey, []byte("1")})},
+ {"bitcount", buildCommand([][]byte{[]byte("bitcount"), testBitKey, []byte("1"), []byte("2")})},
+ {"bttl", buildCommand([][]byte{[]byte("bttl"), testBitKey})},
+ {"bkeyexist", buildCommand([][]byte{[]byte("bkeyexist"), testBitKey})},
+ {"bexpire", buildCommand([][]byte{[]byte("bexpire"), testBitKey, []byte("10")})},
+ {"bpersist", buildCommand([][]byte{[]byte("bpersist"), testBitKey})},
+ {"bitclear", buildCommand([][]byte{[]byte("bitclear"), testBitKey})},
}
defer os.RemoveAll(dataDir)
defer nd.Stop()
defer close(stopC)
c := &fakeRedisConn{}
+ defer c.Close()
+ defer c.Reset()
for _, cmd := range tests {
c.Reset()
- handler, _, _ := nd.router.GetCmdHandler(cmd.name)
+ origCmd := append([]byte{}, cmd.args.Raw...)
+ handler, _ := nd.router.GetCmdHandler(cmd.name)
if handler != nil {
handler(c, cmd.args)
} else {
- handler, _, _ := nd.router.GetMergeCmdHandler(cmd.name)
- _, err := handler(cmd.args)
- assert.Nil(t, err)
+ whandler, _ := nd.router.GetWCmdHandler(cmd.name)
+ if whandler != nil {
+ rsp, err := whandler(cmd.args)
+ assert.Nil(t, err)
+ _, ok := rsp.(error)
+ assert.True(t, !ok)
+ } else {
+ handler, _, _ := nd.router.GetMergeCmdHandler(cmd.name)
+ _, err := handler(cmd.args)
+ assert.Nil(t, err)
+ }
}
+ t.Logf("handler response: %v", c.rsp)
assert.Nil(t, c.GetError())
+ assert.Equal(t, origCmd, cmd.args.Raw)
}
}
diff --git a/node/kvstore.go b/node/kvstore.go
index b5f72d95..0f03b443 100644
--- a/node/kvstore.go
+++ b/node/kvstore.go
@@ -4,8 +4,9 @@ import (
"errors"
"os"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/rockredis"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
+ "github.com/youzan/ZanRedisDB/rockredis"
)
// a key-value store
@@ -16,10 +17,12 @@ type KVStore struct {
type KVOptions struct {
DataDir string
+ KeepBackup int
EngType string
ExpirationPolicy common.ExpirationPolicy
- RockOpts rockredis.RockOptions
- SharedConfig *rockredis.SharedRockConfig
+ DataVersion common.DataVersionT
+ RockOpts engine.RockOptions
+ SharedConfig engine.SharedRockConfig
}
func NewKVStore(kvopts *KVOptions) (*KVStore, error) {
@@ -37,14 +40,16 @@ func NewKVStore(kvopts *KVOptions) (*KVStore, error) {
func (s *KVStore) openDB() error {
var err error
if s.opts.EngType == rockredis.EngType {
- cfg := rockredis.NewRockConfig()
+ cfg := rockredis.NewRockRedisDBConfig()
cfg.DataDir = s.opts.DataDir
cfg.RockOptions = s.opts.RockOpts
cfg.ExpirationPolicy = s.opts.ExpirationPolicy
+ cfg.DataVersion = s.opts.DataVersion
cfg.SharedConfig = s.opts.SharedConfig
+ cfg.KeepBackup = s.opts.KeepBackup
s.RockDB, err = rockredis.OpenRockDB(cfg)
if err != nil {
- nodeLog.Warningf("failed to open rocksdb: %v", err)
+ nodeLog.Warningf("failed to open rocksdb: %v, %v", err, cfg.DataDir)
}
} else {
return errors.New("Not recognized engine type:" + s.opts.EngType)
@@ -75,27 +80,23 @@ func (s *KVStore) Destroy() error {
return os.RemoveAll(dataPath)
} else {
if s.opts.EngType == rockredis.EngType {
- f := rockredis.GetDataDirFromBase(s.opts.DataDir)
+ f, err := engine.GetDataDirFromBase(s.opts.RockOpts.EngineType, s.opts.DataDir)
+ if err != nil {
+ return err
+ }
return os.RemoveAll(f)
}
}
return nil
}
-func (s *KVStore) CheckExpiredData(buffer common.ExpiredDataBuffer, stop chan struct{}) error {
- if s.opts.EngType == rockredis.EngType {
- return s.RockDB.CheckExpiredData(buffer, stop)
- }
- return nil
-}
-
func (s *KVStore) LocalLookup(key []byte) ([]byte, error) {
value, err := s.KVGet(key)
return value, err
}
func (s *KVStore) LocalDelete(key []byte) (int64, error) {
- return s.KVDel(key)
+ return s.DelKeys(key)
}
func (s *KVStore) LocalPut(ts int64, key []byte, value []byte) error {
@@ -130,3 +131,9 @@ func (s *KVStore) CommitBatchWrite() error {
return nil
}
}
+
+func (s *KVStore) AbortBatch() {
+ if s.opts.EngType == rockredis.EngType {
+ s.RockDB.AbortBatch()
+ }
+}
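AbortBatch complements CommitBatchWrite: writes accumulated since the batch began can now be discarded instead of flushed, which is useful when applying part of a batch fails. A rough in-memory sketch of that begin/commit/abort shape (the RockDB batch API itself is not reproduced here):

```go
package sketch

// batchStore buffers writes until they are committed or aborted.
type batchStore struct {
	data    map[string][]byte
	pending map[string][]byte
}

func newBatchStore() *batchStore { return &batchStore{data: make(map[string][]byte)} }

func (s *batchStore) BeginBatchWrite() { s.pending = make(map[string][]byte) }

func (s *batchStore) Put(k, v []byte) { s.pending[string(k)] = v }

// CommitBatchWrite applies all buffered writes at once.
func (s *batchStore) CommitBatchWrite() {
	for k, v := range s.pending {
		s.data[k] = v
	}
	s.pending = nil
}

// AbortBatch throws the buffered writes away.
func (s *batchStore) AbortBatch() { s.pending = nil }
```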
diff --git a/node/list.go b/node/list.go
index 2bd21eb9..20db0bb6 100644
--- a/node/list.go
+++ b/node/list.go
@@ -1,9 +1,11 @@
package node
import (
+ "fmt"
"strconv"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
)
func (nd *KVNode) lindexCommand(conn redcon.Conn, cmd redcon.Command) {
@@ -56,100 +58,65 @@ func (nd *KVNode) lrangeCommand(conn redcon.Conn, cmd redcon.Command) {
}
}
-func (nd *KVNode) lfixkeyCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- conn.WriteString("OK")
-}
-
-func (nd *KVNode) lpopCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- rsp, ok := v.([]byte)
- if !ok {
- conn.WriteError("Invalid response type")
- return
+func (nd *KVNode) lsetCommand(cmd redcon.Command) (interface{}, error) {
+ if len(cmd.Args) != 4 {
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
- // wait response
- conn.WriteBulk(rsp)
-}
-
-func (nd *KVNode) lpushCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- rsp, ok := v.(int64)
- if !ok {
- conn.WriteError("Invalid response type")
- return
+ _, err := strconv.ParseInt(string(cmd.Args[2]), 10, 64)
+ if err != nil {
+ return nil, err
}
- // wait response
- conn.WriteInt64(rsp)
+ rsp, err := rebuildFirstKeyAndPropose(nd, cmd, checkOKRsp)
+ if err != nil {
+ return nil, err
+ }
+ return rsp, nil
}
-func (nd *KVNode) lsetCommand(conn redcon.Conn, cmd redcon.Command) {
- if len(cmd.Args) != 4 {
- conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
- return
- }
- _, err := strconv.ParseInt(string(cmd.Args[2]), 10, 64)
+// check locally whether the list has anything to pop (lpop/rpop) before proposing
+func (nd *KVNode) preCheckListLength(key []byte) (bool, interface{}, error) {
+ // check the list length locally to avoid an unnecessary raft proposal
+ n, err := nd.store.LLen(key)
if err != nil {
- conn.WriteError("Invalid index: " + err.Error())
- return
+ return false, nil, err
}
- _, _, ok := rebuildFirstKeyAndPropose(nd, conn, cmd)
- if !ok {
- return
+ // check if empty set
+ if n == 0 {
+ return false, nil, nil
}
- // wait response
- conn.WriteString("OK")
+ return true, nil, nil
}
-func (nd *KVNode) ltrimCommand(conn redcon.Conn, cmd redcon.Command) {
+func (nd *KVNode) ltrimCommand(cmd redcon.Command) (interface{}, error) {
if len(cmd.Args) != 4 {
- conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
- return
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
_, err := strconv.ParseInt(string(cmd.Args[2]), 10, 64)
if err != nil {
- conn.WriteError("Invalid start index: " + err.Error())
- return
+ return nil, err
}
_, err = strconv.ParseInt(string(cmd.Args[3]), 10, 64)
if err != nil {
- conn.WriteError("Invalid end index: " + err.Error())
- return
+ return nil, err
}
-
- _, _, ok := rebuildFirstKeyAndPropose(nd, conn, cmd)
- if !ok {
- return
+ key, err := common.CutNamesapce(cmd.Args[1])
+ if err != nil {
+ return nil, err
}
- // wait response
- conn.WriteString("OK")
-}
-
-func (nd *KVNode) rpopCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- rsp, ok := v.([]byte)
- if !ok {
- conn.WriteError("Invalid response type")
- return
+ needContinue, _, err := nd.preCheckListLength(key)
+ if err != nil {
+ return nil, err
}
- // wait response
- conn.WriteBulk(rsp)
-}
-
-func (nd *KVNode) rpushCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- rsp, ok := v.(int64)
- if !ok {
- conn.WriteError("Invalid response type")
- return
+ if !needContinue {
+ return checkOKRsp(cmd, nil)
}
- // wait response
- conn.WriteInt64(rsp)
-}
-
-func (nd *KVNode) lclearCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- rsp, ok := v.(int64)
- if !ok {
- conn.WriteError("Invalid response type")
- return
+ rsp, err := rebuildFirstKeyAndPropose(nd, cmd, checkOKRsp)
+ if err != nil {
+ return nil, err
}
- // wait response
- conn.WriteInt64(rsp)
+ return rsp, nil
}
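ltrimCommand now asks preCheckListLength before proposing: when the list is already empty the command is answered locally and never enters the raft log, avoiding replication of a no-op. The fast path looks roughly like the following, where localLen and proposeFn are hypothetical stand-ins for the real node methods:

```go
package sketch

// trimIfNeeded answers empty-list trims locally and only proposes
// through raft when there is actually something to trim.
func trimIfNeeded(localLen func() (int64, error), proposeFn func() error) error {
	n, err := localLen()
	if err != nil {
		return err
	}
	if n == 0 {
		// nothing to trim; reply OK without a raft proposal
		return nil
	}
	return proposeFn()
}
```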
// local write command execute only on follower or on the local commit of leader
@@ -199,13 +166,13 @@ func (kvsm *kvStoreSM) localRpushCommand(cmd redcon.Command, ts int64) (interfac
}
func (kvsm *kvStoreSM) localLclearCommand(cmd redcon.Command, ts int64) (interface{}, error) {
- return kvsm.store.LClear(cmd.Args[1])
+ return kvsm.store.LClear(ts, cmd.Args[1])
}
func (kvsm *kvStoreSM) localLMClearCommand(cmd redcon.Command, ts int64) (interface{}, error) {
var count int64
for _, lkey := range cmd.Args[1:] {
- if _, err := kvsm.store.LClear(lkey); err != nil {
+ if _, err := kvsm.store.LClear(ts, lkey); err != nil {
return count, err
} else {
count++
diff --git a/node/list_test.go b/node/list_test.go
index 29bf1fad..00c17241 100644
--- a/node/list_test.go
+++ b/node/list_test.go
@@ -33,6 +33,10 @@ func TestKVNode_listCommand(t *testing.T) {
{"lfixkey", buildCommand([][]byte{[]byte("lfixkey"), testKey})},
{"rpop", buildCommand([][]byte{[]byte("rpop"), testKey})},
{"rpush", buildCommand([][]byte{[]byte("rpush"), testKey, testKeyValue})},
+ {"lttl", buildCommand([][]byte{[]byte("lttl"), testKey})},
+ {"lkeyexist", buildCommand([][]byte{[]byte("lkeyexist"), testKey})},
+ {"lexpire", buildCommand([][]byte{[]byte("lexpire"), testKey, []byte("10")})},
+ {"lpersist", buildCommand([][]byte{[]byte("lpersist"), testKey})},
{"lclear", buildCommand([][]byte{[]byte("lclear"), testKey})},
}
defer os.RemoveAll(dataDir)
@@ -41,8 +45,18 @@ func TestKVNode_listCommand(t *testing.T) {
c := &fakeRedisConn{}
for _, cmd := range tests {
c.Reset()
- handler, _, _ := nd.router.GetCmdHandler(cmd.name)
- handler(c, cmd.args)
- assert.Nil(t, c.GetError())
+ origCmd := append([]byte{}, cmd.args.Raw...)
+ handler, ok := nd.router.GetCmdHandler(cmd.name)
+ if ok {
+ handler(c, cmd.args)
+ assert.Nil(t, c.GetError())
+ } else {
+ whandler, _ := nd.router.GetWCmdHandler(cmd.name)
+ rsp, err := whandler(cmd.args)
+ assert.Nil(t, err)
+ _, ok := rsp.(error)
+ assert.True(t, !ok)
+ }
+ assert.Equal(t, origCmd, cmd.args.Raw)
}
}
diff --git a/node/log_sender.go b/node/log_sender.go
index 014bb5f2..06b5a2c7 100644
--- a/node/log_sender.go
+++ b/node/log_sender.go
@@ -8,10 +8,10 @@ import (
"sync"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/syncerpb"
- "github.com/absolute8511/go-zanredisdb"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/syncerpb"
+ "github.com/youzan/go-zanredisdb"
"google.golang.org/grpc"
)
@@ -67,15 +67,13 @@ func (s *RemoteLogSender) getZanCluster() *zanredisdb.Cluster {
if s.remoteClusterAddr == "" || strings.HasPrefix(s.remoteClusterAddr, "test://") {
return nil
}
- conf := &zanredisdb.Conf{
- DialTimeout: rpcTimeout,
- ReadTimeout: rpcTimeout,
- WriteTimeout: rpcTimeout,
- TendInterval: 5,
- Namespace: s.ns,
- }
+ conf := zanredisdb.NewDefaultConf()
+ conf.DialTimeout = rpcTimeout
+ conf.ReadTimeout = rpcTimeout
+ conf.WriteTimeout = rpcTimeout
+ conf.Namespace = s.ns
conf.LookupList = append(conf.LookupList, s.remoteClusterAddr)
- s.zanCluster = zanredisdb.NewCluster(conf)
+ s.zanCluster = zanredisdb.NewCluster(conf, nil)
return s.zanCluster
}
@@ -157,9 +155,9 @@ func (s *RemoteLogSender) getAllAddressesForPart() ([]string, error) {
return addrs, nil
}
-func (s *RemoteLogSender) doSendOnce(r []*BatchInternalRaftRequest) error {
+func (s *RemoteLogSender) doSendOnce(in syncerpb.RaftReqs) error {
if s.remoteClusterAddr == "" {
- nodeLog.Infof("sending log with no remote: %v", r)
+ nodeLog.Infof("sending log with no remote: %v", in.String())
return nil
}
c, addr, err := s.getClient()
@@ -167,26 +165,12 @@ func (s *RemoteLogSender) doSendOnce(r []*BatchInternalRaftRequest) error {
nodeLog.Infof("sending(%v) log failed to get grpc client: %v", addr, err)
return errors.New("failed to get grpc client")
}
- raftLogs := make([]*syncerpb.RaftLogData, len(r))
- for i, e := range r {
- var rld syncerpb.RaftLogData
- raftLogs[i] = &rld
- raftLogs[i].Type = syncerpb.EntryNormalRaw
- raftLogs[i].Data, _ = e.Marshal()
- raftLogs[i].Term = e.OrigTerm
- raftLogs[i].Index = e.OrigIndex
- raftLogs[i].RaftTimestamp = e.Timestamp
- raftLogs[i].RaftGroupName = s.grpName
- raftLogs[i].ClusterName = s.localCluster
- }
-
- in := &syncerpb.RaftReqs{RaftLog: raftLogs}
if nodeLog.Level() > common.LOG_DETAIL {
nodeLog.Debugf("sending(%v) log : %v", addr, in.String())
}
ctx, cancel := context.WithTimeout(context.Background(), sendLogTimeout)
defer cancel()
- rpcErr, err := c.ApplyRaftReqs(ctx, in, grpc.MaxCallSendMsgSize(256<<20))
+ rpcErr, err := c.ApplyRaftReqs(ctx, &in, grpc.MaxCallSendMsgSize(256<<20))
if err != nil {
nodeLog.Infof("sending(%v) log failed: %v", addr, err.Error())
return err
@@ -264,7 +248,7 @@ func (s *RemoteLogSender) notifyApplySnapWithOption(skip bool, raftSnapshot raft
return err
}
if rsp != nil && rsp.ErrCode != 0 && rsp.ErrCode != http.StatusOK {
- nodeLog.Infof("notify apply snapshot failed: %v, %v", addr, rsp)
+ nodeLog.Infof("notify apply snapshot failed: %v, %v, %v", addr, rsp, raftSnapshot.Metadata.String())
return errors.New(rsp.String())
}
return nil
@@ -296,15 +280,59 @@ func (s *RemoteLogSender) getApplySnapStatus(raftSnapshot raftpb.Snapshot, addr
if rsp == nil {
return &applyStatus, errors.New("nil snap status rsp")
}
- nodeLog.Infof("apply snapshot status: %v, %v", addr, rsp.String())
+ nodeLog.Infof("apply snapshot status: %v, %v, %v", addr, rsp.String(), raftSnapshot.Metadata.String())
applyStatus = *rsp
return &applyStatus, nil
}
+func (s *RemoteLogSender) waitTransferSnapStatus(raftSnapshot raftpb.Snapshot,
+ syncAddr string, syncPath string, stop chan struct{}) error {
+ for {
+ s.notifyTransferSnap(raftSnapshot, syncAddr, syncPath)
+ tm := time.NewTimer(time.Second * 5)
+ select {
+ case <-stop:
+ tm.Stop()
+ return common.ErrStopped
+ case <-tm.C:
+ }
+ tm.Stop()
+ addrs, err := s.getAllAddressesForPart()
+ if err != nil {
+ return err
+ }
+ allTransferring := true
+ allReady := true
+ for _, addr := range addrs {
+ applyStatus, err := s.getApplySnapStatus(raftSnapshot, addr)
+ if err != nil {
+ return err
+ }
+ if applyStatus.Status != syncerpb.ApplySuccess {
+ allReady = false
+ }
+ if applyStatus.Status == syncerpb.ApplyWaitingBegin ||
+ applyStatus.Status == syncerpb.ApplyMissing {
+ allTransferring = false
+ break
+ }
+ if applyStatus.Status == syncerpb.ApplyFailed {
+ nodeLog.Infof("node %v failed to transfer snapshot : %v", addr, applyStatus)
+ return errors.New("some node failed to transfer snapshot")
+ }
+ }
+ if allTransferring || allReady {
+ break
+ }
+ }
+ return nil
+}
+
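waitTransferSnapStatus re-notifies the receiver and then polls every replica's apply status until the snapshot is transferring or applied everywhere, while staying responsive to the stop channel. Its core timer-plus-stop-channel loop, reduced to a generic helper (pollOnce is a stand-in for the status query), is roughly:

```go
package sketch

import (
	"errors"
	"time"
)

var errStopped = errors.New("stopped")

// pollUntil repeatedly calls pollOnce with a fixed interval until it
// reports done, an error occurs, or stop is closed.
func pollUntil(interval time.Duration, stop chan struct{}, pollOnce func() (bool, error)) error {
	for {
		tm := time.NewTimer(interval)
		select {
		case <-stop:
			tm.Stop()
			return errStopped
		case <-tm.C:
		}
		done, err := pollOnce()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
	}
}
```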
func (s *RemoteLogSender) waitApplySnapStatus(raftSnapshot raftpb.Snapshot, stop chan struct{}) error {
// first, query and wait all replicas to finish snapshot transfer
// if all done, notify apply the transferred snapshot and wait all done
// then wait all apply done.
+ lastNotifyApply := time.Now()
for {
select {
case <-stop:
@@ -318,7 +346,8 @@ func (s *RemoteLogSender) waitApplySnapStatus(raftSnapshot raftpb.Snapshot, stop
// wait all became ApplyTransferSuccess or ApplySuccess
allReady := true
allTransferReady := true
- needWait := false
+ needWaitTransfer := false
+ needWaitApply := false
for _, addr := range addrs {
applyStatus, err := s.getApplySnapStatus(raftSnapshot, addr)
if err != nil {
@@ -327,23 +356,38 @@ func (s *RemoteLogSender) waitApplySnapStatus(raftSnapshot raftpb.Snapshot, stop
if applyStatus.Status != syncerpb.ApplySuccess {
allReady = false
}
- if applyStatus.Status != syncerpb.ApplySuccess && applyStatus.Status != syncerpb.ApplyTransferSuccess {
+ if applyStatus.Status != syncerpb.ApplySuccess &&
+ applyStatus.Status != syncerpb.ApplyTransferSuccess &&
+ applyStatus.Status != syncerpb.ApplyWaiting {
allTransferReady = false
}
- if applyStatus.Status == syncerpb.ApplyWaiting || applyStatus.Status == syncerpb.ApplyWaitingTransfer ||
+ if applyStatus.Status == syncerpb.ApplyWaitingTransfer ||
applyStatus.Status == syncerpb.ApplyUnknown {
- needWait = true
+ needWaitTransfer = true
+ }
+ if applyStatus.Status == syncerpb.ApplyWaiting ||
+ applyStatus.Status == syncerpb.ApplyUnknown {
+ needWaitApply = true
}
if applyStatus.Status == syncerpb.ApplyFailed {
nodeLog.Infof("node %v failed to apply snapshot : %v", addr, applyStatus)
return errors.New("some node failed to apply snapshot")
}
+ if applyStatus.Status == syncerpb.ApplyMissing {
+ nodeLog.Infof("node %v failed to apply snapshot : %v", addr, applyStatus)
+ return errors.New("some node failed to apply snapshot")
+ }
}
- if needWait {
+ if needWaitTransfer || needWaitApply {
select {
case <-stop:
return common.ErrStopped
- case <-time.After(time.Second):
+ case <-time.After(time.Second * 10):
+ if needWaitApply && allTransferReady && time.Since(lastNotifyApply) > time.Minute*5 {
+ // the proposal to apply the snapshot may have been lost; send it again to begin the apply
+ s.notifyApplySnap(raftSnapshot)
+ lastNotifyApply = time.Now()
+ }
continue
}
}
@@ -352,6 +396,7 @@ func (s *RemoteLogSender) waitApplySnapStatus(raftSnapshot raftpb.Snapshot, stop
}
if allTransferReady {
s.notifyApplySnap(raftSnapshot)
+ lastNotifyApply = time.Now()
time.Sleep(time.Second)
} else {
return errors.New("some node failed to apply snapshot")
@@ -425,16 +470,16 @@ func (s *RemoteLogSender) getRemoteSyncedRaft(stop chan struct{}) (SyncedState,
return state, err
}
-func (s *RemoteLogSender) sendRaftLog(r []*BatchInternalRaftRequest, stop chan struct{}) error {
- if len(r) == 0 {
+func (s *RemoteLogSender) sendRaftLog(r syncerpb.RaftReqs, stop chan struct{}) error {
+ if len(r.RaftLog) == 0 {
return nil
}
- first := r[0]
+ first := r.RaftLog[0]
err := sendRpcAndRetry(func() error {
err := s.doSendOnce(r)
if err != nil {
nodeLog.Infof("failed to send raft log : %v, at %v-%v",
- err.Error(), first.OrigTerm, first.OrigIndex)
+ err.Error(), first.Term, first.Index)
}
return err
}, "sendRaftLog", stop)
diff --git a/node/multi.go b/node/multi.go
index a345ca24..d0fd8f8b 100644
--- a/node/multi.go
+++ b/node/multi.go
@@ -3,8 +3,8 @@ package node
import (
"errors"
- "github.com/absolute8511/ZanRedisDB/common"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
)
func (nd *KVNode) plsetCommand(cmd redcon.Command, rsp interface{}) (interface{}, error) {
diff --git a/node/namespace.go b/node/namespace.go
index 36ea05ad..101f44e3 100644
--- a/node/namespace.go
+++ b/node/namespace.go
@@ -3,24 +3,28 @@ package node
import (
"encoding/json"
"errors"
+ "fmt"
"io/ioutil"
"os"
"path"
+ "path/filepath"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/rockredis"
- "github.com/absolute8511/ZanRedisDB/transport/rafthttp"
- "github.com/spaolacci/murmur3"
+ "github.com/twmb/murmur3"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
+ "github.com/youzan/ZanRedisDB/metric"
+ "github.com/youzan/ZanRedisDB/transport/rafthttp"
"golang.org/x/net/context"
)
var (
ErrNamespaceAlreadyExist = errors.New("namespace already exist")
+ ErrNamespaceAlreadyStarting = errors.New("namespace is starting")
ErrRaftIDMismatch = errors.New("raft id mismatch")
ErrRaftConfMismatch = errors.New("raft config mismatch")
errTimeoutLeaderTransfer = errors.New("raft leader transfer failed")
@@ -32,14 +36,17 @@ var (
ErrRaftGroupNotReady = errors.New("ERR_CLUSTER_CHANGED: raft group not ready")
ErrProposalCanceled = errors.New("ERR_CLUSTER_CHANGED: raft proposal " + context.Canceled.Error())
errNamespaceConfInvalid = errors.New("namespace config is invalid")
+ ErrLocalMagicCodeConflict = errors.New("namespace magic code conflict on local")
)
var perfLevel int32
type NamespaceNode struct {
- Node *KVNode
- conf *NamespaceConfig
- ready int32
+ Node *KVNode
+ conf *NamespaceConfig
+ ready int32
+ magicCode int64
+ nsDataDir string
}
func (nn *NamespaceNode) IsReady() bool {
@@ -55,12 +62,83 @@ func (nn *NamespaceNode) SwitchForLearnerLeader(isLearnerLeader bool) {
}
func (nn *NamespaceNode) SetDynamicInfo(dync NamespaceDynamicConf) {
+ nn.Node.SetDynamicInfo(dync)
}
-func (nn *NamespaceNode) SetMagicCode(magic int64) error {
+func getMagicCodeFileName(dataPath string, grp string) string {
+ return path.Join(dataPath, "magic_"+grp)
+}
+
+func saveMagicCode(fileName string, magicCode int64) error {
+ var f *os.File
+ var err error
+
+ f, err = os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, common.FILE_PERM)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ _, err = fmt.Fprintf(f, "%d\n",
+ magicCode)
+ if err != nil {
+ return err
+ }
+ f.Sync()
return nil
}
+func LoadMagicCode(fileName string) (int64, error) {
+ var f *os.File
+ var err error
+
+ f, err = os.OpenFile(fileName, os.O_RDONLY, common.FILE_PERM)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return 0, nil
+ }
+ return 0, err
+ }
+ defer f.Close()
+
+ var code int64
+ _, err = fmt.Fscanf(f, "%d\n",
+ &code)
+ if err != nil {
+ return 0, err
+ }
+ return code, nil
+}
+
+func (nn *NamespaceNode) getMagicCode() int64 {
+ return atomic.LoadInt64(&nn.magicCode)
+}
+
+func (nn *NamespaceNode) SetMagicCode(magic int64) error {
+ fileName := getMagicCodeFileName(nn.nsDataDir, nn.conf.Name)
+ if nn.getMagicCode() == 0 {
+ // try read old magic code from file
+ mc, err := LoadMagicCode(fileName)
+ if err != nil {
+ return err
+ }
+ atomic.CompareAndSwapInt64(&nn.magicCode, 0, mc)
+ }
+ // if a different magic code is already set, report the conflict as an error
+ if nn.getMagicCode() != 0 && nn.getMagicCode() != magic {
+ nodeLog.Warningf("set magic code to %v conflict on local: %v", magic, nn.getMagicCode())
+ return ErrLocalMagicCodeConflict
+ }
+ if nn.getMagicCode() == magic {
+ return nil
+ }
+ changed := atomic.CompareAndSwapInt64(&nn.magicCode, 0, magic)
+ if !changed {
+ return fmt.Errorf("set magic code to %v conflict on local: %v", magic, nn.getMagicCode())
+ }
+ return saveMagicCode(fileName, magic)
+}
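SetMagicCode acts as an init-once guard: the code is lazily loaded from disk, claimed with an atomic compare-and-swap, and any later call with a different value is rejected as a conflict. A compressed sketch of the claim-once part (file persistence elided):

```go
package sketch

import (
	"errors"
	"sync/atomic"
)

var errMagicConflict = errors.New("magic code conflict on local")

// claimMagic stores magic into *slot exactly once; subsequent calls
// succeed only when they pass the same value again.
func claimMagic(slot *int64, magic int64) error {
	if atomic.CompareAndSwapInt64(slot, 0, magic) {
		return nil
	}
	if atomic.LoadInt64(slot) == magic {
		return nil
	}
	return errMagicConflict
}
```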
+
func (nn *NamespaceNode) SetDataFixState(needFix bool) {
}
@@ -120,7 +198,16 @@ func (nn *NamespaceNode) GetMembers() []*common.MemberInfo {
}
func (nn *NamespaceNode) Start(forceStandaloneCluster bool) error {
+ if !atomic.CompareAndSwapInt32(&nn.ready, 0, -1) {
+ if atomic.LoadInt32(&nn.ready) == 1 {
+ // already started
+ return nil
+ }
+ // starting
+ return ErrNamespaceAlreadyStarting
+ }
if err := nn.Node.Start(forceStandaloneCluster); err != nil {
+ atomic.StoreInt32(&nn.ready, 0)
return err
}
atomic.StoreInt32(&nn.ready, 1)
@@ -128,30 +215,23 @@ func (nn *NamespaceNode) Start(forceStandaloneCluster bool) error {
}
func (nn *NamespaceNode) TransferMyLeader(to uint64, toRaftID uint64) error {
- waitTimeout := time.Duration(nn.Node.machineConfig.ElectionTick) * time.Duration(nn.Node.machineConfig.TickMs) * time.Millisecond
- ctx, cancel := context.WithTimeout(context.Background(), waitTimeout)
- defer cancel()
- oldLeader := nn.Node.rn.Lead()
- nn.Node.rn.node.TransferLeadership(ctx, oldLeader, toRaftID)
- for nn.Node.rn.Lead() != toRaftID {
- select {
- case <-ctx.Done():
- return errTimeoutLeaderTransfer
- case <-time.After(200 * time.Millisecond):
- }
+ err := nn.Node.TransferLeadership(toRaftID)
+ if err != nil {
+ return err
}
- nodeLog.Infof("finished transfer from %v to %v:%v", oldLeader, to, toRaftID)
+ nodeLog.Infof("finished transfer to %v:%v", to, toRaftID)
return nil
}
type NamespaceMeta struct {
PartitionNum int
+ walEng engine.KVEngine
}
type NamespaceMgr struct {
mutex sync.RWMutex
kvNodes map[string]*NamespaceNode
- nsMetas map[string]NamespaceMeta
+ nsMetas map[string]*NamespaceMeta
groups map[uint64]string
machineConf *MachineConfig
raftTransport *rafthttp.Transport
@@ -166,7 +246,7 @@ func NewNamespaceMgr(transport *rafthttp.Transport, conf *MachineConfig) *Namesp
ns := &NamespaceMgr{
kvNodes: make(map[string]*NamespaceNode),
groups: make(map[uint64]string),
- nsMetas: make(map[string]NamespaceMeta),
+ nsMetas: make(map[string]*NamespaceMeta),
raftTransport: transport,
machineConf: conf,
newLeaderChan: make(chan string, 2048),
@@ -177,9 +257,68 @@ func NewNamespaceMgr(transport *rafthttp.Transport, conf *MachineConfig) *Namesp
} else if regID > 0 {
ns.machineConf.NodeID = regID
}
+ common.RegisterConfChangedHandler(common.ConfSlowLimiterSwitch, ns.HandleSlowLimiterSwitchChanged)
+ RegisterSlowConfChanged()
+
return ns
}
+func (nsm *NamespaceMgr) SetDBOptions(key string, value string) error {
+ key = strings.ToLower(key)
+ switch key {
+ case "rate_limiter_bytes_per_sec":
+ bytes, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ nsm.SetRateLimiterBytesPerSec(bytes)
+ case "max_background_compactions":
+ maxCompact, err := strconv.Atoi(value)
+ if err != nil {
+ return err
+ }
+ err = nsm.SetMaxBackgroundOptions(maxCompact, 0)
+ if err != nil {
+ return err
+ }
+ case "max_background_jobs":
+ maxBackJobs, err := strconv.Atoi(value)
+ if err != nil {
+ return err
+ }
+ err = nsm.SetMaxBackgroundOptions(0, maxBackJobs)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("db options %v not support", key)
+ }
+ return nil
+}
+
+func (nsm *NamespaceMgr) SetMaxBackgroundOptions(maxCompact int, maxBackJobs int) error {
+ var err error
+ nsm.mutex.RLock()
+ defer nsm.mutex.RUnlock()
+ for _, n := range nsm.kvNodes {
+ if !n.IsReady() {
+ continue
+ }
+ err = n.Node.SetMaxBackgroundOptions(maxCompact, maxBackJobs)
+ if err != nil {
+ break
+ }
+ }
+ return err
+}
+
+func (nsm *NamespaceMgr) SetRateLimiterBytesPerSec(bytesPerSec int64) {
+ if nsm.machineConf.RocksDBSharedConfig == nil {
+ return
+ }
+ nsm.machineConf.RocksDBSharedConfig.ChangeLimiter(bytesPerSec)
+}
+
func (nsm *NamespaceMgr) SetIClusterInfo(clusterInfo common.IClusterInfo) {
nsm.clusterInfo = clusterInfo
}
@@ -206,6 +345,36 @@ func (nsm *NamespaceMgr) SaveMachineRegID(regID uint64) error {
common.FILE_PERM)
}
+func (nsm *NamespaceMgr) GetDataRoot() string {
+ return nsm.machineConf.DataRootDir
+}
+
+func (nsm *NamespaceMgr) CheckLocalNamespaces() map[string]int64 {
+ // scan local namespace and magic number
+ scanDir := nsm.machineConf.DataRootDir
+ // scan all local undeleted ns-part dirs and read the magic code
+ dirList, err := filepath.Glob(path.Join(scanDir, "*-*"))
+ if err != nil {
+ nodeLog.Infof("found local data root %s error: %v", scanDir, err.Error())
+ return nil
+ }
+ magicList := make(map[string]int64)
+ for _, dir := range dirList {
+ nodeLog.Infof("found local dir in data root: %v", dir)
+ grpName := path.Base(dir)
+ ns, _ := common.GetNamespaceAndPartition(grpName)
+ if ns != "" {
+ code, err := LoadMagicCode(getMagicCodeFileName(dir, grpName))
+ if err != nil {
+ continue
+ }
+ nodeLog.Infof("add valid namespace magic : %v, %v", grpName, code)
+ magicList[grpName] = code
+ }
+ }
+ return magicList
+}
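CheckLocalNamespaces discovers on-disk partitions by globbing for `*-*` directories under the data root and reading each directory's magic file. The discovery step can be exercised on its own; the sketch below assumes, as LoadMagicCode does, that the magic file holds a single decimal integer:

```go
package sketch

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
)

// scanPartitions returns the magic code found in every ns-partition
// directory (named like "default-0") under root.
func scanPartitions(root string) (map[string]int64, error) {
	dirs, err := filepath.Glob(path.Join(root, "*-*"))
	if err != nil {
		return nil, err
	}
	codes := make(map[string]int64)
	for _, dir := range dirs {
		grp := path.Base(dir)
		f, err := os.Open(path.Join(dir, "magic_"+grp))
		if err != nil {
			continue // no magic file yet, skip this partition
		}
		var code int64
		_, scanErr := fmt.Fscanf(f, "%d\n", &code)
		f.Close()
		if scanErr != nil {
			continue
		}
		codes[grp] = code
	}
	return codes, nil
}
```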
+
func (nsm *NamespaceMgr) Start() {
nsm.stopC = make(chan struct{})
atomic.StoreInt32(&nsm.stopping, 0)
@@ -243,10 +412,32 @@ func (nsm *NamespaceMgr) Stop() {
n.Close()
}
nsm.wg.Wait()
- nodeLog.Infof("namespace manager stopped")
+ start := time.Now()
+ for {
+ // wait for the stopped callbacks to finish
+ time.Sleep(time.Second)
+ if len(nsm.GetNamespaces()) == 0 {
+ break
+ }
+ if time.Since(start) > time.Second*10 {
+ nodeLog.Warningf("some namespace not stopped while waiting timeout")
+ break
+ }
+ }
if nsm.machineConf.RocksDBSharedConfig != nil {
nsm.machineConf.RocksDBSharedConfig.Destroy()
}
+ nsm.mutex.RLock()
+ defer nsm.mutex.RUnlock()
+ for _, meta := range nsm.nsMetas {
+ if meta.walEng != nil {
+ meta.walEng.CloseAll()
+ }
+ }
+ if nsm.machineConf.WALRocksDBSharedConfig != nil {
+ nsm.machineConf.WALRocksDBSharedConfig.Destroy()
+ }
+ nodeLog.Infof("namespace manager stopped")
}
func (nsm *NamespaceMgr) IsAllRecoveryDone() bool {
@@ -272,6 +463,59 @@ func (nsm *NamespaceMgr) GetNamespaces() map[string]*NamespaceNode {
return tmp
}
+func initRaftStorageEng(cfg *engine.RockEngConfig) engine.KVEngine {
+ nodeLog.Infof("using rocksdb raft storage dir:%v", cfg.DataDir)
+ cfg.DisableWAL = true
+ cfg.DisableMergeCounter = true
+ cfg.EnableTableCounter = false
+ cfg.OptimizeFiltersForHits = true
+ // basically, we do not need to compress the wal since it will be cleaned after snapshot
+ cfg.MinLevelToCompress = 5
+ // TODO: check memtable_insert_with_hint_prefix_extractor and DeleteRange bug
+ if cfg.InsertHintFixedLen == 0 {
+ cfg.InsertHintFixedLen = 10
+ }
+ cfg.AutoCompacted = true
+ db, err := engine.NewKVEng(cfg)
+ if err == nil {
+ err = db.OpenEng()
+ if err == nil {
+ db.SetOptsForLogStorage()
+ go db.CompactAllRange()
+ return db
+ }
+ }
+ nodeLog.Warningf("failed to open rocks raft db: %v, fallback to memory entries", err.Error())
+ return nil
+}
+
+func (nsm *NamespaceMgr) getWALEng(ns string, dataDir string, id uint64, gid uint32, meta *NamespaceMeta) engine.KVEngine {
+ if !nsm.machineConf.UseRocksWAL || meta == nil {
+ return nil
+ }
+ rsDir := path.Join(dataDir, "rswal", ns)
+
+ if nsm.machineConf.SharedRocksWAL {
+ if meta.walEng != nil {
+ return meta.walEng
+ }
+ } else {
+ sharding := fmt.Sprintf("%v-%v", gid, id)
+ rsDir = path.Join(rsDir, sharding)
+ }
+
+ walEngCfg := engine.NewRockConfig()
+ walEngCfg.DataDir = rsDir
+ walEngCfg.RockOptions = nsm.machineConf.WALRocksDBOpts
+ walEngCfg.SharedConfig = nsm.machineConf.WALRocksDBSharedConfig
+ engine.FillDefaultOptions(&walEngCfg.RockOptions)
+ eng := initRaftStorageEng(walEngCfg)
+ if nsm.machineConf.SharedRocksWAL {
+ meta.walEng = eng
+ }
+ return eng
+}
+
func (nsm *NamespaceMgr) InitNamespaceNode(conf *NamespaceConfig, raftID uint64, join bool) (*NamespaceNode, error) {
if atomic.LoadInt32(&nsm.stopping) == 1 {
return nil, errStopping
@@ -283,14 +527,28 @@ func (nsm *NamespaceMgr) InitNamespaceNode(conf *NamespaceConfig, raftID uint64,
return nil, err
}
+ dv, err := common.StringToDataVersionType(conf.DataVersion)
+ if err != nil {
+ nodeLog.Infof("namespace %v invalid data version: %v", conf.Name, conf.DataVersion)
+ return nil, err
+ }
+ if expPolicy == common.WaitCompact && dv == common.DefaultDataVer {
+ return nil, errors.New("can not use compact ttl for old data version")
+ }
+ if dv != common.DefaultDataVer {
+ nodeLog.Infof("namespace %v data version: %v, expire policy: %v", conf.Name, conf.DataVersion, expPolicy)
+ }
+
kvOpts := &KVOptions{
DataDir: path.Join(nsm.machineConf.DataRootDir, conf.Name),
+ KeepBackup: nsm.machineConf.KeepBackup,
EngType: conf.EngType,
RockOpts: nsm.machineConf.RocksDBOpts,
ExpirationPolicy: expPolicy,
+ DataVersion: dv,
SharedConfig: nsm.machineConf.RocksDBSharedConfig,
}
- rockredis.FillDefaultOptions(&kvOpts.RockOpts)
+ engine.FillDefaultOptions(&kvOpts.RockOpts)
if conf.PartitionNum <= 0 {
return nil, errNamespaceConfInvalid
@@ -327,25 +585,50 @@ func (nsm *NamespaceMgr) InitNamespaceNode(conf *NamespaceConfig, raftID uint64,
RaftPeers: clusterNodes,
SnapCount: conf.SnapCount,
SnapCatchup: conf.SnapCatchup,
- Replicator: conf.Replicator,
+ Replicator: int32(conf.Replicator),
OptimizedFsync: conf.OptimizedFsync,
- KeepWAL: nsm.machineConf.KeepWAL,
+ nodeConfig: nsm.machineConf,
}
- kv, err := NewKVNode(kvOpts, nsm.machineConf, raftConf, nsm.raftTransport,
- join, nsm.onNamespaceDeleted(raftConf.GroupID, conf.Name),
+ d, _ = json.MarshalIndent(&raftConf, "", " ")
+ nodeLog.Infof("namespace raft config: %v", string(d))
+
+ var meta *NamespaceMeta
+ if oldMeta, ok := nsm.nsMetas[conf.BaseName]; !ok {
+ meta = &NamespaceMeta{
+ PartitionNum: conf.PartitionNum,
+ }
+ nsm.nsMetas[conf.BaseName] = meta
+ nodeLog.Infof("namespace meta init: %v", conf)
+ } else {
+ if oldMeta.PartitionNum != conf.PartitionNum {
+ nodeLog.Errorf("namespace meta mismatch: %v, old: %v", conf, oldMeta)
+ // update the meta on mismatch; this can happen when the same namespace is re-created with a different
+ // config after the old namespace was deleted
+ if oldMeta.walEng != nil {
+ oldMeta.walEng.CloseAll()
+ }
+ meta = &NamespaceMeta{
+ PartitionNum: conf.PartitionNum,
+ }
+ nsm.nsMetas[conf.BaseName] = meta
+ } else {
+ meta = oldMeta
+ }
+ }
+ rs := nsm.getWALEng(conf.BaseName, nsm.machineConf.DataRootDir, raftConf.ID, uint32(raftConf.GroupID), meta)
+ raftConf.SetEng(rs)
+
+ kv, err := NewKVNode(kvOpts, raftConf, nsm.raftTransport,
+ join, nsm.onNamespaceStopped(raftConf.GroupID, conf.Name),
nsm.clusterInfo, nsm.newLeaderChan)
if err != nil {
return nil, err
}
- if _, ok := nsm.nsMetas[conf.BaseName]; !ok {
- nsm.nsMetas[conf.BaseName] = NamespaceMeta{
- PartitionNum: conf.PartitionNum,
- }
- }
n := &NamespaceNode{
- Node: kv,
- conf: conf,
+ Node: kv,
+ conf: conf,
+ nsDataDir: kvOpts.DataDir,
}
nsm.kvNodes[conf.Name] = n
@@ -353,11 +636,14 @@ func (nsm *NamespaceMgr) InitNamespaceNode(conf *NamespaceConfig, raftID uint64,
return n, nil
}
+func HashedKey(pk []byte) int {
+ return int(murmur3.Sum32(pk))
+}
func GetHashedPartitionID(pk []byte, pnum int) int {
- return int(murmur3.Sum32(pk)) % pnum
+ return HashedKey(pk) % pnum
}
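Splitting GetHashedPartitionID into HashedKey plus a modulo lets callers compute the 32-bit murmur3 sum of a primary key once and reuse it, as GetNamespaceNodeWithPrimaryKeySum below does. With the same github.com/twmb/murmur3 package imported above, the key-to-partition mapping is simply:

```go
package sketch

import "github.com/twmb/murmur3"

// partitionFor maps a primary key to one of pnum partitions using the
// same murmur3 32-bit hash the node package uses.
func partitionFor(pk []byte, pnum int) int {
	return int(murmur3.Sum32(pk)) % pnum
}
```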
-func (nsm *NamespaceMgr) GetNamespaceNodeWithPrimaryKey(nsBaseName string, pk []byte) (*NamespaceNode, error) {
+func (nsm *NamespaceMgr) GetNamespaceNodeWithPrimaryKeySum(nsBaseName string, pk []byte, pkSum int) (*NamespaceNode, error) {
nsm.mutex.RLock()
defer nsm.mutex.RUnlock()
v, ok := nsm.nsMetas[nsBaseName]
@@ -365,7 +651,7 @@ func (nsm *NamespaceMgr) GetNamespaceNodeWithPrimaryKey(nsBaseName string, pk []
nodeLog.Infof("namespace %v meta not found", nsBaseName)
return nil, ErrNamespaceNotFound
}
- pid := GetHashedPartitionID(pk, v.PartitionNum)
+ pid := pkSum % v.PartitionNum
fullName := common.GetNsDesp(nsBaseName, pid)
n, ok := nsm.kvNodes[fullName]
if !ok {
@@ -378,6 +664,11 @@ func (nsm *NamespaceMgr) GetNamespaceNodeWithPrimaryKey(nsBaseName string, pk []
return n, nil
}
+func (nsm *NamespaceMgr) GetNamespaceNodeWithPrimaryKey(nsBaseName string, pk []byte) (*NamespaceNode, error) {
+ pkSum := HashedKey(pk)
+ return nsm.GetNamespaceNodeWithPrimaryKeySum(nsBaseName, pk, pkSum)
+}
+
func (nsm *NamespaceMgr) GetNamespaceNodes(nsBaseName string, leaderOnly bool) (map[string]*NamespaceNode, error) {
nsNodes := make(map[string]*NamespaceNode)
@@ -431,6 +722,23 @@ func (nsm *NamespaceMgr) GetNamespaceNodeFromGID(gid uint64) *NamespaceNode {
return kv
}
+func (nsm *NamespaceMgr) GetWALDBStats(leaderOnly bool) map[string]map[string]interface{} {
+ nsm.mutex.RLock()
+ nsStats := make(map[string]map[string]interface{}, len(nsm.kvNodes))
+ for k, n := range nsm.kvNodes {
+ if !n.IsReady() {
+ continue
+ }
+ if leaderOnly && !n.Node.IsLead() {
+ continue
+ }
+ dbStats := n.Node.GetWALDBInternalStats()
+ nsStats[k] = dbStats
+ }
+ nsm.mutex.RUnlock()
+ return nsStats
+}
+
func (nsm *NamespaceMgr) GetDBStats(leaderOnly bool) map[string]string {
nsm.mutex.RLock()
nsStats := make(map[string]string, len(nsm.kvNodes))
@@ -448,10 +756,10 @@ func (nsm *NamespaceMgr) GetDBStats(leaderOnly bool) map[string]string {
return nsStats
}
-func (nsm *NamespaceMgr) GetLogSyncStatsInSyncer() ([]common.LogSyncStats, []common.LogSyncStats) {
+func (nsm *NamespaceMgr) GetLogSyncStatsInSyncer() ([]metric.LogSyncStats, []metric.LogSyncStats) {
nsm.mutex.RLock()
- nsRecvStats := make([]common.LogSyncStats, 0, len(nsm.kvNodes))
- nsSyncStats := make([]common.LogSyncStats, 0, len(nsm.kvNodes))
+ nsRecvStats := make([]metric.LogSyncStats, 0, len(nsm.kvNodes))
+ nsSyncStats := make([]metric.LogSyncStats, 0, len(nsm.kvNodes))
for k, n := range nsm.kvNodes {
if !n.IsReady() {
continue
@@ -469,12 +777,12 @@ func (nsm *NamespaceMgr) GetLogSyncStatsInSyncer() ([]common.LogSyncStats, []com
return nsRecvStats, nsSyncStats
}
-func (nsm *NamespaceMgr) GetLogSyncStats(leaderOnly bool, srcClusterName string) []common.LogSyncStats {
+func (nsm *NamespaceMgr) GetLogSyncStats(leaderOnly bool, srcClusterName string) []metric.LogSyncStats {
if srcClusterName == "" {
return nil
}
nsm.mutex.RLock()
- nsStats := make([]common.LogSyncStats, 0, len(nsm.kvNodes))
+ nsStats := make([]metric.LogSyncStats, 0, len(nsm.kvNodes))
for k, n := range nsm.kvNodes {
if !n.IsReady() {
continue
@@ -486,7 +794,7 @@ func (nsm *NamespaceMgr) GetLogSyncStats(leaderOnly bool, srcClusterName string)
if term == 0 && index == 0 {
continue
}
- var s common.LogSyncStats
+ var s metric.LogSyncStats
s.Name = k
s.IsLeader = n.Node.IsLead()
s.Term = term
@@ -498,9 +806,9 @@ func (nsm *NamespaceMgr) GetLogSyncStats(leaderOnly bool, srcClusterName string)
return nsStats
}
-func (nsm *NamespaceMgr) GetStats(leaderOnly bool) []common.NamespaceStats {
+func (nsm *NamespaceMgr) GetStats(leaderOnly bool, table string, needTableDetail bool) []metric.NamespaceStats {
nsm.mutex.RLock()
- nsStats := make([]common.NamespaceStats, 0, len(nsm.kvNodes))
+ nsStats := make([]metric.NamespaceStats, 0, len(nsm.kvNodes))
for k, n := range nsm.kvNodes {
if !n.IsReady() {
continue
@@ -508,7 +816,7 @@ func (nsm *NamespaceMgr) GetStats(leaderOnly bool) []common.NamespaceStats {
if leaderOnly && !n.Node.IsLead() {
continue
}
- ns := n.Node.GetStats()
+ ns := n.Node.GetStats(table, needTableDetail)
ns.Name = k
ns.EngType = n.conf.EngType
ns.IsLeader = n.Node.IsLead()
@@ -518,7 +826,7 @@ func (nsm *NamespaceMgr) GetStats(leaderOnly bool) []common.NamespaceStats {
return nsStats
}
-func (nsm *NamespaceMgr) OptimizeDB(ns string, table string) {
+func (nsm *NamespaceMgr) getNsNodeList(ns string) []*NamespaceNode {
nsm.mutex.RLock()
nodeList := make([]*NamespaceNode, 0, len(nsm.kvNodes))
for k, n := range nsm.kvNodes {
@@ -529,6 +837,59 @@ func (nsm *NamespaceMgr) OptimizeDB(ns string, table string) {
nodeList = append(nodeList, n)
}
nsm.mutex.RUnlock()
+ return nodeList
+}
+
+func (nsm *NamespaceMgr) BackupDB(ns string, checkLast bool) {
+ nodeList := nsm.getNsNodeList(ns)
+ for _, n := range nodeList {
+ if atomic.LoadInt32(&nsm.stopping) == 1 {
+ return
+ }
+ if n.IsReady() {
+ n.Node.BackupDB(checkLast)
+ }
+ }
+}
+
+func (nsm *NamespaceMgr) EnableTopn(ns string, on bool) {
+ nodeList := nsm.getNsNodeList(ns)
+ for _, n := range nodeList {
+ if atomic.LoadInt32(&nsm.stopping) == 1 {
+ return
+ }
+ if n.IsReady() {
+ n.Node.sm.EnableTopn(on)
+ }
+ }
+}
+
+func (nsm *NamespaceMgr) ClearTopn(ns string) {
+ nodeList := nsm.getNsNodeList(ns)
+ for _, n := range nodeList {
+ if atomic.LoadInt32(&nsm.stopping) == 1 {
+ return
+ }
+ if n.IsReady() {
+ n.Node.sm.ClearTopn()
+ }
+ }
+}
+
+func (nsm *NamespaceMgr) DisableOptimizeDB(disable bool) {
+ nodeList := nsm.getNsNodeList("")
+ for _, n := range nodeList {
+ if atomic.LoadInt32(&nsm.stopping) == 1 {
+ return
+ }
+ if n.IsReady() {
+ n.Node.DisableOptimizeDB(disable)
+ }
+ }
+}
+
+func (nsm *NamespaceMgr) OptimizeDB(ns string, table string) {
+ nodeList := nsm.getNsNodeList(ns)
for _, n := range nodeList {
if atomic.LoadInt32(&nsm.stopping) == 1 {
return
@@ -539,17 +900,35 @@ func (nsm *NamespaceMgr) OptimizeDB(ns string, table string) {
}
}
-func (nsm *NamespaceMgr) DeleteRange(ns string, dtr DeleteTableRange) error {
- nsm.mutex.RLock()
- nodeList := make([]*NamespaceNode, 0, len(nsm.kvNodes))
- for k, n := range nsm.kvNodes {
- baseName, _ := common.GetNamespaceAndPartition(k)
- if ns != baseName {
- continue
+func (nsm *NamespaceMgr) OptimizeDBExpire(ns string) {
+ nodeList := nsm.getNsNodeList(ns)
+ for _, n := range nodeList {
+ if atomic.LoadInt32(&nsm.stopping) == 1 {
+ return
+ }
+ if n.IsReady() {
+ n.Node.OptimizeDBExpire()
}
- nodeList = append(nodeList, n)
}
- nsm.mutex.RUnlock()
+}
+
+func (nsm *NamespaceMgr) OptimizeDBAnyRange(ns string, r CompactAPIRange) {
+ nodeList := nsm.getNsNodeList(ns)
+ for _, n := range nodeList {
+ if atomic.LoadInt32(&nsm.stopping) == 1 {
+ return
+ }
+ if n.IsReady() {
+ n.Node.OptimizeDBAnyRange(r)
+ }
+ }
+}
+
+func (nsm *NamespaceMgr) DeleteRange(ns string, dtr DeleteTableRange) error {
+ if ns == "" {
+ return errors.New("namespace can not be empty")
+ }
+ nodeList := nsm.getNsNodeList(ns)
for _, n := range nodeList {
if atomic.LoadInt32(&nsm.stopping) == 1 {
return common.ErrStopped
@@ -564,17 +943,75 @@ func (nsm *NamespaceMgr) DeleteRange(ns string, dtr DeleteTableRange) error {
return nil
}
-func (nsm *NamespaceMgr) onNamespaceDeleted(gid uint64, ns string) func() {
+// CleanSharedNsFiles cleans shared namespace data used by all partitions; the data is only removed once
+// all the partitions are removed and the whole namespace is deleted from the cluster
+func (nsm *NamespaceMgr) CleanSharedNsFiles(baseNS string) error {
+ nsm.mutex.Lock()
+ defer nsm.mutex.Unlock()
+ // check whether all partitions of this namespace are deleted; only then can the meta be removed
+ for fullName, _ := range nsm.kvNodes {
+ n, _ := common.GetNamespaceAndPartition(fullName)
+ if n == baseNS {
+ err := fmt.Errorf("some node is still running while clean namespace files: %s, %s", baseNS, fullName)
+ nodeLog.Info(err.Error())
+ return err
+ }
+ }
+ meta, ok := nsm.nsMetas[baseNS]
+ if ok && meta.walEng != nil {
+ nodeLog.Infof("all partitions of namespace %v stopped, removing shared data", baseNS)
+ meta.walEng.CloseAll()
+ }
+ delete(nsm.nsMetas, baseNS)
+ if nsm.machineConf.UseRocksWAL && baseNS != "" {
+ // clean the rock wal files
+ rsDir := path.Join(nsm.machineConf.DataRootDir, "rswal", baseNS)
+ ts := strconv.Itoa(int(time.Now().UnixNano()))
+ err := os.Rename(rsDir,
+ rsDir+"-deleted-"+ts)
+ if err != nil {
+ nodeLog.Warningf("remove shared data failed: %s", err.Error())
+ return err
+ }
+ nodeLog.Infof("remove shared wal data for namespace: %s", baseNS)
+ }
+ return nil
+}
+
+func (nsm *NamespaceMgr) onNamespaceStopped(gid uint64, ns string) func() {
return func() {
nsm.mutex.Lock()
+ defer nsm.mutex.Unlock()
_, ok := nsm.kvNodes[ns]
- if ok {
- nodeLog.Infof("namespace deleted: %v-%v", ns, gid)
- nsm.kvNodes[ns] = nil
- delete(nsm.kvNodes, ns)
- delete(nsm.groups, gid)
+ if !ok {
+ return
+ }
+ // note: do not delete any data here, since we cannot tell whether the node was merely stopped or destroyed
+ nodeLog.Infof("namespace stopped: %v-%v", ns, gid)
+ nsm.kvNodes[ns] = nil
+ delete(nsm.kvNodes, ns)
+ delete(nsm.groups, gid)
+ baseNS, _ := common.GetNamespaceAndPartition(ns)
+ meta, ok := nsm.nsMetas[baseNS]
+ if !ok {
+ return
+ }
+ found := false
+ // check whether all partitions of this namespace are deleted; if so, remove the meta
+ for fullName := range nsm.kvNodes {
+ n, _ := common.GetNamespaceAndPartition(fullName)
+ if n == baseNS {
+ found = true
+ break
+ }
+ }
+ if !found {
+ nodeLog.Infof("all partitions of namespace %v stopped, removing meta", baseNS)
+ if meta.walEng != nil {
+ meta.walEng.CloseAll()
+ }
+ delete(nsm.nsMetas, baseNS)
}
- nsm.mutex.Unlock()
}
}
@@ -603,15 +1040,6 @@ func (nsm *NamespaceMgr) processRaftTick() {
}
}
-// TODO:
-func (nsm *NamespaceMgr) SetNamespaceMagicCode(node *NamespaceNode, magic int64) error {
- return nil
-}
-
-func (nsm *NamespaceMgr) CheckMagicCode(ns string, magic int64, fix bool) error {
- return nil
-}
-
func (nsm *NamespaceMgr) checkNamespaceRaftLeader() {
ticker := time.NewTicker(time.Second * 15)
defer ticker.Stop()
@@ -691,16 +1119,22 @@ func (nsm *NamespaceMgr) clearUnusedRaftPeer() {
}
}
-func SetPerfLevel(level int) {
- atomic.StoreInt32(&perfLevel, int32(level))
- rockredis.SetPerfLevel(level)
-}
-
-func IsPerfEnabled() bool {
- lv := GetPerfLevel()
- return rockredis.IsPerfEnabledLevel(lv)
-}
-
-func GetPerfLevel() int {
- return int(atomic.LoadInt32(&perfLevel))
+func (nsm *NamespaceMgr) HandleSlowLimiterSwitchChanged(v interface{}) {
+ nodeLog.Infof("config for slow limiter changed to : %v", v)
+ on, ok := v.(int)
+ if !ok {
+ return
+ }
+ nsm.mutex.RLock()
+ for _, n := range nsm.kvNodes {
+ if !n.IsReady() || n.Node == nil {
+ continue
+ }
+ if on > 0 {
+ n.Node.slowLimiter.TurnOn()
+ } else {
+ n.Node.slowLimiter.TurnOff()
+ }
+ }
+ nsm.mutex.RUnlock()
}
diff --git a/node/namespace_test.go b/node/namespace_test.go
new file mode 100644
index 00000000..c888a39a
--- /dev/null
+++ b/node/namespace_test.go
@@ -0,0 +1,265 @@
+package node
+
+import (
+ "fmt"
+ io "io"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/rockredis"
+ "github.com/youzan/ZanRedisDB/stats"
+ "github.com/youzan/ZanRedisDB/transport/rafthttp"
+ "golang.org/x/net/context"
+)
+
+type fakeRaft struct {
+}
+
+func (fr *fakeRaft) SaveDBFrom(r io.Reader, m raftpb.Message) (int64, error) {
+ return 0, nil
+}
+func (fr *fakeRaft) Process(ctx context.Context, m raftpb.Message) error {
+ return nil
+}
+func (fr *fakeRaft) IsPeerRemoved(id uint64) bool {
+ return false
+}
+func (fr *fakeRaft) ReportUnreachable(id uint64, group raftpb.Group) {
+}
+func (fr *fakeRaft) ReportSnapshot(id uint64, group raftpb.Group, status raft.SnapshotStatus) {
+}
+
+func getTestNamespaceMgr(t *testing.T) (*NamespaceMgr, string) {
+ tmpDir, err := ioutil.TempDir("", fmt.Sprintf("rocksdb-test-%d", time.Now().UnixNano()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ mconf := &MachineConfig{
+ DataRootDir: tmpDir,
+ TickMs: 100,
+ ElectionTick: 10,
+ UseRocksWAL: true,
+ SharedRocksWAL: true,
+ }
+ ts := &stats.TransportStats{}
+ ts.Initialize()
+ raftTransport := &rafthttp.Transport{
+ DialTimeout: time.Second * 5,
+ ClusterID: "",
+ Raft: &fakeRaft{},
+ Snapshotter: &fakeRaft{},
+ TrStats: ts,
+ PeersStats: stats.NewPeersStats(),
+ ErrorC: nil,
+ }
+ nsMgr := NewNamespaceMgr(raftTransport, mconf)
+ return nsMgr, tmpDir
+}
+
+func TestInitNodeWithSharedRocksdb(t *testing.T) {
+ nsMgr, tmpDir := getTestNamespaceMgr(t)
+ defer os.RemoveAll(tmpDir)
+ nsMgr.Start()
+ defer nsMgr.Stop()
+
+ var replica ReplicaInfo
+ replica.NodeID = 1
+ replica.ReplicaID = 1
+ replica.RaftAddr = "127.0.0.1:1111"
+
+ nsConf := NewNSConfig()
+ nsConf.Name = "default-0"
+ nsConf.BaseName = "default"
+ nsConf.EngType = rockredis.EngType
+ nsConf.PartitionNum = 3
+ nsConf.Replicator = 1
+ nsConf.RaftGroupConf.GroupID = 1000
+ nsConf.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica)
+
+ n0, err := nsMgr.InitNamespaceNode(nsConf, 1, false)
+ assert.Nil(t, err)
+
+ nsConf1 := NewNSConfig()
+ nsConf1.Name = "default-1"
+ nsConf1.BaseName = "default"
+ nsConf1.EngType = rockredis.EngType
+ nsConf1.PartitionNum = 3
+ nsConf1.Replicator = 1
+ nsConf1.RaftGroupConf.GroupID = 1000
+ nsConf1.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica)
+ n1, err := nsMgr.InitNamespaceNode(nsConf1, 1, false)
+ assert.Nil(t, err)
+
+ nsConf2 := NewNSConfig()
+ nsConf2.Name = "default-2"
+ nsConf2.BaseName = "default"
+ nsConf2.EngType = rockredis.EngType
+ nsConf2.PartitionNum = 3
+ nsConf2.Replicator = 1
+ nsConf2.RaftGroupConf.GroupID = 1000
+ nsConf2.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica)
+ n2, err := nsMgr.InitNamespaceNode(nsConf2, 1, false)
+ assert.Nil(t, err)
+ // verify the raft storage is rocksdb-backed and that all partitions share the same engine
+ n0Rock, ok := n0.Node.rn.raftStorage.(*raft.RocksStorage)
+ assert.True(t, ok)
+ n1Rock, ok := n1.Node.rn.raftStorage.(*raft.RocksStorage)
+ assert.True(t, ok)
+ n2Rock, ok := n2.Node.rn.raftStorage.(*raft.RocksStorage)
+ assert.True(t, ok)
+ assert.Equal(t, n0Rock.Eng(), n1Rock.Eng())
+ assert.Equal(t, n0Rock.Eng(), n2Rock.Eng())
+}
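+
+// Note (illustrative, not part of the change itself): with UseRocksWAL and
+// SharedRocksWAL enabled in MachineConfig, all partitions of the same base
+// namespace are expected to share one rocksdb WAL engine, which is what the
+// Eng() equality assertions above verify.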
+
+func TestLoadLocalMagicList(t *testing.T) {
+ nsMgr, tmpDir := getTestNamespaceMgr(t)
+ defer os.RemoveAll(tmpDir)
+ nsMgr.Start()
+ defer nsMgr.Stop()
+
+ var replica ReplicaInfo
+ replica.NodeID = 1
+ replica.ReplicaID = 1
+ replica.RaftAddr = "127.0.0.1:1111"
+
+ nsConf := NewNSConfig()
+ nsConf.Name = "default-0"
+ nsConf.BaseName = "default"
+ nsConf.EngType = rockredis.EngType
+ nsConf.PartitionNum = 3
+ nsConf.Replicator = 1
+ nsConf.RaftGroupConf.GroupID = 1000
+ nsConf.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica)
+
+ n0, err := nsMgr.InitNamespaceNode(nsConf, 1, false)
+ assert.Nil(t, err)
+ magicCode := 12345
+ err = n0.SetMagicCode(int64(magicCode))
+ assert.Nil(t, err)
+ // setting the same magic code again should be ok
+ err = n0.SetMagicCode(int64(magicCode))
+ assert.Nil(t, err)
+ // setting a different magic code should return an error
+ err = n0.SetMagicCode(int64(magicCode + 1))
+ assert.NotNil(t, err)
+
+ nsConf1 := NewNSConfig()
+ nsConf1.Name = "default-1"
+ nsConf1.BaseName = "default"
+ nsConf1.EngType = rockredis.EngType
+ nsConf1.PartitionNum = 3
+ nsConf1.Replicator = 1
+ nsConf1.RaftGroupConf.GroupID = 1001
+ nsConf1.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica)
+ n1, err := nsMgr.InitNamespaceNode(nsConf1, 1, false)
+ assert.Nil(t, err)
+ err = n1.SetMagicCode(int64(magicCode))
+ assert.Nil(t, err)
+
+ nsConf2 := NewNSConfig()
+ nsConf2.Name = "default-2"
+ nsConf2.BaseName = "default"
+ nsConf2.EngType = rockredis.EngType
+ nsConf2.PartitionNum = 3
+ nsConf2.Replicator = 1
+ nsConf2.RaftGroupConf.GroupID = 1002
+ nsConf2.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica)
+ n2, err := nsMgr.InitNamespaceNode(nsConf2, 1, false)
+ assert.Nil(t, err)
+ err = n2.SetMagicCode(int64(magicCode))
+ assert.Nil(t, err)
+
+ magicList := nsMgr.CheckLocalNamespaces()
+ assert.Equal(t, 3, len(magicList))
+ for ns, code := range magicList {
+ baseNs, part := common.GetNamespaceAndPartition(ns)
+ assert.Equal(t, "default", baseNs)
+ assert.True(t, part < 3, part)
+ assert.True(t, part >= 0, part)
+ assert.Equal(t, int64(magicCode), code)
+ }
+}
+
+type testMapGet struct {
+ mutex sync.RWMutex
+ kvNodes map[string]*NamespaceNode
+ nsMetas map[string]*NamespaceMeta
+ kvNodes2 sync.Map
+ nsMetas2 sync.Map
+}
+
+func (m *testMapGet) getNode(nsBaseName string, pk []byte) *NamespaceNode {
+ m.mutex.RLock()
+ defer m.mutex.RUnlock()
+ v, ok := m.nsMetas[nsBaseName]
+ if !ok {
+ return nil
+ }
+ pid := GetHashedPartitionID(pk, v.PartitionNum)
+ fullName := common.GetNsDesp(nsBaseName, pid)
+ n, ok := m.kvNodes[fullName]
+ if !ok {
+ return nil
+ }
+ return n
+}
+
+func (m *testMapGet) getNode2(nsBaseName string, pk []byte) *NamespaceNode {
+ v, ok := m.nsMetas2.Load(nsBaseName)
+ if !ok {
+ return nil
+ }
+ vv := v.(*NamespaceMeta)
+ pid := GetHashedPartitionID(pk, vv.PartitionNum)
+ fullName := common.GetNsDesp(nsBaseName, pid)
+ n, ok := m.kvNodes2.Load(fullName)
+ if !ok {
+ return nil
+ }
+ return n.(*NamespaceNode)
+}
+
+func BenchmarkNamespaceMgr_GetNamespaceNodeWithPrimaryKey(b *testing.B) {
+ nsm := &testMapGet{
+ kvNodes: make(map[string]*NamespaceNode),
+ nsMetas: make(map[string]*NamespaceMeta),
+ }
+ nsm.nsMetas["test"] = &NamespaceMeta{
+ PartitionNum: 16,
+ }
+ nsm.nsMetas2.Store("test", &NamespaceMeta{
+ PartitionNum: 16,
+ })
+ for i := 0; i < 16; i++ {
+ k := fmt.Sprintf("test-%v", i)
+ nsm.kvNodes[k] = &NamespaceNode{
+ ready: 1,
+ }
+ nsm.kvNodes2.Store(k, &NamespaceNode{
+ ready: 1,
+ })
+ }
+ b.ResetTimer()
+ var wg sync.WaitGroup
+ for g := 0; g < 1000; g++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < b.N; i++ {
+ got := nsm.getNode2("test", []byte(strconv.Itoa(i)))
+ if got == nil {
+ panic("failed get node")
+ }
+ }
+ }()
+ }
+ wg.Wait()
+}
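+
+// Possible companion benchmark (sketch only, not part of this change): the loop
+// above only exercises the sync.Map based getNode2; to compare it against the
+// RWMutex-protected map path, the same loop can call getNode instead:
+//
+//	for i := 0; i < b.N; i++ {
+//	    if nsm.getNode("test", []byte(strconv.Itoa(i))) == nil {
+//	        b.Fatal("failed to get node")
+//	    }
+//	}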
diff --git a/node/node.go b/node/node.go
index 736ee8bc..9a76824e 100644
--- a/node/node.go
+++ b/node/node.go
@@ -7,9 +7,9 @@ import (
"errors"
"fmt"
"io"
+ math "math"
"os"
"path"
- "runtime"
"strconv"
"sync"
"sync/atomic"
@@ -17,28 +17,63 @@ import (
"golang.org/x/net/context"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/pkg/wait"
- "github.com/absolute8511/ZanRedisDB/raft"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/rockredis"
- "github.com/absolute8511/ZanRedisDB/transport/rafthttp"
+ ps "github.com/prometheus/client_golang/prometheus"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/metric"
+ "github.com/youzan/ZanRedisDB/pkg/wait"
+ "github.com/youzan/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/rockredis"
+ "github.com/youzan/ZanRedisDB/settings"
+ "github.com/youzan/ZanRedisDB/transport/rafthttp"
)
+var enableSnapTransferTest = false
+var enableSnapSaveTest = false
+var enableSnapApplyTest = false
+var enableSnapApplyBlockingTest = false
+var snapApplyBlockingC = make(chan time.Duration, 1)
+var enableSnapApplyRestoreStorageTest = false
+var UseRedisV2 = false
+
+func EnableSnapForTest(transfer bool, save bool, apply bool, restore bool) {
+ enableSnapTransferTest = transfer
+ enableSnapSaveTest = save
+ enableSnapApplyTest = apply
+ enableSnapApplyRestoreStorageTest = restore
+}
+
+func EnableSnapBlockingForTest(b bool) {
+ enableSnapApplyBlockingTest = b
+}
+
+func PutSnapBlockingTime(d time.Duration) {
+ snapApplyBlockingC <- d
+}
+
var (
- errInvalidResponse = errors.New("Invalid response type")
- errSyntaxError = errors.New("syntax error")
- errUnknownData = errors.New("unknown request data type")
- errTooMuchBatchSize = errors.New("the batch size exceed the limit")
- errRaftNotReadyForWrite = errors.New("ERR_CLUSTER_CHANGED: the raft is not ready for write")
+ errInvalidResponse = errors.New("Invalid response type")
+ errSyntaxError = errors.New("syntax error")
+ errUnknownData = errors.New("unknown request data type")
+ errTooMuchBatchSize = errors.New("the batch size exceed the limit")
+ errRaftNotReadyForWrite = errors.New("ERR_CLUSTER_CHANGED: the raft is not ready for write")
+ errWrongNumberArgs = errors.New("ERR wrong number of arguments for redis command")
+ ErrReadIndexTimeout = errors.New("wait read index timeout")
+ ErrNotLeader = errors.New("not raft leader")
+ ErrTransferLeaderSelfErr = errors.New("transfer leader to self not allowed")
)
const (
- RedisReq int8 = 0
- CustomReq int8 = 1
- SchemaChangeReq int8 = 2
- proposeTimeout = time.Second * 4
- proposeQueueLen = 500
+ RedisReq int8 = 0
+ CustomReq int8 = 1
+ SchemaChangeReq int8 = 2
+ RedisV2Req int8 = 3
+ proposeTimeout = time.Second * 4
+ raftSlow = time.Millisecond * 200
+ maxPoolIDLen = 256
+ waitPoolSize = 6
+ minPoolIDLen = 4
+ checkApplyFallbehind = 1000
)
const (
@@ -50,6 +85,12 @@ const (
ProposeOp_DeleteTable int = 6
)
+type CompactAPIRange struct {
+ StartFrom []byte `json:"start_from,omitempty"`
+ EndTo []byte `json:"end_to,omitempty"`
+ Dryrun bool `json:"dryrun,omitempty"`
+}
+
type DeleteTableRange struct {
Table string `json:"table,omitempty"`
StartFrom []byte `json:"start_from,omitempty"`
@@ -57,6 +98,9 @@ type DeleteTableRange struct {
// to avoid drop all table data, this is needed to delete all data in table
DeleteAll bool `json:"delete_all,omitempty"`
Dryrun bool `json:"dryrun,omitempty"`
+ // flag to indicate whether this delete should be replicated to the remote cluster,
+ // to avoid deleting too much by accident
+ NoReplayToRemoteCluster bool `json:"noreplay_to_remote_cluster"`
}
func (dtr DeleteTableRange) CheckValid() error {
@@ -78,9 +122,76 @@ type nodeProgress struct {
appliedi uint64
}
-type internalReq struct {
- reqData InternalRaftRequest
- done chan struct{}
+type RequestResultCode int
+
+const (
+ ReqComplete RequestResultCode = iota
+ ReqCancelled
+ ReqTimeouted
+)
+
+type waitReqHeaders struct {
+ wr wait.WaitResult
+ done chan struct{}
+ reqs BatchInternalRaftRequest
+ buf *bytes.Buffer
+ pool *sync.Pool
+}
+
+func (wrh *waitReqHeaders) release(reuseBuf bool) {
+ if wrh != nil {
+ wrh.wr = nil
+ if wrh.pool != nil {
+ wrh.reqs.Reqs = wrh.reqs.Reqs[:0]
+ if reuseBuf {
+ if wrh.buf != nil {
+ wrh.buf.Reset()
+ }
+ } else {
+ wrh.buf = &bytes.Buffer{}
+ }
+ wrh.pool.Put(wrh)
+ }
+ }
+}
+
+type waitReqPoolArray []*sync.Pool
+
+func newWaitReqPoolArray() waitReqPoolArray {
+ wa := make(waitReqPoolArray, waitPoolSize)
+ for i := 0; i < len(wa); i++ {
+ waitReqPool := &sync.Pool{}
+ l := minPoolIDLen * int(math.Pow(float64(2), float64(i)))
+ _ = l
+ waitReqPool.New = func() interface{} {
+ obj := &waitReqHeaders{}
+ obj.reqs.Reqs = make([]InternalRaftRequest, 0, 1)
+ obj.done = make(chan struct{}, 1)
+ obj.buf = &bytes.Buffer{}
+ obj.pool = waitReqPool
+ return obj
+ }
+ wa[i] = waitReqPool
+ }
+ return wa
+}
+
+func (wa waitReqPoolArray) getWaitReq(idLen int) *waitReqHeaders {
+ if idLen > maxPoolIDLen {
+ obj := &waitReqHeaders{}
+ obj.reqs.Reqs = make([]InternalRaftRequest, 0, idLen)
+ obj.done = make(chan struct{}, 1)
+ obj.buf = &bytes.Buffer{}
+ return obj
+ }
+ index := 0
+ for i := 0; i < waitPoolSize; i++ {
+ index = i
+ if idLen <= minPoolIDLen*int(math.Pow(float64(2), float64(i))) {
+ break
+ }
+ }
+ return wa[index].Get().(*waitReqHeaders)
}
type customProposeData struct {
@@ -95,7 +206,7 @@ type customProposeData struct {
// a key-value node backed by raft
type KVNode struct {
- reqProposeC chan *internalReq
+ readyC chan struct{}
rn *raftNode
store *KVStore
sm StateMachine
@@ -104,17 +215,26 @@ type KVNode struct {
stopDone chan struct{}
w wait.Wait
router *common.CmdRouter
- deleteCb func()
- clusterWriteStats common.WriteStats
+ stopCb func()
+ clusterWriteStats metric.WriteStats
ns string
machineConfig *MachineConfig
wg sync.WaitGroup
commitC <-chan applyInfo
- committedIndex uint64
+ appliedIndex uint64
+ lastSnapIndex uint64
clusterInfo common.IClusterInfo
- expireHandler *ExpireHandler
expirationPolicy common.ExpirationPolicy
remoteSyncedStates *remoteSyncedStateMgr
+ applyWait wait.WaitTime
+ // used for read index
+ readMu sync.RWMutex
+ readWaitC chan struct{}
+ readNotifier *notifier
+ wrPools waitReqPoolArray
+ slowLimiter *SlowLimiter
+ lastFailedSnapIndex uint64
+ applyingSnapshot int32
}
type KVSnapInfo struct {
@@ -139,52 +259,80 @@ func (si *KVSnapInfo) GetData() ([]byte, error) {
return d, nil
}
-func NewKVNode(kvopts *KVOptions, machineConfig *MachineConfig, config *RaftConfig,
- transport *rafthttp.Transport, join bool, deleteCb func(),
+func NewKVNode(kvopts *KVOptions, config *RaftConfig,
+ transport *rafthttp.Transport, join bool, stopCb func(),
clusterInfo common.IClusterInfo, newLeaderChan chan string) (*KVNode, error) {
config.WALDir = path.Join(config.DataDir, fmt.Sprintf("wal-%d", config.ID))
config.SnapDir = path.Join(config.DataDir, fmt.Sprintf("snap-%d", config.ID))
- config.nodeConfig = machineConfig
stopChan := make(chan struct{})
w := wait.New()
- sm, err := NewStateMachine(kvopts, *machineConfig, config.ID, config.GroupName, clusterInfo, w)
+ sl := NewSlowLimiter(config.GroupName)
+ sm, err := NewStateMachine(kvopts, *config.nodeConfig, config.ID, config.GroupName, clusterInfo, w, sl)
if err != nil {
return nil, err
}
s := &KVNode{
- reqProposeC: make(chan *internalReq, proposeQueueLen),
+ readyC: make(chan struct{}, 1),
stopChan: stopChan,
stopDone: make(chan struct{}),
store: nil,
sm: sm,
w: w,
router: common.NewCmdRouter(),
- deleteCb: deleteCb,
+ stopCb: stopCb,
ns: config.GroupName,
- machineConfig: machineConfig,
+ machineConfig: config.nodeConfig,
expirationPolicy: kvopts.ExpirationPolicy,
remoteSyncedStates: newRemoteSyncedStateMgr(),
+ applyWait: wait.NewTimeList(),
+ readWaitC: make(chan struct{}, 1),
+ readNotifier: newNotifier(),
+ wrPools: newWaitReqPoolArray(),
+ slowLimiter: sl,
}
+
if kvsm, ok := sm.(*kvStoreSM); ok {
s.store = kvsm.store
}
s.clusterInfo = clusterInfo
- s.expireHandler = NewExpireHandler(s)
s.registerHandler()
+ var rs raft.IExtRaftStorage
+ if config.nodeConfig.UseRocksWAL && config.rockEng != nil {
+ rs = raft.NewRocksStorage(
+ config.ID,
+ uint32(config.GroupID),
+ config.nodeConfig.SharedRocksWAL,
+ config.rockEng)
+ } else {
+ rs = raft.NewRealMemoryStorage()
+ }
commitC, raftNode, err := newRaftNode(config, transport,
- join, s, newLeaderChan)
+ join, s, rs, newLeaderChan)
if err != nil {
return nil, err
}
+ raftNode.slowLimiter = s.slowLimiter
s.rn = raftNode
s.commitC = commitC
return s, nil
}
+func (nd *KVNode) GetRaftConfig() *RaftConfig {
+ return nd.rn.config
+}
+
+func (nd *KVNode) GetLearnerRole() string {
+ return nd.machineConfig.LearnerRole
+}
+
+func (nd *KVNode) GetFullName() string {
+ return nd.ns
+}
+
func (nd *KVNode) Start(standalone bool) error {
// handle start/stop carefully
// if the object has self start()/stop() interface, and do not use the stopChan in KVNode,
@@ -209,10 +357,10 @@ func (nd *KVNode) Start(standalone bool) error {
nd.wg.Add(1)
go func() {
defer nd.wg.Done()
- nd.handleProposeReq()
+ nd.readIndexLoop()
}()
- nd.expireHandler.Start()
+ nd.slowLimiter.Start()
return nil
}
@@ -220,23 +368,70 @@ func (nd *KVNode) StopRaft() {
nd.rn.StopNode()
}
+func (nd *KVNode) IsStopping() bool {
+ return atomic.LoadInt32(&nd.stopping) == 1
+}
+
func (nd *KVNode) Stop() {
if !atomic.CompareAndSwapInt32(&nd.stopping, 0, 1) {
return
}
defer close(nd.stopDone)
close(nd.stopChan)
- nd.expireHandler.Stop()
nd.wg.Wait()
nd.rn.StopNode()
nd.sm.Close()
// deleted cb should be called after stopped, otherwise it
// may init the same node after deleted while the node is stopping.
- go nd.deleteCb()
+ go nd.stopCb()
+ nd.slowLimiter.Stop()
nd.rn.Infof("node %v stopped", nd.ns)
}
+// backup to avoid replay after restart; however, we can check whether the last backup is almost
+// up to date to avoid doing a backup again. In raft, a restart will compact all logs before the
+// snapshot, so backing up too recently may cause a snapshot transfer after a full restart of the raft cluster.
+func (nd *KVNode) BackupDB(checkLast bool) {
+ if checkLast {
+ if nd.rn.Lead() == raft.None {
+ return
+ }
+ if nd.GetAppliedIndex()-nd.GetLastSnapIndex() <= uint64(nd.rn.config.SnapCount/100) {
+ return
+ }
+ }
+ p := &customProposeData{
+ ProposeOp: ProposeOp_Backup,
+ NeedBackup: true,
+ }
+ d, _ := json.Marshal(p)
+ nd.CustomPropose(d)
+}
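+
+// Usage sketch (illustrative only): a periodic caller can invoke BackupDB(true)
+// safely, because the checkLast guard skips the proposal while there is no known
+// leader or while fewer than SnapCount/100 entries were applied since the last
+// snapshot. For example, assuming SnapCount were 100000, at most one backup per
+// ~1000 newly applied entries would be proposed:
+//
+//	ticker := time.NewTicker(time.Hour)
+//	for range ticker.C {
+//	    nd.BackupDB(true)
+//	}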
+
+func (nd *KVNode) OptimizeDBExpire() {
+ if nd.IsStopping() {
+ return
+ }
+ nd.rn.Infof("node %v begin optimize db expire meta", nd.ns)
+ defer nd.rn.Infof("node %v end optimize db", nd.ns)
+ nd.sm.OptimizeExpire()
+ // since we cannot know whether the leader or a follower has finished optimizing,
+ // we back up anyway after the optimize
+ nd.BackupDB(false)
+}
+
+func (nd *KVNode) DisableOptimizeDB(disable bool) {
+ if nd.IsStopping() {
+ return
+ }
+ nd.rn.Infof("node %v disable optimize db flag %v", nd.ns, disable)
+ nd.sm.DisableOptimize(disable)
+}
+
func (nd *KVNode) OptimizeDB(table string) {
+ if nd.IsStopping() {
+ return
+ }
nd.rn.Infof("node %v begin optimize db, table %v", nd.ns, table)
defer nd.rn.Infof("node %v end optimize db", nd.ns)
nd.sm.Optimize(table)
@@ -245,13 +440,20 @@ func (nd *KVNode) OptimizeDB(table string) {
if table == "" {
// since we can not know whether leader or follower is done on optimize
// we backup anyway after optimize
- p := &customProposeData{
- ProposeOp: ProposeOp_Backup,
- NeedBackup: true,
- }
- d, _ := json.Marshal(p)
- nd.CustomPropose(d)
+ nd.BackupDB(false)
+ }
+}
+
+func (nd *KVNode) OptimizeDBAnyRange(r CompactAPIRange) {
+ if nd.IsStopping() {
+ return
+ }
+ nd.rn.Infof("node %v begin optimize db range %v", nd.ns, r)
+ defer nd.rn.Infof("node %v end optimize db range", nd.ns)
+ if r.Dryrun {
+ return
}
+ nd.sm.OptimizeAnyRange(r)
}
func (nd *KVNode) DeleteRange(drange DeleteTableRange) error {
@@ -269,6 +471,7 @@ func (nd *KVNode) DeleteRange(drange DeleteTableRange) error {
if err != nil {
nd.rn.Infof("node %v delete table range %v failed: %v", nd.ns, drange, err)
}
+ nd.rn.Infof("node %v delete table range %v ", nd.ns, drange)
return err
}
@@ -289,13 +492,20 @@ func (nd *KVNode) GetRaftStatus() raft.Status {
// this is used for leader to determine whether a follower is up to date.
func (nd *KVNode) IsReplicaRaftReady(raftID uint64) bool {
- s := nd.rn.node.Status()
+ allowLagCnt := settings.Soft.LeaderTransferLag
+ s := nd.GetRaftStatus()
+ if s.Progress == nil {
+ return false
+ }
pg, ok := s.Progress[raftID]
if !ok {
return false
}
+ if pg.IsPaused() {
+ return false
+ }
if pg.State.String() == "ProgressStateReplicate" {
- if pg.Match+maxInflightMsgs >= s.Commit {
+ if pg.Match+allowLagCnt >= s.Commit {
return true
}
} else if pg.State.String() == "ProgressStateProbe" {
@@ -338,8 +548,27 @@ func (nd *KVNode) GetDBInternalStats() string {
return ""
}
-func (nd *KVNode) GetStats() common.NamespaceStats {
- ns := nd.sm.GetStats()
+func (nd *KVNode) SetMaxBackgroundOptions(maxCompact int, maxBackJobs int) error {
+ if nd.store != nil {
+ return nd.store.SetMaxBackgroundOptions(maxCompact, maxBackJobs)
+ }
+ return nil
+}
+
+func (nd *KVNode) GetWALDBInternalStats() map[string]interface{} {
+ if nd.rn == nil {
+ return nil
+ }
+
+ eng := nd.rn.config.rockEng
+ if eng == nil {
+ return nil
+ }
+ return eng.GetInternalStatus()
+}
+
+func (nd *KVNode) GetStats(table string, needTableDetail bool) metric.NamespaceStats {
+ ns := nd.sm.GetStats(table, needTableDetail)
ns.ClusterWriteStats = nd.clusterWriteStats.Copy()
return ns
}
@@ -358,158 +587,97 @@ func (nd *KVNode) CleanData() error {
return nd.sm.CleanData()
}
-func (nd *KVNode) GetHandler(cmd string) (common.CommandFunc, bool, bool) {
+func (nd *KVNode) GetHandler(cmd string) (common.CommandFunc, bool) {
return nd.router.GetCmdHandler(cmd)
}
+func (nd *KVNode) GetWriteHandler(cmd string) (common.WriteCommandFunc, bool) {
+ return nd.router.GetWCmdHandler(cmd)
+}
func (nd *KVNode) GetMergeHandler(cmd string) (common.MergeCommandFunc, bool, bool) {
return nd.router.GetMergeCmdHandler(cmd)
}
-func (nd *KVNode) handleProposeReq() {
- var reqList BatchInternalRaftRequest
- reqList.Reqs = make([]*InternalRaftRequest, 0, 100)
- var lastReq *internalReq
- // TODO: combine pipeline and batch to improve performance
- // notice the maxPendingProposals config while using pipeline, avoid
- // sending too much pipeline which overflow the proposal buffer.
- //lastReqList := make([]*internalReq, 0, 1024)
-
- defer func() {
- if e := recover(); e != nil {
- buf := make([]byte, 4096)
- n := runtime.Stack(buf, false)
- buf = buf[0:n]
- nd.rn.Errorf("handle propose loop panic: %s:%v", buf, e)
- }
- for _, r := range reqList.Reqs {
- nd.w.Trigger(r.Header.ID, common.ErrStopped)
- }
- nd.rn.Infof("handle propose loop exit")
- for {
- select {
- case r := <-nd.reqProposeC:
- nd.w.Trigger(r.reqData.Header.ID, common.ErrStopped)
- default:
- return
- }
- }
- }()
- for {
- pc := nd.reqProposeC
- if len(reqList.Reqs) >= proposeQueueLen*2 {
- pc = nil
- }
- select {
- case r := <-pc:
- reqList.Reqs = append(reqList.Reqs, &r.reqData)
- lastReq = r
- default:
- if len(reqList.Reqs) == 0 {
- select {
- case r := <-nd.reqProposeC:
- reqList.Reqs = append(reqList.Reqs, &r.reqData)
- lastReq = r
- case <-nd.stopChan:
- return
- }
- }
- reqList.ReqNum = int32(len(reqList.Reqs))
- reqList.Timestamp = time.Now().UnixNano()
- buffer, err := reqList.Marshal()
- // buffer will be reused by raft?
- // TODO:buffer, err := reqList.MarshalTo()
- if err != nil {
- nd.rn.Infof("failed to marshal request: %v", err)
- for _, r := range reqList.Reqs {
- nd.w.Trigger(r.Header.ID, err)
- }
- reqList.Reqs = reqList.Reqs[:0]
- continue
- }
- lastReq.done = make(chan struct{})
- //nd.rn.Infof("handle req %v, marshal buffer: %v, raw: %v, %v", len(reqList.Reqs),
- // realN, buffer, reqList.Reqs)
- start := lastReq.reqData.Header.Timestamp
- cost := reqList.Timestamp - start
- if cost >= int64(proposeTimeout.Nanoseconds())/2 {
- nd.rn.Infof("ignore slow for begin propose too late: %v, cost %v", len(reqList.Reqs), cost)
- for _, r := range reqList.Reqs {
- nd.w.Trigger(r.Header.ID, common.ErrQueueTimeout)
- }
- } else {
- leftProposeTimeout := proposeTimeout + time.Second - time.Duration(cost)
-
- ctx, cancel := context.WithTimeout(context.Background(), leftProposeTimeout)
- err = nd.rn.node.ProposeWithDrop(ctx, buffer, cancel)
- if err != nil {
- nd.rn.Infof("propose failed: %v, err: %v", len(reqList.Reqs), err.Error())
- for _, r := range reqList.Reqs {
- nd.w.Trigger(r.Header.ID, err)
- }
- } else {
- //lastReqList = append(lastReqList, lastReq)
- select {
- case <-lastReq.done:
- case <-ctx.Done():
- err := ctx.Err()
- waitLeader := false
- if err == context.DeadlineExceeded {
- waitLeader = true
- nd.rn.Infof("propose timeout: %v, %v", err.Error(), len(reqList.Reqs))
- }
- if err == context.Canceled {
- // proposal canceled can be caused by leader transfer or no leader
- err = ErrProposalCanceled
- waitLeader = true
- nd.rn.Infof("propose canceled : %v", len(reqList.Reqs))
- }
- for _, r := range reqList.Reqs {
- nd.w.Trigger(r.Header.ID, err)
- }
- if waitLeader {
- time.Sleep(proposeTimeout/100 + time.Millisecond*100)
- }
- case <-nd.stopChan:
- cancel()
- return
- }
- }
- cancel()
- cost = time.Now().UnixNano() - start
- }
- if cost >= int64(time.Second.Nanoseconds())/2 {
- nd.rn.Infof("slow for batch propose: %v, cost %v", len(reqList.Reqs), cost)
- }
- for i := range reqList.Reqs {
- reqList.Reqs[i] = nil
- }
- reqList.Reqs = reqList.Reqs[:0]
- lastReq = nil
+func (nd *KVNode) ProposeInternal(ctx context.Context, irr InternalRaftRequest, cancel context.CancelFunc, start time.Time) (*waitReqHeaders, error) {
+ wrh := nd.wrPools.getWaitReq(1)
+ wrh.reqs.Timestamp = irr.Header.Timestamp
+ if len(wrh.done) != 0 {
+ wrh.done = make(chan struct{}, 1)
+ }
+ var e raftpb.Entry
+ if irr.Header.DataType == int32(RedisV2Req) {
+ e.DataType = irr.Header.DataType
+ e.Timestamp = irr.Header.Timestamp
+ e.ID = irr.Header.ID
+ e.Data = irr.Data
+ } else {
+ wrh.reqs.Reqs = append(wrh.reqs.Reqs, irr)
+ wrh.reqs.ReqNum = 1
+ needSize := wrh.reqs.Size()
+ wrh.buf.Grow(needSize)
+ b := wrh.buf.Bytes()
+ //buffer, err := wrh.reqs.Marshal()
+ // buffer will be reused by raft
+ n, err := wrh.reqs.MarshalTo(b[:needSize])
+ if err != nil {
+ wrh.release(true)
+ return nil, err
}
+
+ rbuf := make([]byte, n)
+ copy(rbuf, b[:n])
+ e.Data = rbuf
}
+ marshalCost := time.Since(start)
+ wrh.wr = nd.w.RegisterWithC(irr.Header.ID, wrh.done)
+ err := nd.rn.node.ProposeEntryWithDrop(ctx, e, cancel)
+ if err != nil {
+ nd.rn.Infof("propose failed : %v", err.Error())
+ metric.ErrorCnt.With(ps.Labels{
+ "namespace": nd.GetFullName(),
+ "error_info": "raft_propose_failed",
+ }).Inc()
+ nd.w.Trigger(irr.Header.ID, err)
+ wrh.release(false)
+ return nil, err
+ }
+ proposalCost := time.Since(start)
+ if proposalCost >= time.Millisecond {
+ metric.RaftWriteLatency.With(ps.Labels{
+ "namespace": nd.GetFullName(),
+ "step": "marshal_propose",
+ }).Observe(float64(marshalCost.Milliseconds()))
+ metric.RaftWriteLatency.With(ps.Labels{
+ "namespace": nd.GetFullName(),
+ "step": "propose_to_queue",
+ }).Observe(float64(proposalCost.Milliseconds()))
+ }
+ return wrh, nil
}
-func (nd *KVNode) IsWriteReady() bool {
- return atomic.LoadInt32(&nd.rn.memberCnt) > int32(nd.rn.config.Replicator/2)
+func (nd *KVNode) SetDynamicInfo(dync NamespaceDynamicConf) {
+ if nd.rn != nil && nd.rn.config != nil {
+ atomic.StoreInt32(&nd.rn.config.Replicator, int32(dync.Replicator))
+ }
}
-func (nd *KVNode) ProposeRawAndWait(buffer []byte, term uint64, index uint64, raftTs int64) error {
- var reqList BatchInternalRaftRequest
- err := reqList.Unmarshal(buffer)
- if err != nil {
- nd.rn.Infof("propose raw failed: %v at (%v-%v)", err.Error(), term, index)
- return err
- }
- if nodeLog.Level() >= common.LOG_DETAIL {
- nd.rn.Infof("propose raw (%v): %v at (%v-%v)", len(buffer), buffer, term, index)
+func (nd *KVNode) IsWriteReady() bool {
+ // to allow writes while the replica count is being changed from 1 to 2,
+ // we special-case a configured replicator of 2
+ rep := atomic.LoadInt32(&nd.rn.config.Replicator)
+ if rep == 2 {
+ return atomic.LoadInt32(&nd.rn.memberCnt) > 0
}
+ return atomic.LoadInt32(&nd.rn.memberCnt) > int32(rep/2)
+}
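+
+// Worked example (illustrative only): with Replicator=3 a write needs memberCnt > 1,
+// i.e. at least two live members. The special case above keeps a namespace writable
+// while it is scaled from one replica to two, where a strict majority check would
+// reject writes until the second replica has fully joined:
+//
+//	Replicator=1 -> memberCnt > 0
+//	Replicator=2 -> memberCnt > 0 (special case)
+//	Replicator=3 -> memberCnt > 1
+//	Replicator=5 -> memberCnt > 2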
+
+func (nd *KVNode) ProposeRawAsyncFromSyncer(buffer []byte, reqList *BatchInternalRaftRequest, term uint64, index uint64, raftTs int64) (*FutureRsp, *BatchInternalRaftRequest, error) {
reqList.Type = FromClusterSyncer
reqList.ReqId = nd.rn.reqIDGen.Next()
reqList.OrigTerm = term
reqList.OrigIndex = index
if reqList.Timestamp != raftTs {
- return fmt.Errorf("invalid sync raft request for mismatch timestamp: %v vs %v", reqList.Timestamp, raftTs)
+ return nil, reqList, fmt.Errorf("invalid sync raft request for mismatch timestamp: %v vs %v", reqList.Timestamp, raftTs)
}
for _, req := range reqList.Reqs {
@@ -517,57 +685,84 @@ func (nd *KVNode) ProposeRawAndWait(buffer []byte, term uint64, index uint64, ra
req.Header.ID = nd.rn.reqIDGen.Next()
}
dataLen := reqList.Size()
+ var err error
if dataLen <= len(buffer) {
n, err := reqList.MarshalTo(buffer[:dataLen])
if err != nil {
- return err
+ return nil, reqList, err
}
if n != dataLen {
- return errors.New("marshal length mismatch")
+ return nil, reqList, errors.New("marshal length mismatch")
}
} else {
buffer, err = reqList.Marshal()
if err != nil {
- return err
+ return nil, reqList, err
}
}
- start := time.Now()
- ch := nd.w.Register(reqList.ReqId)
+ // must register before propose
+ wr := nd.w.Register(reqList.ReqId)
ctx, cancel := context.WithTimeout(context.Background(), proposeTimeout)
if nodeLog.Level() >= common.LOG_DETAIL {
nd.rn.Infof("propose raw after rewrite(%v): %v at (%v-%v)", dataLen, buffer[:dataLen], term, index)
}
- defer cancel()
err = nd.rn.node.ProposeWithDrop(ctx, buffer[:dataLen], cancel)
if err != nil {
- return err
+ cancel()
+ nd.w.Trigger(reqList.ReqId, err)
+ return nil, reqList, err
}
- var ok bool
- var rsp interface{}
- select {
- case rsp = <-ch:
+
+ var futureRsp FutureRsp
+ futureRsp.waitFunc = func() (interface{}, error) {
+ var rsp interface{}
+ var ok bool
+ var err error
+ // will always return a response, timed out or get a error
+ select {
+ case <-ctx.Done():
+ err = ctx.Err()
+ if err == context.Canceled {
+ // proposal canceled can be caused by leader transfer or no leader
+ err = ErrProposalCanceled
+ }
+ nd.w.Trigger(reqList.ReqId, err)
+ rsp = err
+ case <-wr.WaitC():
+ // WaitC should be called only once
+ rsp = wr.GetResult()
+ }
+ cancel()
if err, ok = rsp.(error); ok {
rsp = nil
+ //nd.rn.Infof("request return error: %v, %v", req.String(), err.Error())
} else {
err = nil
}
- case <-nd.stopChan:
- err = common.ErrStopped
- case <-ctx.Done():
- err = ctx.Err()
- if err == context.DeadlineExceeded {
- nd.rn.Infof("propose timeout: %v", err.Error())
- }
- if err == context.Canceled {
- // proposal canceled can be caused by leader transfer or no leader
- err = ErrProposalCanceled
- nd.rn.Infof("propose canceled ")
- }
+ return rsp, err
+ }
+ return &futureRsp, reqList, nil
+}
+
+func (nd *KVNode) ProposeRawAndWaitFromSyncer(reqList *BatchInternalRaftRequest, term uint64, index uint64, raftTs int64) error {
+ f, _, err := nd.ProposeRawAsyncFromSyncer(nil, reqList, term, index, raftTs)
+ if err != nil {
+ return err
}
+ start := time.Now()
+ rsp, err := f.WaitRsp()
+ if err != nil {
+ return err
+ }
+ var ok bool
+ if err, ok = rsp.(error); ok {
+ return err
+ }
+
cost := time.Since(start).Nanoseconds()
for _, req := range reqList.Reqs {
- if req.Header.DataType == int32(RedisReq) {
- nd.clusterWriteStats.UpdateWriteStats(int64(len(req.Data)), cost/1000)
+ if req.Header.DataType == int32(RedisReq) || req.Header.DataType == int32(RedisV2Req) {
+ nd.UpdateWriteStats(int64(len(req.Data)), cost/1000)
}
}
if cost >= int64(proposeTimeout.Nanoseconds())/2 {
@@ -576,78 +771,134 @@ func (nd *KVNode) ProposeRawAndWait(buffer []byte, term uint64, index uint64, ra
return err
}
-func (nd *KVNode) queueRequest(req *internalReq) (interface{}, error) {
+func (nd *KVNode) UpdateWriteStats(vSize int64, latencyUs int64) {
+ nd.clusterWriteStats.UpdateWriteStats(vSize, latencyUs)
+
+ if latencyUs >= time.Millisecond.Microseconds() {
+ metric.ClusterWriteLatency.With(ps.Labels{
+ "namespace": nd.GetFullName(),
+ }).Observe(float64(latencyUs / 1000))
+ }
+ metric.WriteCmdCounter.With(ps.Labels{
+ "namespace": nd.GetFullName(),
+ }).Inc()
+}
+
+type FutureRsp struct {
+ waitFunc func() (interface{}, error)
+ rspHandle func(interface{}) (interface{}, error)
+}
+
+// note: WaitRsp should not be called twice on the same FutureRsp
+func (fr *FutureRsp) WaitRsp() (interface{}, error) {
+ rsp, err := fr.waitFunc()
+ if err != nil {
+ return nil, err
+ }
+ if fr.rspHandle != nil {
+ return fr.rspHandle(rsp)
+ }
+ return rsp, nil
+}
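+
+// Usage sketch (illustrative only): callers get a *FutureRsp from one of the
+// *ProposeAsync helpers below and should call WaitRsp exactly once, since the wait
+// path is what resolves the registered wait entry and returns the pooled request
+// header for reuse:
+//
+//	fr, err := nd.RedisProposeAsync(buf)
+//	if err != nil {
+//	    return err
+//	}
+//	rsp, err := fr.WaitRsp() // exactly once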
+
+// make sure to call WaitRsp on the returned FutureRsp so its resources can be released.
+func (nd *KVNode) queueRequest(start time.Time, req InternalRaftRequest) (*FutureRsp, error) {
if !nd.IsWriteReady() {
return nil, errRaftNotReadyForWrite
}
if !nd.rn.HasLead() {
+ metric.ErrorCnt.With(ps.Labels{
+ "namespace": nd.GetFullName(),
+ "error_info": "raft_propose_failed_noleader",
+ }).Inc()
return nil, ErrNodeNoLeader
}
- start := time.Now()
- req.reqData.Header.Timestamp = start.UnixNano()
- ch := nd.w.Register(req.reqData.Header.ID)
- select {
- case nd.reqProposeC <- req:
- default:
+ req.Header.Timestamp = start.UnixNano()
+ ctx, cancel := context.WithTimeout(context.Background(), proposeTimeout)
+ wrh, err := nd.ProposeInternal(ctx, req, cancel, start)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ var futureRsp FutureRsp
+ futureRsp.waitFunc = func() (interface{}, error) {
+ //nd.rn.Infof("queue request: %v", req.reqData.String())
+ var rsp interface{}
+ var ok bool
+ // will always return a response, timed out or get a error
select {
- case nd.reqProposeC <- req:
- case <-nd.stopChan:
- nd.w.Trigger(req.reqData.Header.ID, common.ErrStopped)
- case <-time.After(proposeTimeout / 2):
- nd.w.Trigger(req.reqData.Header.ID, common.ErrQueueTimeout)
+ case <-ctx.Done():
+ err = ctx.Err()
+ if err == context.Canceled {
+ // proposal canceled can be caused by leader transfer or no leader
+ err = ErrProposalCanceled
+ }
+ nd.w.Trigger(req.Header.ID, err)
+ rsp = err
+ case <-wrh.wr.WaitC():
+ rsp = wrh.wr.GetResult()
}
- }
- //nd.rn.Infof("queue request: %v", req.reqData.String())
- var err error
- var rsp interface{}
- var ok bool
- select {
- case rsp = <-ch:
- if req.done != nil {
- close(req.done)
+ cancel()
+
+ defer wrh.release(err == nil)
+ if err != nil {
+ return nil, err
}
if err, ok = rsp.(error); ok {
rsp = nil
- } else {
- err = nil
- }
- case <-nd.stopChan:
- rsp = nil
- err = common.ErrStopped
- }
- if req.reqData.Header.DataType == int32(RedisReq) {
- cost := time.Since(start)
- nd.clusterWriteStats.UpdateWriteStats(int64(len(req.reqData.Data)), cost.Nanoseconds()/1000)
- if err == nil && !nd.IsWriteReady() {
- nd.rn.Infof("write request %v on raft success but raft member is less than replicator",
- req.reqData.String())
- return nil, errRaftNotReadyForWrite
- }
- if cost >= time.Second {
- nd.rn.Infof("write request %v slow cost: %v",
- req.reqData.String(), cost)
+ return nil, err
}
+ return rsp, nil
+ }
+ return &futureRsp, nil
+}
+
+func (nd *KVNode) RedisV2ProposeAsync(buf []byte) (*FutureRsp, error) {
+ h := RequestHeader{
+ ID: nd.rn.reqIDGen.Next(),
+ DataType: int32(RedisV2Req),
+ }
+ raftReq := InternalRaftRequest{
+ Header: h,
+ Data: buf,
}
- return rsp, err
+ start := time.Now()
+ return nd.queueRequest(start, raftReq)
}
-func (nd *KVNode) Propose(buf []byte) (interface{}, error) {
- h := &RequestHeader{
+func (nd *KVNode) RedisProposeAsync(buf []byte) (*FutureRsp, error) {
+ h := RequestHeader{
ID: nd.rn.reqIDGen.Next(),
DataType: int32(RedisReq),
}
+
raftReq := InternalRaftRequest{
Header: h,
Data: buf,
}
- req := &internalReq{
- reqData: raftReq,
+ start := time.Now()
+ return nd.queueRequest(start, raftReq)
+}
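+
+// Pipelining sketch (illustrative only, assuming bufs holds already marshaled redis
+// commands): because RedisProposeAsync returns before the raft round trip completes,
+// several proposals can be kept in flight and their results collected in order:
+//
+//	futures := make([]*FutureRsp, 0, len(bufs))
+//	for _, b := range bufs {
+//	    fr, err := nd.RedisProposeAsync(b)
+//	    if err != nil {
+//	        return err
+//	    }
+//	    futures = append(futures, fr)
+//	}
+//	for _, fr := range futures {
+//	    if _, err := fr.WaitRsp(); err != nil {
+//	        return err
+//	    }
+//	}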
+
+func (nd *KVNode) RedisPropose(buf []byte) (interface{}, error) {
+ h := RequestHeader{
+ ID: nd.rn.reqIDGen.Next(),
+ DataType: int32(RedisReq),
+ }
+ raftReq := InternalRaftRequest{
+ Header: h,
+ Data: buf,
+ }
+ start := time.Now()
+ fr, err := nd.queueRequest(start, raftReq)
+ if err != nil {
+ return nil, err
}
- return nd.queueRequest(req)
+ return fr.WaitRsp()
}
func (nd *KVNode) CustomPropose(buf []byte) (interface{}, error) {
- h := &RequestHeader{
+ h := RequestHeader{
ID: nd.rn.reqIDGen.Next(),
DataType: int32(CustomReq),
}
@@ -655,14 +906,16 @@ func (nd *KVNode) CustomPropose(buf []byte) (interface{}, error) {
Header: h,
Data: buf,
}
- req := &internalReq{
- reqData: raftReq,
+ start := time.Now()
+ fr, err := nd.queueRequest(start, raftReq)
+ if err != nil {
+ return nil, err
}
- return nd.queueRequest(req)
+ return fr.WaitRsp()
}
func (nd *KVNode) ProposeChangeTableSchema(table string, sc *SchemaChange) error {
- h := &RequestHeader{
+ h := RequestHeader{
ID: nd.rn.reqIDGen.Next(),
DataType: int32(SchemaChangeReq),
}
@@ -671,11 +924,13 @@ func (nd *KVNode) ProposeChangeTableSchema(table string, sc *SchemaChange) error
Header: h,
Data: buf,
}
- req := &internalReq{
- reqData: raftReq,
- }
- _, err := nd.queueRequest(req)
+ start := time.Now()
+ fr, err := nd.queueRequest(start, raftReq)
+ if err != nil {
+ return err
+ }
+ _, err = fr.WaitRsp()
return err
}
@@ -769,18 +1024,33 @@ func (nd *KVNode) proposeConfChange(cc raftpb.ConfChange) error {
}
func (nd *KVNode) Tick() {
- nd.rn.node.Tick()
+ succ := nd.rn.node.Tick()
+ if !succ {
+ nd.rn.Infof("miss tick, current commit channel: %v", len(nd.commitC))
+ nd.rn.node.NotifyEventCh()
+ }
+}
+
+func (nd *KVNode) GetLastSnapIndex() uint64 {
+ return atomic.LoadUint64(&nd.lastSnapIndex)
}
-func (nd *KVNode) GetCommittedIndex() uint64 {
- return atomic.LoadUint64(&nd.committedIndex)
+func (nd *KVNode) SetLastSnapIndex(ci uint64) {
+ atomic.StoreUint64(&nd.lastSnapIndex, ci)
}
-func (nd *KVNode) SetCommittedIndex(ci uint64) {
- atomic.StoreUint64(&nd.committedIndex, ci)
+func (nd *KVNode) GetAppliedIndex() uint64 {
+ return atomic.LoadUint64(&nd.appliedIndex)
+}
+
+func (nd *KVNode) SetAppliedIndex(ci uint64) {
+ atomic.StoreUint64(&nd.appliedIndex, ci)
}
func (nd *KVNode) IsRaftSynced(checkCommitIndex bool) bool {
+ if nd.rn == nil {
+ return false
+ }
if nd.rn.Lead() == raft.None {
select {
case <-time.After(time.Duration(nd.machineConfig.ElectionTick/10) * time.Millisecond * time.Duration(nd.machineConfig.TickMs)):
@@ -788,7 +1058,7 @@ func (nd *KVNode) IsRaftSynced(checkCommitIndex bool) bool {
return false
}
if nd.rn.Lead() == raft.None {
- nodeLog.Infof("not synced, since no leader ")
+ nd.rn.Infof("not synced, since no leader ")
nd.rn.maybeTryElection()
return false
}
@@ -797,69 +1067,177 @@ func (nd *KVNode) IsRaftSynced(checkCommitIndex bool) bool {
// leader always raft synced.
return true
}
+ // here, raft may not have started yet, so do not try to wait for a raft event
+ ai := nd.GetAppliedIndex()
+ ci := nd.GetRaftStatus().Commit
+ nd.rn.Infof("check raft synced, apply: %v, commit: %v", ai, ci)
+ if nd.IsApplyingSnapshot() {
+ return false
+ }
if !checkCommitIndex {
return true
}
+ if ai+checkApplyFallbehind < ci {
+ return false
+ }
to := time.Second * 5
- req := make([]byte, 8)
- binary.BigEndian.PutUint64(req, nd.rn.reqIDGen.Next())
ctx, cancel := context.WithTimeout(context.Background(), to)
- if err := nd.rn.node.ReadIndex(ctx, req); err != nil {
- cancel()
- if err == raft.ErrStopped {
- }
- nodeLog.Warningf("failed to get the read index from raft: %v", err)
+ err := nd.linearizableReadNotify(ctx)
+ cancel()
+
+ if err != nil {
+ nd.rn.Infof("wait raft not synced, %v", err.Error())
return false
}
- cancel()
+ return true
+}
+
+func (nd *KVNode) linearizableReadNotify(ctx context.Context) error {
+ nd.readMu.RLock()
+ n := nd.readNotifier
+ nd.readMu.RUnlock()
+
+ // signal linearizable loop for current notify if it hasn't been already
+ select {
+ case nd.readWaitC <- struct{}{}:
+ default:
+ }
+
+ // wait for read state notification
+ select {
+ case <-n.c:
+ return n.err
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-nd.stopChan:
+ return common.ErrStopped
+ }
+}
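+
+// Read-path sketch (illustrative only): a linearizable read first waits for a read
+// index confirmation, and for the local apply to catch up to it (see readIndexLoop
+// below), before reading from the local state machine:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+//	defer cancel()
+//	if err := nd.linearizableReadNotify(ctx); err != nil {
+//	    return err // timed out, stopped, or no confirmation from the leader
+//	}
+//	// now it is safe to serve the read from the local store
+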
+func (nd *KVNode) readIndexLoop() {
var rs raft.ReadState
- var (
- timeout bool
- done bool
- )
- for !timeout && !done {
+ to := time.Second * 5
+ for {
+ req := make([]byte, 8)
+ id1 := nd.rn.reqIDGen.Next()
+ binary.BigEndian.PutUint64(req, id1)
select {
- case rs := <-nd.rn.readStateC:
- done = bytes.Equal(rs.RequestCtx, req)
- if !done {
- }
- case <-time.After(to):
- nodeLog.Infof("timeout waiting for read index response")
- timeout = true
+ case <-nd.readWaitC:
case <-nd.stopChan:
- return false
+ return
}
+ nextN := newNotifier()
+ nd.readMu.Lock()
+ nr := nd.readNotifier
+ nd.readNotifier = nextN
+ nd.readMu.Unlock()
+
+ ctx, cancel := context.WithTimeout(context.Background(), to)
+ if err := nd.rn.node.ReadIndex(ctx, req); err != nil {
+ cancel()
+ nr.notify(err)
+ if err == raft.ErrStopped {
+ return
+ }
+ nodeLog.Warningf("failed to get the read index from raft: %v", err)
+ continue
+ }
+ cancel()
+
+ var (
+ timeout bool
+ done bool
+ )
+ for !timeout && !done {
+ select {
+ case rs := <-nd.rn.readStateC:
+ done = bytes.Equal(rs.RequestCtx, req)
+ if !done {
+ // a previous request might have timed out; ignore its response and
+ // continue waiting for the response to the current request.
+ id2 := uint64(0)
+ if len(rs.RequestCtx) == 8 {
+ id2 = binary.BigEndian.Uint64(rs.RequestCtx)
+ }
+ nd.rn.Infof("ignored out of date read index: %v, %v", id2, id1)
+ }
+ case <-time.After(to):
+ nd.rn.Infof("timeout waiting for read index response: %v", id1)
+ timeout = true
+ nr.notify(ErrReadIndexTimeout)
+ case <-nd.stopChan:
+ return
+ }
+ }
+ if !done {
+ continue
+ }
+ ai := nd.GetAppliedIndex()
+ if ai < rs.Index && rs.Index > 0 {
+ select {
+ case <-nd.applyWait.Wait(rs.Index):
+ case <-nd.stopChan:
+ return
+ }
+ }
+ nr.notify(nil)
}
- if !done {
- return false
- }
- ci := nd.GetCommittedIndex()
- if rs.Index <= 0 || ci >= rs.Index-1 {
- return true
- }
- nodeLog.Infof("not synced, committed %v, read index %v", ci, rs.Index)
- return false
}
-func (nd *KVNode) applySnapshot(np *nodeProgress, applyEvent *applyInfo) error {
+func (nd *KVNode) applySnapshot(np *nodeProgress, applyEvent *applyInfo) {
if raft.IsEmptySnap(applyEvent.snapshot) {
- return nil
+ return
}
// signaled to load snapshot
nd.rn.Infof("applying snapshot at index %d, snapshot: %v\n", np.snapi, applyEvent.snapshot.String())
- defer nd.rn.Infof("finished applying snapshot at index %d\n", np)
+ defer nd.rn.Infof("finished applying snapshot at index %v\n", np)
if applyEvent.snapshot.Metadata.Index <= np.appliedi {
nodeLog.Panicf("snapshot index [%d] should > progress.appliedIndex [%d] + 1",
applyEvent.snapshot.Metadata.Index, np.appliedi)
}
+ atomic.StoreInt32(&nd.applyingSnapshot, 1)
+ defer atomic.StoreInt32(&nd.applyingSnapshot, 0)
+ err := nd.PrepareSnapshot(applyEvent.snapshot)
+ if enableSnapTransferTest {
+ err = errors.New("auto test failed in snapshot transfer")
+ }
+ if applyEvent.applySnapshotResult != nil {
+ select {
+ case applyEvent.applySnapshotResult <- err:
+ case <-nd.stopChan:
+ }
+ }
+ if err != nil {
+ nd.rn.Errorf("prepare snapshot failed: %v", err.Error())
+ go func() {
+ select {
+ case <-nd.stopChan:
+ default:
+ nd.Stop()
+ }
+ }()
+ <-nd.stopChan
+ return
+ }
+
+ // need to wait for raft to persist the snapshot to disk here
+ select {
+ case <-applyEvent.raftDone:
+ case <-nd.stopChan:
+ return
+ }
// the snapshot restore may fail because of the remote snapshot is deleted
// and can not rsync from any other nodes.
// while starting we can not ignore or delete the snapshot since the wal may be cleaned on other snapshot.
- if err := nd.RestoreFromSnapshot(false, applyEvent.snapshot); err != nil {
- nodeLog.Error(err)
+ if enableSnapApplyTest {
+ err = errors.New("failed to restore from snapshot in failed test")
+ } else {
+ err = nd.RestoreFromSnapshot(applyEvent.snapshot)
+ }
+ if err != nil {
+ nd.rn.Errorf("restore snapshot failed: %v", err.Error())
go func() {
select {
case <-nd.stopChan:
@@ -868,14 +1246,18 @@ func (nd *KVNode) applySnapshot(np *nodeProgress, applyEvent *applyInfo) error {
}
}()
<-nd.stopChan
- return err
+ return
+ }
+ if enableSnapApplyBlockingTest {
+ wt := <-snapApplyBlockingC
+ time.Sleep(wt)
}
np.confState = applyEvent.snapshot.Metadata.ConfState
np.snapi = applyEvent.snapshot.Metadata.Index
np.appliedt = applyEvent.snapshot.Metadata.Term
np.appliedi = applyEvent.snapshot.Metadata.Index
- return nil
+ nd.SetLastSnapIndex(np.snapi)
}
// return (self removed, any conf changed, error)
@@ -887,24 +1269,34 @@ func (nd *KVNode) applyConfChangeEntry(evnt raftpb.Entry, confState *raftpb.Conf
return removeSelf, changed, err
}
-func (nd *KVNode) applyEntry(evnt raftpb.Entry, isReplaying bool) bool {
+func (nd *KVNode) applyEntry(evnt raftpb.Entry, isReplaying bool, batch IBatchOperator) bool {
forceBackup := false
+ var reqList BatchInternalRaftRequest
+ isRemoteSnapTransfer := false
+ isRemoteSnapApply := false
if evnt.Data != nil {
- // try redis command
- var reqList BatchInternalRaftRequest
- parseErr := reqList.Unmarshal(evnt.Data)
- if parseErr != nil {
- nd.rn.Infof("parse request failed: %v, data len %v, entry: %v, raw:%v",
- parseErr, len(evnt.Data), evnt,
- evnt.String())
+ if evnt.DataType == int32(RedisV2Req) {
+ var r InternalRaftRequest
+ r.Header.ID = evnt.ID
+ r.Header.Timestamp = evnt.Timestamp
+ r.Header.DataType = evnt.DataType
+ r.Data = evnt.Data
+ reqList.ReqNum = 1
+ reqList.Reqs = append(reqList.Reqs, r)
+ reqList.Timestamp = evnt.Timestamp
+ } else {
+ parseErr := reqList.Unmarshal(evnt.Data)
+ if parseErr != nil {
+ nd.rn.Errorf("parse request failed: %v, data len %v, entry: %v, raw:%v",
+ parseErr, len(evnt.Data), evnt,
+ evnt.String())
+ }
}
if len(reqList.Reqs) != int(reqList.ReqNum) {
nd.rn.Infof("request check failed %v, real len:%v",
reqList, len(reqList.Reqs))
}
- isRemoteSnapTransfer := false
- isRemoteSnapApply := false
if reqList.Type == FromClusterSyncer {
isApplied := nd.isAlreadyApplied(reqList)
// check if retrying duplicate req, we can just ignore old retry
@@ -922,45 +1314,38 @@ func (nd *KVNode) applyEntry(evnt raftpb.Entry, isReplaying bool) bool {
return false
}
isRemoteSnapTransfer, isRemoteSnapApply = nd.preprocessRemoteSnapApply(reqList)
- }
- var retErr error
- forceBackup, retErr = nd.sm.ApplyRaftRequest(isReplaying, reqList, evnt.Term, evnt.Index, nd.stopChan)
- if reqList.Type == FromClusterSyncer {
- nd.postprocessRemoteSnapApply(reqList, isRemoteSnapTransfer, isRemoteSnapApply, retErr)
+ if !isRemoteSnapApply && !isRemoteSnapTransfer {
+ // check if the commit index is continuous on the remote
+ if !nd.isContinueCommit(reqList) {
+ nd.rn.Errorf("request %v-%v is not continuous while syncing from remote", reqList.OrigTerm, reqList.OrigIndex)
+ }
+ }
}
}
+ // if evnt.Data is nil, it may be some other event such as a leader transfer
+ var retErr error
+ forceBackup, retErr = nd.sm.ApplyRaftRequest(isReplaying, batch, reqList, evnt.Term, evnt.Index, nd.stopChan)
+ if reqList.Type == FromClusterSyncer {
+ nd.postprocessRemoteApply(reqList, isRemoteSnapTransfer, isRemoteSnapApply, retErr)
+ }
return forceBackup
}
-func (nd *KVNode) applyAll(np *nodeProgress, applyEvent *applyInfo) (bool, bool) {
- var lastCommittedIndex uint64
- if len(applyEvent.ents) > 0 {
- lastCommittedIndex = applyEvent.ents[len(applyEvent.ents)-1].Index
- }
- if applyEvent.snapshot.Metadata.Index > lastCommittedIndex {
- lastCommittedIndex = applyEvent.snapshot.Metadata.Index
- }
- if lastCommittedIndex > nd.GetCommittedIndex() {
- nd.SetCommittedIndex(lastCommittedIndex)
- }
- snapErr := nd.applySnapshot(np, applyEvent)
- if applyEvent.applySnapshotResult != nil {
- select {
- case applyEvent.applySnapshotResult <- snapErr:
- case <-nd.stopChan:
- return false, false
- }
- }
- if snapErr != nil {
- nd.rn.Errorf("apply snapshot failed: %v", snapErr.Error())
- return false, false
- }
+func (nd *KVNode) applyEntries(np *nodeProgress, applyEvent *applyInfo) (bool, bool) {
if len(applyEvent.ents) == 0 {
return false, false
}
firsti := applyEvent.ents[0].Index
if firsti > np.appliedi+1 {
- nodeLog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, np.appliedi)
+ nodeLog.Errorf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, np.appliedi)
+ go func() {
+ select {
+ case <-nd.stopChan:
+ default:
+ nd.Stop()
+ }
+ }()
+ return false, false
}
var ents []raftpb.Entry
if np.appliedi+1-firsti < uint64(len(applyEvent.ents)) {
@@ -972,15 +1357,24 @@ func (nd *KVNode) applyAll(np *nodeProgress, applyEvent *applyInfo) (bool, bool)
var shouldStop bool
var confChanged bool
forceBackup := false
+ batch := nd.sm.GetBatchOperator()
for i := range ents {
evnt := ents[i]
isReplaying := evnt.Index <= nd.rn.lastIndex
switch evnt.Type {
case raftpb.EntryNormal:
- forceBackup = nd.applyEntry(evnt, isReplaying)
+ needBackup := nd.applyEntry(evnt, isReplaying, batch)
+ if needBackup {
+ forceBackup = true
+ }
case raftpb.EntryConfChange:
+ if batch != nil {
+ batch.CommitBatch()
+ }
removeSelf, changed, _ := nd.applyConfChangeEntry(evnt, &np.confState)
- confChanged = changed
+ if changed {
+ confChanged = changed
+ }
shouldStop = shouldStop || removeSelf
}
np.appliedi = evnt.Index
@@ -990,6 +1384,9 @@ func (nd *KVNode) applyAll(np *nodeProgress, applyEvent *applyInfo) (bool, bool)
nd.rn.MarkReplayFinished()
}
}
+ if batch != nil {
+ batch.CommitBatch()
+ }
if shouldStop {
nd.rn.Infof("I am removed from raft group: %v", nd.ns)
go func() {
@@ -1006,6 +1403,37 @@ func (nd *KVNode) applyAll(np *nodeProgress, applyEvent *applyInfo) (bool, bool)
return confChanged, forceBackup
}
+func (nd *KVNode) applyAll(np *nodeProgress, applyEvent *applyInfo) (bool, bool) {
+ // TODO: handle concurrent apply; if there is a conf change or snapshot event,
+ // we must wait for all previous events to be done.
+ // Note that if there is no conf change event, the confChanged flag must be false.
+ nd.applySnapshot(np, applyEvent)
+ start := time.Now()
+ confChanged, forceBackup := nd.applyEntries(np, applyEvent)
+ cost := time.Since(start)
+ if cost > raftSlow {
+ nd.rn.Infof("raft apply slow cost: %v, number %v", cost, len(applyEvent.ents))
+ }
+ if cost >= time.Millisecond {
+ metric.RaftWriteLatency.With(ps.Labels{
+ "namespace": nd.GetFullName(),
+ "step": "raft_sm_applyall",
+ }).Observe(float64(cost.Milliseconds()))
+ }
+
+ lastIndex := np.appliedi
+ if applyEvent.snapshot.Metadata.Index > lastIndex {
+ lastIndex = applyEvent.snapshot.Metadata.Index
+ }
+ if lastIndex > nd.GetAppliedIndex() {
+ nd.SetAppliedIndex(lastIndex)
+ }
+ nd.applyWait.Trigger(lastIndex)
+ return confChanged, forceBackup
+}
+
+// TODO: maybe we can apply concurrently for some storage engines, to avoid a single slow write blocking others.
+// We could use the same goroutine for the same table prefix, which keeps ordering within the same business data.
func (nd *KVNode) applyCommits(commitC <-chan applyInfo) {
defer func() {
nd.rn.Infof("apply commit exit")
@@ -1021,6 +1449,13 @@ func (nd *KVNode) applyCommits(commitC <-chan applyInfo) {
appliedi: snap.Metadata.Index,
}
nd.rn.Infof("starting state: %v\n", np)
+ // init applied index
+ lastIndex := np.appliedi
+ if lastIndex > nd.GetAppliedIndex() {
+ nd.SetAppliedIndex(lastIndex)
+ }
+ nd.applyWait.Trigger(lastIndex)
+
for {
select {
case <-nd.stopChan:
@@ -1040,16 +1475,23 @@ func (nd *KVNode) applyCommits(commitC <-chan applyInfo) {
nodeLog.Panicf("wrong events : %v", ent)
}
confChanged, forceBackup := nd.applyAll(&np, &ent)
+
+ // wait for the raft routine to finish its disk writes before triggering a
+ // snapshot, otherwise the applied index might be greater than the last index in raft
+ // storage, since the raft routine may be slower than the apply routine.
select {
case <-ent.raftDone:
case <-nd.stopChan:
return
}
- nd.maybeTriggerSnapshot(&np, confChanged, forceBackup)
- nd.rn.handleSendSnapshot(&np)
if ent.applyWaitDone != nil {
close(ent.applyWaitDone)
}
+ if len(commitC) == 0 {
+ nd.rn.node.NotifyEventCh()
+ }
+ nd.maybeTriggerSnapshot(&np, confChanged, forceBackup)
+ nd.rn.handleSendSnapshot(&np)
}
}
}
@@ -1058,14 +1500,16 @@ func (nd *KVNode) maybeTriggerSnapshot(np *nodeProgress, confChanged bool, force
if np.appliedi-np.snapi <= 0 {
return
}
- if np.appliedi <= nd.rn.lastIndex {
+ // we need to force a backup if there are too many logs since the last snapshot
+ behindTooMuch := np.appliedi-np.snapi > uint64(nd.rn.config.SnapCount*5)
+ if np.appliedi <= nd.rn.lastIndex && !behindTooMuch {
// replaying local log
if forceBackup {
nd.rn.Infof("ignore backup while replaying [applied index: %d | last replay index: %d]", np.appliedi, nd.rn.lastIndex)
}
return
}
- if nd.rn.Lead() == raft.None {
+ if nd.rn.Lead() == raft.None && !behindTooMuch {
return
}
@@ -1075,14 +1519,20 @@ func (nd *KVNode) maybeTriggerSnapshot(np *nodeProgress, confChanged bool, force
}
}
+ if np.appliedi < atomic.LoadUint64(&nd.lastFailedSnapIndex)+uint64(nd.rn.config.SnapCount) {
+ return
+ }
+ // TODO: need to wait until the concurrent apply buffer is empty
nd.rn.Infof("start snapshot [applied index: %d | last snapshot index: %d]", np.appliedi, np.snapi)
err := nd.rn.beginSnapshot(np.appliedt, np.appliedi, np.confState)
if err != nil {
nd.rn.Infof("begin snapshot failed: %v", err)
+ atomic.StoreUint64(&nd.lastFailedSnapIndex, np.appliedi)
return
}
np.snapi = np.appliedi
+ nd.SetLastSnapIndex(np.snapi)
}
func (nd *KVNode) GetSnapshot(term uint64, index uint64) (Snapshot, error) {
@@ -1096,7 +1546,7 @@ func (nd *KVNode) GetSnapshot(term uint64, index uint64) (Snapshot, error) {
return si, nil
}
-func (nd *KVNode) RestoreFromSnapshot(startup bool, raftSnapshot raftpb.Snapshot) error {
+func (nd *KVNode) PrepareSnapshot(raftSnapshot raftpb.Snapshot) error {
snapshot := raftSnapshot.Data
var si KVSnapInfo
err := json.Unmarshal(snapshot, &si)
@@ -1104,8 +1554,23 @@ func (nd *KVNode) RestoreFromSnapshot(startup bool, raftSnapshot raftpb.Snapshot
return err
}
nd.rn.RestoreMembers(si)
+ nd.rn.Infof("prepare snapshot here: %v", raftSnapshot.String())
+ err = nd.sm.PrepareSnapshot(raftSnapshot, nd.stopChan)
+ return err
+}
+
+func (nd *KVNode) RestoreFromSnapshot(raftSnapshot raftpb.Snapshot) error {
nd.rn.Infof("should recovery from snapshot here: %v", raftSnapshot.String())
- err = nd.sm.RestoreFromSnapshot(startup, raftSnapshot, nd.stopChan)
+ err := nd.sm.RestoreFromSnapshot(raftSnapshot, nd.stopChan)
+ if err != nil {
+ return err
+ }
+ snapshot := raftSnapshot.Data
+ var si KVSnapInfo
+ err = json.Unmarshal(snapshot, &si)
+ if err != nil {
+ return err
+ }
nd.remoteSyncedStates.RestoreStates(si.RemoteSyncedStates)
return err
}
@@ -1126,33 +1591,98 @@ func (nd *KVNode) GetLastLeaderChangedTime() int64 {
return nd.rn.getLastLeaderChangedTime()
}
+func (nd *KVNode) IsApplyingSnapshot() bool {
+ return atomic.LoadInt32(&nd.applyingSnapshot) == 1
+}
+
func (nd *KVNode) ReportMeLeaderToCluster() {
if nd.clusterInfo == nil {
return
}
if nd.rn.IsLead() {
+ if nd.IsApplyingSnapshot() {
+ nd.rn.Errorf("should not update raft leader to me while applying snapshot")
+ // should give up the raft leadership in this case
+ stats := nd.GetRaftStatus()
+ for rid, pr := range stats.Progress {
+ if pr.IsLearner || !nd.IsReplicaRaftReady(rid) {
+ continue
+ }
+ err := nd.TransferLeadership(rid)
+ if err == nil {
+ break
+ }
+ }
+ return
+ }
changed, err := nd.clusterInfo.UpdateMeForNamespaceLeader(nd.ns)
if err != nil {
nd.rn.Infof("update raft leader to me failed: %v", err)
} else if changed {
- nd.rn.Infof("update %v raft leader to me : %v", nd.ns, nd.rn.config.ID)
+ nd.rn.Infof("update %v raft leader to me : %v, at %v-%v", nd.ns, nd.rn.config.ID, nd.GetAppliedIndex(), nd.GetRaftStatus().Commit)
}
}
}
// should not block long in this
func (nd *KVNode) OnRaftLeaderChanged() {
- nd.expireHandler.LeaderChanged()
-
if nd.rn.IsLead() {
go nd.ReportMeLeaderToCluster()
}
}
+func (nd *KVNode) TransferLeadership(toRaftID uint64) error {
+ nd.rn.Infof("begin transfer leader to %v", toRaftID)
+ if !nd.rn.IsLead() {
+ return ErrNotLeader
+ }
+ oldLeader := nd.rn.Lead()
+ if oldLeader == toRaftID {
+ return ErrTransferLeaderSelfErr
+ }
+ waitTimeout := time.Duration(nd.machineConfig.ElectionTick) * time.Duration(nd.machineConfig.TickMs) * time.Millisecond
+ ctx, cancel := context.WithTimeout(context.Background(), waitTimeout)
+ defer cancel()
+ nd.rn.node.TransferLeadership(ctx, oldLeader, toRaftID)
+ for nd.rn.Lead() != toRaftID {
+ select {
+ case <-ctx.Done():
+ return errTimeoutLeaderTransfer
+ case <-time.After(200 * time.Millisecond):
+ }
+ }
+ nd.rn.Infof("finished transfer from %v to %v", oldLeader, toRaftID)
+ return nil
+}
+
func (nd *KVNode) Process(ctx context.Context, m raftpb.Message) error {
+ // avoid preparing a snapshot while the node is starting
+ if m.Type == raftpb.MsgSnap && !raft.IsEmptySnap(m.Snapshot) {
+ // we prepare the snapshot data here, before we send the install-snapshot message to raft,
+ // to avoid blocking the raft loop while transferring the snapshot data
+ nd.rn.SetPrepareSnapshot(true)
+ defer nd.rn.SetPrepareSnapshot(false)
+ nd.rn.Infof("prepare transfer snapshot : %v\n", m.Snapshot.String())
+ defer nd.rn.Infof("transfer snapshot done : %v\n", m.Snapshot.String())
+ if enableSnapTransferTest {
+ go nd.Stop()
+ return errors.New("auto test failed in snapshot transfer")
+ }
+ err := nd.sm.PrepareSnapshot(m.Snapshot, nd.stopChan)
+ if err != nil {
+ // we ignore the error here to allow a retry in the raft loop
+ nd.rn.Infof("transfer snapshot failed: %v, %v", m.Snapshot.String(), err.Error())
+ }
+ }
return nd.rn.Process(ctx, m)
}
+func (nd *KVNode) UpdateSnapshotState(term uint64, index uint64) {
+ if nd.sm != nil {
+ nd.sm.UpdateSnapshotState(term, index)
+ }
+}
+
func (nd *KVNode) ReportUnreachable(id uint64, group raftpb.Group) {
nd.rn.ReportUnreachable(id, group)
}
@@ -1166,3 +1696,13 @@ func (nd *KVNode) SaveDBFrom(r io.Reader, msg raftpb.Message) (int64, error) {
}
func (nd *KVNode) IsPeerRemoved(peerID uint64) bool { return false }
+
+func (nd *KVNode) CanPass(ts int64, cmd string, table string) bool {
+ return nd.slowLimiter.CanPass(ts, cmd, table)
+}
+func (nd *KVNode) MaybeAddSlow(ts int64, cost time.Duration, cmd, table string) {
+ nd.slowLimiter.MaybeAddSlow(ts, cost, cmd, table)
+}
+func (nd *KVNode) PreWaitQueue(ctx context.Context, cmd string, table string) (*SlowWaitDone, error) {
+ return nd.slowLimiter.PreWaitQueue(ctx, cmd, table)
+}
diff --git a/node/node_cmd_reg.go b/node/node_cmd_reg.go
index 2b4c1859..39dc03a1 100644
--- a/node/node_cmd_reg.go
+++ b/node/node_cmd_reg.go
@@ -1,10 +1,32 @@
package node
+func getWriteCmdType(cmd string) string {
+ switch cmd {
+ case "zadd", "zfixkey", "zincrby", "zrem", "zremrangebyrank", "zremrangebyscore", "zremrangebylex", "zclear", "zmclear", "zexpire", "zpersist":
+ return "zset"
+ case "sadd", "srem", "sclear", "smclear", "spop", "sexpire", "spersist":
+ return "set"
+ case "lfixkey", "lpush", "lpop", "lset", "ltrim", "rpop", "rpush", "lclear", "lmclear", "lexpire", "lpersist":
+ return "list"
+ default:
+ return "default"
+ }
+}
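+
+// For example: getWriteCmdType("zincrby") == "zset", getWriteCmdType("spop") == "set",
+// getWriteCmdType("rpush") == "list", and any command not listed above (such as "set"
+// or "hset") falls back to "default".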
+
func (kvsm *kvStoreSM) registerHandlers() {
+
+ kvsm.router.RegisterInternal("noopwrite", kvsm.localNoOpWriteCommand)
// only write command need to be registered as internal
// kv
kvsm.router.RegisterInternal("del", kvsm.localDelCommand)
+ kvsm.router.RegisterInternal("delifeq", kvsm.localDelIfEQCommand)
kvsm.router.RegisterInternal("set", kvsm.localSetCommand)
+ kvsm.router.RegisterInternal("setifeq", kvsm.localSetIfEQCommand)
+ kvsm.router.RegisterInternal("append", kvsm.localAppendCommand)
+ kvsm.router.RegisterInternal("setrange", kvsm.localSetRangeCommand)
+ kvsm.router.RegisterInternal("getset", kvsm.localGetSetCommand)
+ kvsm.router.RegisterInternal("setbit", kvsm.localBitSetCommand)
+ kvsm.router.RegisterInternal("setbitv2", kvsm.localBitSetV2Command)
kvsm.router.RegisterInternal("setnx", kvsm.localSetnxCommand)
kvsm.router.RegisterInternal("mset", kvsm.localMSetCommand)
kvsm.router.RegisterInternal("incr", kvsm.localIncrCommand)
@@ -12,6 +34,9 @@ func (kvsm *kvStoreSM) registerHandlers() {
kvsm.router.RegisterInternal("plset", kvsm.localPlsetCommand)
kvsm.router.RegisterInternal("pfadd", kvsm.localPFAddCommand)
//kvsm.router.RegisterInternal("pfcount", kvsm.localPFCountCommand)
+ // bitmap
+ kvsm.router.RegisterInternal("bitclear", kvsm.localBitClearCommand)
+
// hash
kvsm.router.RegisterInternal("hset", kvsm.localHSetCommand)
kvsm.router.RegisterInternal("hsetnx", kvsm.localHSetNXCommand)
@@ -51,18 +76,28 @@ func (kvsm *kvStoreSM) registerHandlers() {
kvsm.router.RegisterInternal("sclear", kvsm.localSclear)
kvsm.router.RegisterInternal("smclear", kvsm.localSmclear)
kvsm.router.RegisterInternal("spop", kvsm.localSpop)
- // expire
+ // expire&persist
kvsm.router.RegisterInternal("setex", kvsm.localSetexCommand)
kvsm.router.RegisterInternal("expire", kvsm.localExpireCommand)
kvsm.router.RegisterInternal("lexpire", kvsm.localListExpireCommand)
kvsm.router.RegisterInternal("hexpire", kvsm.localHashExpireCommand)
kvsm.router.RegisterInternal("sexpire", kvsm.localSetExpireCommand)
kvsm.router.RegisterInternal("zexpire", kvsm.localZSetExpireCommand)
+ kvsm.router.RegisterInternal("bexpire", kvsm.localBitExpireCommand)
+
kvsm.router.RegisterInternal("persist", kvsm.localPersistCommand)
kvsm.router.RegisterInternal("hpersist", kvsm.localHashPersistCommand)
kvsm.router.RegisterInternal("lpersist", kvsm.localListPersistCommand)
kvsm.router.RegisterInternal("spersist", kvsm.localSetPersistCommand)
kvsm.router.RegisterInternal("zpersist", kvsm.localZSetPersistCommand)
+ kvsm.router.RegisterInternal("bpersist", kvsm.localBitPersistCommand)
+
+ if enableSlowLimiterTest && kvsm.slowLimiter != nil {
+ kvsm.router.RegisterInternal("slowwrite1s_test", kvsm.slowLimiter.testSlowWrite1s)
+ kvsm.router.RegisterInternal("slowwrite100ms_test", kvsm.slowLimiter.testSlowWrite100ms)
+ kvsm.router.RegisterInternal("slowwrite50ms_test", kvsm.slowLimiter.testSlowWrite50ms)
+ kvsm.router.RegisterInternal("slowwrite5ms_test", kvsm.slowLimiter.testSlowWrite5ms)
+ }
}
func (nd *KVNode) registerHandler() {
@@ -70,144 +105,204 @@ func (nd *KVNode) registerHandler() {
// other learner roles should only sync from the raft log, so the redis API is not needed
return
}
+ // for testing without the rocksdb engine
+ nd.router.RegisterWrite("noopwrite", wrapWriteCommandKV(nd, checkOKRsp))
// for kv
- nd.router.Register(false, "get", wrapReadCommandK(nd.getCommand))
- nd.router.Register(false, "mget", wrapReadCommandKK(nd.mgetCommand))
- nd.router.Register(true, "set", wrapWriteCommandKV(nd, nd.setCommand))
- nd.router.Register(true, "setnx", wrapWriteCommandKV(nd, nd.setnxCommand))
- nd.router.Register(true, "incr", wrapWriteCommandK(nd, nd.incrCommand))
- nd.router.Register(true, "incrby", wrapWriteCommandKV(nd, nd.incrbyCommand))
- nd.router.Register(true, "pfadd", wrapWriteCommandKAnySubkey(nd, nd.pfaddCommand, 0))
- nd.router.Register(false, "pfcount", wrapReadCommandK(nd.pfcountCommand))
+ nd.router.RegisterRead("get", wrapReadCommandK(nd.getCommand))
+ nd.router.RegisterRead("stale.get", wrapReadCommandK(nd.getCommand))
+ nd.router.RegisterRead("stale.getversion", wrapReadCommandK(nd.getVerCommand))
+ nd.router.RegisterRead("stale.getexpired", wrapReadCommandK(nd.getExpiredCommand))
+ nd.router.RegisterRead("strlen", wrapReadCommandK(nd.strlenCommand))
+ nd.router.RegisterRead("getrange", wrapReadCommandKAnySubkeyN(nd.getRangeCommand, 2))
+ nd.router.RegisterRead("getnolock", wrapReadCommandK(nd.getNoLockCommand))
+ nd.router.RegisterRead("getbit", wrapReadCommandKAnySubkeyN(nd.getbitCommand, 1))
+ nd.router.RegisterRead("bitcount", wrapReadCommandKAnySubkey(nd.bitcountCommand))
+ nd.router.RegisterRead("mget", wrapReadCommandKK(nd.mgetCommand))
+ nd.router.RegisterWrite("set", nd.setCommand)
+ nd.router.RegisterWrite("append", wrapWriteCommandKV(nd, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("setrange", wrapWriteCommandKAnySubkey(nd, checkAndRewriteIntRsp, 2))
+ nd.router.RegisterWrite("getset", wrapWriteCommandKV(nd, checkAndRewriteBulkRsp))
+ nd.router.RegisterWrite("setbit", nd.setbitCommand)
+ nd.router.RegisterWrite("setbitv2", nd.setbitCommand)
+ nd.router.RegisterWrite("setnx", nd.setnxCommand)
+ nd.router.RegisterWrite("setifeq", nd.setIfEQCommand)
+ nd.router.RegisterWrite("delifeq", nd.delIfEQCommand)
+ nd.router.RegisterWrite("incr", wrapWriteCommandK(nd, nil, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("incrby", wrapWriteCommandKV(nd, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("pfadd", wrapWriteCommandKAnySubkey(nd, checkAndRewriteIntRsp, 0))
+ nd.router.RegisterRead("pfcount", wrapReadCommandK(nd.pfcountCommand))
+ nd.router.RegisterWrite("bitclear", wrapWriteCommandK(nd, nil, checkAndRewriteIntRsp))
// for hash
- nd.router.Register(false, "hget", wrapReadCommandKSubkey(nd.hgetCommand))
- nd.router.Register(false, "hgetall", wrapReadCommandK(nd.hgetallCommand))
- nd.router.Register(false, "hkeys", wrapReadCommandK(nd.hkeysCommand))
- nd.router.Register(false, "hexists", wrapReadCommandKSubkey(nd.hexistsCommand))
- nd.router.Register(false, "hmget", wrapReadCommandKSubkeySubkey(nd.hmgetCommand))
- nd.router.Register(false, "hlen", wrapReadCommandK(nd.hlenCommand))
- nd.router.Register(true, "hset", wrapWriteCommandKSubkeyV(nd, nd.hsetCommand))
- nd.router.Register(true, "hsetnx", wrapWriteCommandKSubkeyV(nd, nd.hsetnxCommand))
- nd.router.Register(true, "hmset", wrapWriteCommandKSubkeyVSubkeyV(nd, nd.hmsetCommand))
- nd.router.Register(true, "hdel", wrapWriteCommandKSubkeySubkey(nd, nd.hdelCommand))
- nd.router.Register(true, "hincrby", wrapWriteCommandKSubkeyV(nd, nd.hincrbyCommand))
- nd.router.Register(true, "hclear", wrapWriteCommandK(nd, nd.hclearCommand))
+ nd.router.RegisterRead("hget", wrapReadCommandKSubkey(nd.hgetCommand))
+ nd.router.RegisterRead("stale.hget.version", wrapReadCommandKSubkey(nd.hgetVerCommand))
+ nd.router.RegisterRead("stale.hgetall.expired", wrapReadCommandK(nd.hgetallExpiredCommand))
+ nd.router.RegisterRead("stale.hmget.expired", wrapReadCommandKSubkeySubkey(nd.hmgetExpiredCommand))
+ nd.router.RegisterRead("hgetall", wrapReadCommandK(nd.hgetallCommand))
+ nd.router.RegisterRead("hkeys", wrapReadCommandK(nd.hkeysCommand))
+ nd.router.RegisterRead("hvals", wrapReadCommandK(nd.hvalsCommand))
+ nd.router.RegisterRead("hexists", wrapReadCommandKSubkey(nd.hexistsCommand))
+ nd.router.RegisterRead("hmget", wrapReadCommandKSubkeySubkey(nd.hmgetCommand))
+ nd.router.RegisterRead("hlen", wrapReadCommandK(nd.hlenCommand))
+ nd.router.RegisterWrite("hset", wrapWriteCommandKSubkeyV(nd, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("hsetnx", wrapWriteCommandKSubkeyV(nd, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("hmset", wrapWriteCommandKSubkeyVSubkeyV(nd, checkOKRsp))
+ nd.router.RegisterWrite("hdel", wrapWriteCommandKSubkeySubkey(nd, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("hincrby", wrapWriteCommandKSubkeyV(nd, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("hclear", wrapWriteCommandK(nd, nil, checkAndRewriteIntRsp))
// for json
- nd.router.Register(false, "json.get", wrapReadCommandKSubkeySubkey(nd.jsonGetCommand))
- nd.router.Register(false, "json.keyexists", wrapReadCommandK(nd.jsonKeyExistsCommand))
+ nd.router.RegisterRead("json.get", wrapReadCommandKAnySubkey(nd.jsonGetCommand))
+ nd.router.RegisterRead("json.keyexists", wrapReadCommandK(nd.jsonKeyExistsCommand))
// get the same path from several json keys
- nd.router.Register(false, "json.mkget", nd.jsonmkGetCommand)
- nd.router.Register(false, "json.type", wrapReadCommandKAnySubkey(nd.jsonTypeCommand))
- nd.router.Register(false, "json.arrlen", wrapReadCommandKAnySubkey(nd.jsonArrayLenCommand))
- nd.router.Register(false, "json.objkeys", wrapReadCommandKAnySubkey(nd.jsonObjKeysCommand))
- nd.router.Register(false, "json.objlen", wrapReadCommandKAnySubkey(nd.jsonObjLenCommand))
- nd.router.Register(true, "json.set", wrapWriteCommandKSubkeyV(nd, nd.jsonSetCommand))
- nd.router.Register(true, "json.del", wrapWriteCommandKSubkeySubkey(nd, nd.jsonDelCommand))
- nd.router.Register(true, "json.arrappend", wrapWriteCommandKAnySubkey(nd, nd.jsonArrayAppendCommand, 2))
- nd.router.Register(true, "json.arrpop", wrapWriteCommandKAnySubkey(nd, nd.jsonArrayPopCommand, 0))
+ nd.router.RegisterRead("json.mkget", nd.jsonmkGetCommand)
+ nd.router.RegisterRead("json.type", wrapReadCommandKAnySubkey(nd.jsonTypeCommand))
+ nd.router.RegisterRead("json.arrlen", wrapReadCommandKAnySubkey(nd.jsonArrayLenCommand))
+ nd.router.RegisterRead("json.objkeys", wrapReadCommandKAnySubkey(nd.jsonObjKeysCommand))
+ nd.router.RegisterRead("json.objlen", wrapReadCommandKAnySubkey(nd.jsonObjLenCommand))
+ nd.router.RegisterWrite("json.set", wrapWriteCommandKSubkeyV(nd, checkOKRsp))
+ nd.router.RegisterWrite("json.del", wrapWriteCommandKAnySubkey(nd, checkAndRewriteIntRsp, 0))
+ nd.router.RegisterWrite("json.arrappend", wrapWriteCommandKAnySubkey(nd, checkAndRewriteIntRsp, 2))
+ nd.router.RegisterWrite("json.arrpop", wrapWriteCommandKAnySubkey(nd, checkAndRewriteBulkRsp, 0))
// for list
- nd.router.Register(false, "lindex", wrapReadCommandKSubkey(nd.lindexCommand))
- nd.router.Register(false, "llen", wrapReadCommandK(nd.llenCommand))
- nd.router.Register(false, "lrange", wrapReadCommandKAnySubkey(nd.lrangeCommand))
- nd.router.Register(true, "lfixkey", wrapWriteCommandK(nd, nd.lfixkeyCommand))
- nd.router.Register(true, "lpop", wrapWriteCommandK(nd, nd.lpopCommand))
- nd.router.Register(true, "lpush", wrapWriteCommandKVV(nd, nd.lpushCommand))
- nd.router.Register(true, "lset", nd.lsetCommand)
- nd.router.Register(true, "ltrim", nd.ltrimCommand)
- nd.router.Register(true, "rpop", wrapWriteCommandK(nd, nd.rpopCommand))
- nd.router.Register(true, "rpush", wrapWriteCommandKVV(nd, nd.rpushCommand))
- nd.router.Register(true, "lclear", wrapWriteCommandK(nd, nd.lclearCommand))
+ nd.router.RegisterRead("lindex", wrapReadCommandKSubkey(nd.lindexCommand))
+ nd.router.RegisterRead("llen", wrapReadCommandK(nd.llenCommand))
+ nd.router.RegisterRead("lrange", wrapReadCommandKAnySubkey(nd.lrangeCommand))
+ nd.router.RegisterWrite("lfixkey", wrapWriteCommandK(nd, nil, checkOKRsp))
+ nd.router.RegisterWrite("lpop", wrapWriteCommandK(nd, nd.preCheckListLength, checkAndRewriteBulkRsp))
+ nd.router.RegisterWrite("lpush", wrapWriteCommandKAnySubkey(nd, checkAndRewriteIntRsp, 1))
+ nd.router.RegisterWrite("lset", nd.lsetCommand)
+ nd.router.RegisterWrite("ltrim", nd.ltrimCommand)
+ nd.router.RegisterWrite("rpop", wrapWriteCommandK(nd, nd.preCheckListLength, checkAndRewriteBulkRsp))
+ nd.router.RegisterWrite("rpush", wrapWriteCommandKAnySubkey(nd, checkAndRewriteIntRsp, 1))
+ nd.router.RegisterWrite("lclear", wrapWriteCommandK(nd, nil, checkAndRewriteIntRsp))
// for zset
- nd.router.Register(false, "zscore", wrapReadCommandKSubkey(nd.zscoreCommand))
- nd.router.Register(false, "zcount", wrapReadCommandKAnySubkey(nd.zcountCommand))
- nd.router.Register(false, "zcard", wrapReadCommandK(nd.zcardCommand))
- nd.router.Register(false, "zlexcount", wrapReadCommandKAnySubkey(nd.zlexcountCommand))
- nd.router.Register(false, "zrange", wrapReadCommandKAnySubkey(nd.zrangeCommand))
- nd.router.Register(false, "zrevrange", wrapReadCommandKAnySubkey(nd.zrevrangeCommand))
- nd.router.Register(false, "zrangebylex", wrapReadCommandKAnySubkey(nd.zrangebylexCommand))
- nd.router.Register(false, "zrangebyscore", wrapReadCommandKAnySubkey(nd.zrangebyscoreCommand))
- nd.router.Register(false, "zrevrangebyscore", wrapReadCommandKAnySubkey(nd.zrevrangebyscoreCommand))
- nd.router.Register(false, "zrank", wrapReadCommandKSubkey(nd.zrankCommand))
- nd.router.Register(false, "zrevrank", wrapReadCommandKSubkey(nd.zrevrankCommand))
-
- nd.router.Register(true, "zfixkey", wrapWriteCommandK(nd, nd.zfixkeyCommand))
- nd.router.Register(true, "zadd", nd.zaddCommand)
- nd.router.Register(true, "zincrby", nd.zincrbyCommand)
- nd.router.Register(true, "zrem", wrapWriteCommandKSubkeySubkey(nd, nd.zremCommand))
- nd.router.Register(true, "zremrangebyrank", nd.zremrangebyrankCommand)
- nd.router.Register(true, "zremrangebyscore", nd.zremrangebyscoreCommand)
- nd.router.Register(true, "zremrangebylex", nd.zremrangebylexCommand)
- nd.router.Register(true, "zclear", wrapWriteCommandK(nd, nd.zclearCommand))
+ nd.router.RegisterRead("zscore", wrapReadCommandKSubkey(nd.zscoreCommand))
+ nd.router.RegisterRead("zcount", wrapReadCommandKAnySubkey(nd.zcountCommand))
+ nd.router.RegisterRead("zcard", wrapReadCommandK(nd.zcardCommand))
+ nd.router.RegisterRead("zlexcount", wrapReadCommandKAnySubkey(nd.zlexcountCommand))
+ nd.router.RegisterRead("zrange", wrapReadCommandKAnySubkey(nd.zrangeCommand))
+ nd.router.RegisterRead("zrevrange", wrapReadCommandKAnySubkey(nd.zrevrangeCommand))
+ nd.router.RegisterRead("zrangebylex", wrapReadCommandKAnySubkey(nd.zrangebylexCommand))
+ nd.router.RegisterRead("zrangebyscore", wrapReadCommandKAnySubkey(nd.zrangebyscoreCommand))
+ nd.router.RegisterRead("zrevrangebyscore", wrapReadCommandKAnySubkey(nd.zrevrangebyscoreCommand))
+ nd.router.RegisterRead("zrank", wrapReadCommandKSubkey(nd.zrankCommand))
+ nd.router.RegisterRead("zrevrank", wrapReadCommandKSubkey(nd.zrevrankCommand))
+
+ nd.router.RegisterWrite("zfixkey", wrapWriteCommandK(nd, nil, checkOKRsp))
+ nd.router.RegisterWrite("zadd", nd.zaddCommand)
+ nd.router.RegisterWrite("zincrby", nd.zincrbyCommand)
+ nd.router.RegisterWrite("zrem", nd.zremCommand)
+ nd.router.RegisterWrite("zremrangebyrank", nd.zremrangebyrankCommand)
+ nd.router.RegisterWrite("zremrangebyscore", nd.zremrangebyscoreCommand)
+ nd.router.RegisterWrite("zremrangebylex", nd.zremrangebylexCommand)
+ nd.router.RegisterWrite("zclear", wrapWriteCommandK(nd, nil, checkAndRewriteIntRsp))
// for set
- nd.router.Register(false, "scard", wrapReadCommandK(nd.scardCommand))
- nd.router.Register(false, "sismember", wrapReadCommandKSubkey(nd.sismemberCommand))
- nd.router.Register(false, "smembers", wrapReadCommandK(nd.smembersCommand))
- nd.router.Register(true, "spop", nd.spopCommand)
- nd.router.Register(true, "sadd", wrapWriteCommandKSubkeySubkey(nd, nd.saddCommand))
- nd.router.Register(true, "srem", wrapWriteCommandKSubkeySubkey(nd, nd.sremCommand))
- nd.router.Register(true, "sclear", wrapWriteCommandK(nd, nd.sclearCommand))
+ nd.router.RegisterRead("scard", wrapReadCommandK(nd.scardCommand))
+ nd.router.RegisterRead("sismember", wrapReadCommandKSubkey(nd.sismemberCommand))
+ nd.router.RegisterRead("smembers", wrapReadCommandK(nd.smembersCommand))
+ nd.router.RegisterRead("srandmember", wrapReadCommandKAnySubkey(nd.srandmembersCommand))
+ nd.router.RegisterWrite("spop", nd.spopCommand)
+ nd.router.RegisterWrite("sadd", nd.saddCommand)
+ nd.router.RegisterWrite("srem", nd.sremCommand)
+ nd.router.RegisterWrite("sclear", wrapWriteCommandK(nd, nil, checkAndRewriteIntRsp))
// for ttl
- nd.router.Register(false, "ttl", wrapReadCommandK(nd.ttlCommand))
- nd.router.Register(false, "httl", wrapReadCommandK(nd.httlCommand))
- nd.router.Register(false, "lttl", wrapReadCommandK(nd.lttlCommand))
- nd.router.Register(false, "sttl", wrapReadCommandK(nd.sttlCommand))
- nd.router.Register(false, "zttl", wrapReadCommandK(nd.zttlCommand))
-
- nd.router.Register(true, "setex", wrapWriteCommandKVV(nd, nd.setexCommand))
- nd.router.Register(true, "expire", wrapWriteCommandKV(nd, nd.expireCommand))
- nd.router.Register(true, "hexpire", wrapWriteCommandKV(nd, nd.hashExpireCommand))
- nd.router.Register(true, "lexpire", wrapWriteCommandKV(nd, nd.listExpireCommand))
- nd.router.Register(true, "sexpire", wrapWriteCommandKV(nd, nd.setExpireCommand))
- nd.router.Register(true, "zexpire", wrapWriteCommandKV(nd, nd.zsetExpireCommand))
-
- nd.router.Register(true, "persist", wrapWriteCommandK(nd, nd.persistCommand))
- nd.router.Register(true, "hpersist", wrapWriteCommandK(nd, nd.persistCommand))
- nd.router.Register(true, "lpersist", wrapWriteCommandK(nd, nd.persistCommand))
- nd.router.Register(true, "spersist", wrapWriteCommandK(nd, nd.persistCommand))
- nd.router.Register(true, "zpersist", wrapWriteCommandK(nd, nd.persistCommand))
+ nd.router.RegisterRead("ttl", wrapReadCommandK(nd.ttlCommand))
+ nd.router.RegisterRead("httl", wrapReadCommandK(nd.httlCommand))
+ nd.router.RegisterRead("lttl", wrapReadCommandK(nd.lttlCommand))
+ nd.router.RegisterRead("sttl", wrapReadCommandK(nd.sttlCommand))
+ nd.router.RegisterRead("zttl", wrapReadCommandK(nd.zttlCommand))
+ nd.router.RegisterRead("bttl", wrapReadCommandK(nd.bttlCommand))
+ // extended key-existence checks
+ nd.router.RegisterRead("hkeyexist", wrapReadCommandK(nd.hKeyExistCommand))
+ nd.router.RegisterRead("lkeyexist", wrapReadCommandK(nd.lKeyExistCommand))
+ nd.router.RegisterRead("skeyexist", wrapReadCommandK(nd.sKeyExistCommand))
+ nd.router.RegisterRead("zkeyexist", wrapReadCommandK(nd.zKeyExistCommand))
+ nd.router.RegisterRead("bkeyexist", wrapReadCommandK(nd.bKeyExistCommand))
+
+ nd.router.RegisterWrite("setex", wrapWriteCommandKVV(nd, checkOKRsp))
+ nd.router.RegisterWrite("expire", wrapWriteCommandKV(nd, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("hexpire", wrapWriteCommandKV(nd, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("lexpire", wrapWriteCommandKV(nd, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("sexpire", wrapWriteCommandKV(nd, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("zexpire", wrapWriteCommandKV(nd, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("bexpire", wrapWriteCommandKV(nd, checkAndRewriteIntRsp))
+
+ nd.router.RegisterWrite("persist", wrapWriteCommandK(nd, nil, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("hpersist", wrapWriteCommandK(nd, nil, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("lpersist", wrapWriteCommandK(nd, nil, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("spersist", wrapWriteCommandK(nd, nil, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("zpersist", wrapWriteCommandK(nd, nil, checkAndRewriteIntRsp))
+ nd.router.RegisterWrite("bpersist", wrapWriteCommandK(nd, nil, checkAndRewriteIntRsp))
// for scan
- nd.router.Register(false, "hscan", wrapReadCommandKAnySubkey(nd.hscanCommand))
- nd.router.Register(false, "sscan", wrapReadCommandKAnySubkey(nd.sscanCommand))
- nd.router.Register(false, "zscan", wrapReadCommandKAnySubkey(nd.zscanCommand))
+ nd.router.RegisterRead("hscan", wrapReadCommandKAnySubkey(nd.hscanCommand))
+ nd.router.RegisterRead("sscan", wrapReadCommandKAnySubkey(nd.sscanCommand))
+ nd.router.RegisterRead("zscan", wrapReadCommandKAnySubkey(nd.zscanCommand))
+ nd.router.RegisterRead("hrevscan", wrapReadCommandKAnySubkey(nd.hscanCommand))
+ nd.router.RegisterRead("srevscan", wrapReadCommandKAnySubkey(nd.sscanCommand))
+ nd.router.RegisterRead("zrevscan", wrapReadCommandKAnySubkey(nd.zscanCommand))
// for geohash
- nd.router.Register(true, "geoadd", nd.geoaddCommand)
- nd.router.Register(false, "geohash", wrapReadCommandKAnySubkeyN(nd.geohashCommand, 1))
- nd.router.Register(false, "geodist", wrapReadCommandKAnySubkey(nd.geodistCommand))
- nd.router.Register(false, "geopos", wrapReadCommandKAnySubkeyN(nd.geoposCommand, 1))
- nd.router.Register(false, "georadius", wrapReadCommandKAnySubkeyN(nd.geoRadiusCommand, 4))
- nd.router.Register(false, "georadiusbymember", wrapReadCommandKAnySubkeyN(nd.geoRadiusByMemberCommand, 3))
+ nd.router.RegisterWrite("geoadd", nd.geoaddCommand)
+ nd.router.RegisterRead("geohash", wrapReadCommandKAnySubkeyN(nd.geohashCommand, 1))
+ nd.router.RegisterRead("geodist", wrapReadCommandKAnySubkey(nd.geodistCommand))
+ nd.router.RegisterRead("geopos", wrapReadCommandKAnySubkeyN(nd.geoposCommand, 1))
+ nd.router.RegisterRead("georadius", wrapReadCommandKAnySubkeyN(nd.geoRadiusCommand, 4))
+ nd.router.RegisterRead("georadiusbymember", wrapReadCommandKAnySubkeyN(nd.geoRadiusByMemberCommand, 3))
// for commands across multiple partitions
nd.router.RegisterMerge("scan", wrapMergeCommand(nd.scanCommand))
nd.router.RegisterMerge("advscan", nd.advanceScanCommand)
+ nd.router.RegisterMerge("revscan", wrapMergeCommand(nd.scanCommand))
+ nd.router.RegisterMerge("advrevscan", nd.advanceScanCommand)
nd.router.RegisterMerge("fullscan", nd.fullScanCommand)
nd.router.RegisterMerge("hidx.from", nd.hindexSearchCommand)
nd.router.RegisterMerge("exists", wrapMergeCommandKK(nd.existsCommand))
- nd.router.RegisterWriteMerge("del", wrapWriteMergeCommandKK(nd, nd.delCommand))
+ // make sure merged write commands are stopped if the cluster is not allowed to write
+ nd.router.RegisterWriteMerge("del", wrapWriteMergeCommandKK(nd, checkAndRewriteIntRsp))
//nd.router.RegisterWriteMerge("mset", nd.msetCommand)
- nd.router.RegisterWriteMerge("plset", wrapWriteMergeCommandKVKV(nd, nd.plsetCommand))
+ nd.router.RegisterWriteMerge("plset", wrapWriteMergeCommandKVKV(nd, nil))
+ if enableSlowLimiterTest {
+ nd.router.RegisterWrite("slowwrite1s_test", wrapWriteCommandKV(nd, checkOKRsp))
+ nd.router.RegisterWrite("slowwrite100ms_test", wrapWriteCommandKV(nd, checkOKRsp))
+ nd.router.RegisterWrite("slowwrite50ms_test", wrapWriteCommandKV(nd, checkOKRsp))
+ nd.router.RegisterWrite("slowwrite5ms_test", wrapWriteCommandKV(nd, checkOKRsp))
+ }
}
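
The RegisterRead/RegisterWrite split above is presumably what lets the node refuse writes when the cluster is marked read-only (see the comment on the merged "del" registration). A minimal sketch of that idea with hypothetical types; the real router also handles merge commands, wrappers, and richer request/response objects:

package routerdemo

import (
	"errors"
	"strings"
	"sync/atomic"
)

// cmdHandler is a simplified handler signature for illustration only.
type cmdHandler func(args ...string) (interface{}, error)

// cmdRouter is an illustrative stand-in for the RegisterRead/RegisterWrite split:
// read handlers are always dispatched, write handlers are rejected while the
// cluster is marked read-only.
type cmdRouter struct {
	readHandlers  map[string]cmdHandler
	writeHandlers map[string]cmdHandler
	writeDisabled int32
}

func newCmdRouter() *cmdRouter {
	return &cmdRouter{
		readHandlers:  make(map[string]cmdHandler),
		writeHandlers: make(map[string]cmdHandler),
	}
}

func (r *cmdRouter) RegisterRead(name string, h cmdHandler)  { r.readHandlers[strings.ToLower(name)] = h }
func (r *cmdRouter) RegisterWrite(name string, h cmdHandler) { r.writeHandlers[strings.ToLower(name)] = h }

func (r *cmdRouter) SetWriteDisabled(disabled bool) {
	var v int32
	if disabled {
		v = 1
	}
	atomic.StoreInt32(&r.writeDisabled, v)
}

func (r *cmdRouter) Dispatch(name string, args ...string) (interface{}, error) {
	name = strings.ToLower(name)
	if h, ok := r.readHandlers[name]; ok {
		return h(args...)
	}
	if h, ok := r.writeHandlers[name]; ok {
		if atomic.LoadInt32(&r.writeDisabled) == 1 {
			return nil, errors.New("cluster is not allowed to write")
		}
		return h(args...)
	}
	return nil, errors.New("unknown command: " + name)
}
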
func (kvsm *kvStoreSM) registerConflictHandlers() {
// only write commands
kvsm.cRouter.Register("del", kvsm.checkKVConflict)
+ kvsm.cRouter.Register("delifeq", kvsm.checkKVConflict)
kvsm.cRouter.Register("set", kvsm.checkKVConflict)
+ kvsm.cRouter.Register("setifeq", kvsm.checkKVConflict)
+ kvsm.cRouter.Register("append", kvsm.checkKVConflict)
+ kvsm.cRouter.Register("setrange", kvsm.checkKVConflict)
+ kvsm.cRouter.Register("getset", kvsm.checkKVConflict)
kvsm.cRouter.Register("setnx", kvsm.checkKVConflict)
kvsm.cRouter.Register("incr", kvsm.checkKVConflict)
kvsm.cRouter.Register("incrby", kvsm.checkKVConflict)
kvsm.cRouter.Register("plset", kvsm.checkKVKVConflict)
// hll
kvsm.cRouter.Register("pfadd", kvsm.checkHLLConflict)
+ // bitmap
+ kvsm.cRouter.Register("setbitv2", kvsm.checkBitmapConflict)
+ kvsm.cRouter.Register("setbit", kvsm.checkBitmapConflict)
+ kvsm.cRouter.Register("bitclear", kvsm.checkBitmapConflict)
+ kvsm.cRouter.Register("bexpire", kvsm.checkBitmapConflict)
+ kvsm.cRouter.Register("bpersist", kvsm.checkBitmapConflict)
// hash
kvsm.cRouter.Register("hset", kvsm.checkHashKFVConflict)
kvsm.cRouter.Register("hsetnx", kvsm.checkHashKFVConflict)
kvsm.cRouter.Register("hincrby", kvsm.checkHashKFVConflict)
kvsm.cRouter.Register("hmset", kvsm.checkHashKFVConflict)
kvsm.cRouter.Register("hdel", kvsm.checkHashKFFConflict)
+ kvsm.cRouter.Register("hincrby", kvsm.checkHashKFVConflict)
+ // no field can be checked in these commands
+ //kvsm.cRouter.Register("hclear", kvsm.localHclearCommand)
+ //kvsm.cRouter.Register("hexpire", kvsm.localHashExpireCommand)
+ //kvsm.cRouter.Register("hpersist", kvsm.localHashPersistCommand)
// list
kvsm.cRouter.Register("lpop", kvsm.checkListConflict)
@@ -216,6 +311,9 @@ func (kvsm *kvStoreSM) registerConflictHandlers() {
kvsm.cRouter.Register("ltrim", kvsm.checkListConflict)
kvsm.cRouter.Register("rpop", kvsm.checkListConflict)
kvsm.cRouter.Register("rpush", kvsm.checkListConflict)
+ kvsm.cRouter.Register("lclear", kvsm.checkListConflict)
+ kvsm.cRouter.Register("lexpire", kvsm.checkListConflict)
+ kvsm.cRouter.Register("lpersist", kvsm.checkListConflict)
// zset
kvsm.cRouter.Register("zadd", kvsm.checkZSetConflict)
kvsm.cRouter.Register("zincrby", kvsm.checkZSetConflict)
@@ -223,12 +321,19 @@ func (kvsm *kvStoreSM) registerConflictHandlers() {
kvsm.cRouter.Register("zremrangebyrank", kvsm.checkZSetConflict)
kvsm.cRouter.Register("zremrangebyscore", kvsm.checkZSetConflict)
kvsm.cRouter.Register("zremrangebylex", kvsm.checkZSetConflict)
+ kvsm.cRouter.Register("zclear", kvsm.checkZSetConflict)
+ kvsm.cRouter.Register("zexpire", kvsm.checkZSetConflict)
+ kvsm.cRouter.Register("zpersist", kvsm.checkZSetConflict)
// set
kvsm.cRouter.Register("sadd", kvsm.checkSetConflict)
kvsm.cRouter.Register("srem", kvsm.checkSetConflict)
kvsm.cRouter.Register("spop", kvsm.checkSetConflict)
+ kvsm.cRouter.Register("sclear", kvsm.checkSetConflict)
+ kvsm.cRouter.Register("sexpire", kvsm.checkSetConflict)
+ kvsm.cRouter.Register("spersist", kvsm.checkSetConflict)
// expire
kvsm.cRouter.Register("setex", kvsm.checkKVConflict)
kvsm.cRouter.Register("expire", kvsm.checkKVConflict)
kvsm.cRouter.Register("persist", kvsm.checkKVConflict)
+ // for json: no conflict handlers registered yet
}
diff --git a/node/node_test.go b/node/node_test.go
new file mode 100644
index 00000000..e2f759ea
--- /dev/null
+++ b/node/node_test.go
@@ -0,0 +1,131 @@
+package node
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
+ "github.com/youzan/ZanRedisDB/pkg/idutil"
+ "github.com/youzan/ZanRedisDB/pkg/wait"
+ "github.com/youzan/ZanRedisDB/rockredis"
+)
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ if testing.Verbose() {
+ SetLogLevel(int(common.LOG_DETAIL))
+ rockredis.SetLogLevel(int32(common.LOG_DETAIL))
+ engine.SetLogLevel(int32(common.LOG_DETAIL))
+ }
+ ret := m.Run()
+ os.Exit(ret)
+}
+
+func TestWaitReqPools(t *testing.T) {
+ wrPools := newWaitReqPoolArray()
+
+ wr := wrPools.getWaitReq(1)
+ assert.Equal(t, 1, cap(wr.reqs.Reqs))
+ //assert.Equal(t, minPoolIDLen, cap(wr.ids))
+ //wr = wrPools.getWaitReq(minPoolIDLen)
+ //assert.Equal(t, minPoolIDLen, cap(wr.ids))
+ //wr = wrPools.getWaitReq(minPoolIDLen + 1)
+ //assert.Equal(t, minPoolIDLen*2, cap(wr.ids))
+ //wr = wrPools.getWaitReq(minPoolIDLen * 2)
+ //assert.Equal(t, minPoolIDLen*2, cap(wr.ids))
+ //wr = wrPools.getWaitReq(minPoolIDLen*2 + 1)
+ //assert.Equal(t, minPoolIDLen*2*2, cap(wr.ids))
+ //wr = wrPools.getWaitReq(minPoolIDLen * 2 * 2)
+ //assert.Equal(t, minPoolIDLen*2*2, cap(wr.ids))
+ //wr = wrPools.getWaitReq(minPoolIDLen*2*2 + 1)
+ //assert.Equal(t, minPoolIDLen*2*2*2, cap(wr.ids))
+ //wr = wrPools.getWaitReq(minPoolIDLen * 2 * 2 * 2)
+ //assert.Equal(t, minPoolIDLen*2*2*2, cap(wr.ids))
+ wr.release(true)
+ //wr = wrPools.getWaitReq(maxPoolIDLen)
+ //assert.Equal(t, minPoolIDLen*int(math.Pow(float64(2), float64(waitPoolSize-1))), cap(wr.ids))
+ //wr.release()
+ wr = wrPools.getWaitReq(maxPoolIDLen + 1)
+ assert.Equal(t, maxPoolIDLen+1, cap(wr.reqs.Reqs))
+ wr.release(true)
+}
+
+func TestProposeWaitMoreThanOnceTrigger(t *testing.T) {
+ var reqList BatchInternalRaftRequest
+ w := wait.New()
+ reqIDGen := idutil.NewGenerator(uint16(1), time.Now())
+ reqList.ReqId = reqIDGen.Next()
+ // must register before propose
+ wr := w.Register(reqList.ReqId)
+ ctx, cancel := context.WithTimeout(context.Background(), proposeTimeout)
+
+ var futureRsp FutureRsp
+ futureRsp.waitFunc = func() (interface{}, error) {
+ var rsp interface{}
+ var ok bool
+ var err error
+ // will always return a response: either a timeout error or the triggered result
+ select {
+ case <-ctx.Done():
+ err = ctx.Err()
+ if err == context.Canceled {
+ // a canceled proposal can be caused by leader transfer or the absence of a leader
+ err = ErrProposalCanceled
+ }
+ w.Trigger(reqList.ReqId, err)
+ rsp = err
+ case <-wr.WaitC():
+ rsp = wr.GetResult()
+ }
+ cancel()
+ if err, ok = rsp.(error); ok {
+ rsp = nil
+ //nd.rn.Infof("request return error: %v, %v", req.String(), err.Error())
+ } else {
+ err = nil
+ }
+ return rsp, err
+ }
+ _, err := futureRsp.WaitRsp()
+ assert.Equal(t, context.DeadlineExceeded, err)
+ w.Trigger(reqList.ReqId, errors.New("unexpected"))
+ _, err = futureRsp.WaitRsp()
+ assert.Equal(t, context.DeadlineExceeded, err)
+}
+
+func BenchmarkBatchRequestMarshal(b *testing.B) {
+ br := &BatchInternalRaftRequest{}
+ br.ReqId = 1
+ irr := InternalRaftRequest{
+ Data: make([]byte, 100),
+ }
+ irr.Header.Timestamp = time.Now().UnixNano()
+ br.Reqs = append(br.Reqs, irr)
+
+ b.SetParallelism(2)
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ br.Marshal()
+ }
+ })
+}
+
+func BenchmarkRequestMarshal(b *testing.B) {
+ irr := InternalRaftRequest{
+ Data: make([]byte, 100),
+ }
+ irr.Header.Timestamp = time.Now().UnixNano()
+
+ b.SetParallelism(2)
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ irr.Marshal()
+ }
+ })
+}
diff --git a/node/raft.go b/node/raft.go
index 504609f6..a0e67a7e 100644
--- a/node/raft.go
+++ b/node/raft.go
@@ -19,6 +19,7 @@ import (
"fmt"
"io"
"os"
+ "runtime"
"sort"
"strconv"
"sync"
@@ -27,33 +28,41 @@ import (
"encoding/json"
"sync/atomic"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/pkg/fileutil"
- "github.com/absolute8511/ZanRedisDB/pkg/idutil"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/snap"
- "github.com/absolute8511/ZanRedisDB/transport/rafthttp"
- "github.com/absolute8511/ZanRedisDB/wal"
- "github.com/absolute8511/ZanRedisDB/wal/walpb"
+ ps "github.com/prometheus/client_golang/prometheus"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/metric"
+ "github.com/youzan/ZanRedisDB/pkg/fileutil"
+ "github.com/youzan/ZanRedisDB/pkg/idutil"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/settings"
+ "github.com/youzan/ZanRedisDB/snap"
+ "github.com/youzan/ZanRedisDB/transport/rafthttp"
+ "github.com/youzan/ZanRedisDB/wal"
+ "github.com/youzan/ZanRedisDB/wal/walpb"
"golang.org/x/net/context"
)
-const (
- DefaultSnapCount = 160000
+var (
+ // DefaultSnapCount is the applied-entry count that triggers a snapshot
+ DefaultSnapCount = int(settings.Soft.DefaultSnapCount)
// HealthInterval is the minimum time the cluster should be healthy
// before accepting add member requests.
- HealthInterval = 5 * time.Second
+ HealthInterval = time.Duration(settings.Soft.HealthIntervalSec) * time.Second
// max number of in-flight snapshot messages allowed
- maxInFlightMsgSnap = 16
+ maxInFlightMsgSnap = int(settings.Soft.MaxInFlightMsgSnap)
+ maxInflightMsgs = settings.Soft.MaxInflightMsgs
+)
+
+const (
releaseDelayAfterSnapshot = 30 * time.Second
- maxSizePerMsg = 1024 * 1024
- maxInflightMsgs = 256
)
+var errWALMetaMismatch = errors.New("wal meta mismatches config (maybe reusing old deleted data)")
+
type Snapshot interface {
GetData() ([]byte, error)
}
@@ -64,15 +73,22 @@ type IRaftPersistStorage interface {
Save(st raftpb.HardState, ents []raftpb.Entry) error
// SaveSnap function saves snapshot to the underlying stable storage.
SaveSnap(snap raftpb.Snapshot) error
- Load() (*raftpb.Snapshot, string, error)
+ Load() (*raftpb.Snapshot, error)
+ LoadNewestAvailable(walSnaps []walpb.Snapshot) (*raftpb.Snapshot, error)
// Close closes the Storage and performs finalization.
Close() error
+ // Release releases the locked wal files older than the provided snapshot.
+ Release(snap raftpb.Snapshot) error
+ // Sync WAL
+ Sync() error
}
type DataStorage interface {
CleanData() error
- RestoreFromSnapshot(bool, raftpb.Snapshot) error
+ RestoreFromSnapshot(raftpb.Snapshot) error
+ PrepareSnapshot(raftpb.Snapshot) error
GetSnapshot(term uint64, index uint64) (Snapshot, error)
+ UpdateSnapshotState(term uint64, index uint64)
Stop()
}
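
DataStorage now separates PrepareSnapshot (fetch the backing data, possibly from a remote peer) from RestoreFromSnapshot (switch to it), and adds UpdateSnapshotState so the state machine knows the latest persisted snapshot term/index. A rough in-memory sketch of the expected call order during a snapshot install, assuming the module path used elsewhere in the patch; it is a toy stand-in, not the project's implementation:

package storagedemo

import (
	"errors"
	"sync"

	"github.com/youzan/ZanRedisDB/raft/raftpb"
)

var errNotPrepared = errors.New("snapshot data not prepared")

// snapshotData mirrors the node.Snapshot interface above.
type snapshotData interface {
	GetData() ([]byte, error)
}

type memSnap struct{ data []byte }

func (s *memSnap) GetData() ([]byte, error) { return s.data, nil }

// memDataStorage is a toy stand-in for DataStorage. The point is the call
// order used by the patch: UpdateSnapshotState records the latest persisted
// snapshot meta, PrepareSnapshot fetches the backing data, and only then does
// RestoreFromSnapshot switch over to it.
type memDataStorage struct {
	mu        sync.Mutex
	snapTerm  uint64
	snapIndex uint64
	prepared  map[uint64][]byte
}

func (m *memDataStorage) CleanData() error { return nil }
func (m *memDataStorage) Stop()            {}

func (m *memDataStorage) UpdateSnapshotState(term, index uint64) {
	m.mu.Lock()
	m.snapTerm, m.snapIndex = term, index
	m.mu.Unlock()
}

func (m *memDataStorage) PrepareSnapshot(snap raftpb.Snapshot) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.prepared == nil {
		m.prepared = make(map[uint64][]byte)
	}
	// the real implementation would transfer the backup data here
	m.prepared[snap.Metadata.Index] = append([]byte(nil), snap.Data...)
	return nil
}

func (m *memDataStorage) RestoreFromSnapshot(snap raftpb.Snapshot) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if _, ok := m.prepared[snap.Metadata.Index]; !ok {
		return errNotPrepared
	}
	return nil
}

func (m *memDataStorage) GetSnapshot(term uint64, index uint64) (snapshotData, error) {
	return &memSnap{data: []byte("snapshot-meta")}, nil
}
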
@@ -98,7 +114,7 @@ type raftNode struct {
// raft backing for the commit/error channel
node raft.Node
- raftStorage *raft.MemoryStorage
+ raftStorage raft.IExtRaftStorage
wal *wal.WAL
persistStorage IRaftPersistStorage
@@ -118,6 +134,11 @@ type raftNode struct {
lastLeaderChangedTs int64
stopping int32
replayRunning int32
+ busySnapshot int32
+ loopServering int32
+ lastPublished uint64
+
+ slowLimiter *SlowLimiter
}
// newRaftNode initiates a raft instance and returns a committed log entry
@@ -126,14 +147,14 @@ type raftNode struct {
// commit channel, followed by a nil message (to indicate the channel is
// current), then new log entries.
func newRaftNode(rconfig *RaftConfig, transport *rafthttp.Transport,
- join bool, ds DataStorage, newLeaderChan chan string) (<-chan applyInfo, *raftNode, error) {
+ join bool, ds DataStorage, rs raft.IExtRaftStorage, newLeaderChan chan string) (<-chan applyInfo, *raftNode, error) {
- commitC := make(chan applyInfo, 5000)
+ commitC := make(chan applyInfo, settings.Soft.CommitBufferLen)
if rconfig.SnapCount <= 0 {
rconfig.SnapCount = DefaultSnapCount
}
if rconfig.SnapCatchup <= 0 {
- rconfig.SnapCatchup = rconfig.SnapCount / 4
+ rconfig.SnapCatchup = rconfig.SnapCount / 2
}
rc := &raftNode{
@@ -142,13 +163,13 @@ func newRaftNode(rconfig *RaftConfig, transport *rafthttp.Transport,
members: make(map[uint64]*common.MemberInfo),
learnerMems: make(map[uint64]*common.MemberInfo),
join: join,
- raftStorage: raft.NewMemoryStorage(),
+ raftStorage: rs,
stopc: make(chan struct{}),
ds: ds,
reqIDGen: idutil.NewGenerator(uint16(rconfig.ID), time.Now()),
msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
transport: transport,
- readStateC: make(chan raft.ReadState, 1),
+ readStateC: make(chan raft.ReadState, 3),
newLeaderChan: newLeaderChan,
}
snapDir := rc.config.SnapDir
@@ -168,7 +189,7 @@ func (rc *raftNode) openWAL(snapshot *raftpb.Snapshot, readOld bool) (*wal.WAL,
var hardState raftpb.HardState
if !wal.Exist(rc.config.WALDir) {
if err := os.MkdirAll(rc.config.WALDir, common.DIR_PERM); err != nil {
- nodeLog.Errorf("cannot create dir for wal (%v)", err)
+ rc.Errorf("cannot create dir for wal (%v)", err)
return nil, nil, hardState, nil, err
}
@@ -179,7 +200,7 @@ func (rc *raftNode) openWAL(snapshot *raftpb.Snapshot, readOld bool) (*wal.WAL,
d, _ := json.Marshal(m)
w, err := wal.Create(rc.config.WALDir, d, rc.config.OptimizedFsync)
if err != nil {
- nodeLog.Errorf("create wal error (%v)", err)
+ rc.Errorf("create wal error (%v)", err)
}
return w, d, hardState, nil, err
}
@@ -194,21 +215,21 @@ func (rc *raftNode) openWAL(snapshot *raftpb.Snapshot, readOld bool) (*wal.WAL,
for {
w, err = wal.Open(rc.config.WALDir, walsnap, rc.config.OptimizedFsync)
if err != nil {
- nodeLog.Errorf("error loading wal (%v)", err)
+ rc.Errorf("error loading wal (%v)", err)
return w, nil, hardState, nil, err
}
if readOld {
meta, st, ents, err := w.ReadAll()
if err != nil {
w.Close()
- nodeLog.Errorf("failed to read WAL (%v)", err)
- if repaired || err != io.ErrUnexpectedEOF {
- nodeLog.Errorf("read wal error and cannot be repaire")
+ rc.Errorf("failed to read WAL (%s)", err)
+ if repaired {
+ rc.Errorf("read wal error and cannot be repaire")
} else {
if !wal.Repair(rc.config.WALDir) {
- nodeLog.Errorf("read wal error and cannot be repaire")
+ rc.Errorf("read wal error and cannot be repaire")
} else {
- nodeLog.Infof("wal repaired")
+ rc.Infof("wal repaired")
repaired = true
continue
}
@@ -237,19 +258,19 @@ func (rc *raftNode) replayWAL(snapshot *raftpb.Snapshot, forceStandalone bool) e
return err
}
- rc.Infof("wal meta: %v, restart with: %v", string(meta), st.String())
+ rc.Infof("wal meta: %v, restart with: %v, ents: %v", string(meta), st.String(), len(ents))
var m common.MemberInfo
err = json.Unmarshal(meta, &m)
if err != nil {
w.Close()
- nodeLog.Errorf("meta is wrong: %v", err)
+ rc.Errorf("meta is wrong: %v", err)
return err
}
if m.ID != uint64(rc.config.ID) ||
m.GroupID != rc.config.GroupID {
w.Close()
- nodeLog.Errorf("meta starting mismatch config: %v, %v", m, rc.config)
- return err
+ rc.Errorf("meta starting mismatch config: %v, %v", m, rc.config)
+ return errWALMetaMismatch
}
if rs, ok := rc.persistStorage.(*raftPersistStorage); ok {
rs.WAL = w
@@ -259,7 +280,7 @@ func (rc *raftNode) replayWAL(snapshot *raftpb.Snapshot, forceStandalone bool) e
// discard the previously uncommitted entries
for i, ent := range ents {
if ent.Index > st.Commit {
- nodeLog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
+ rc.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
ents = ents[:i]
break
}
@@ -291,7 +312,7 @@ func (rc *raftNode) replayWAL(snapshot *raftpb.Snapshot, forceStandalone bool) e
// force commit newly appended entries
err := w.Save(raftpb.HardState{}, toAppEnts)
if err != nil {
- nodeLog.Errorf("force commit error: %v", err)
+ rc.Errorf("force commit error: %v", err)
return err
}
if len(ents) != 0 {
@@ -333,22 +354,31 @@ func (rc *raftNode) startRaft(ds DataStorage, standalone bool) error {
elecTick = 10
}
c := &raft.Config{
- ID: uint64(rc.config.ID),
- ElectionTick: elecTick,
- HeartbeatTick: elecTick / 10,
- Storage: rc.raftStorage,
- MaxSizePerMsg: maxSizePerMsg,
- MaxInflightMsgs: maxInflightMsgs,
- CheckQuorum: true,
- PreVote: true,
- Logger: nodeLog,
+ ID: uint64(rc.config.ID),
+ ElectionTick: elecTick,
+ HeartbeatTick: elecTick / 10,
+ Storage: rc.raftStorage,
+ MaxSizePerMsg: settings.Soft.MaxSizePerMsg,
+ MaxInflightMsgs: int(maxInflightMsgs),
+ MaxCommittedSizePerReady: settings.Soft.MaxCommittedSizePerReady,
+ CheckQuorum: true,
+ PreVote: true,
+ Logger: nodeLog,
Group: raftpb.Group{NodeId: rc.config.nodeConfig.NodeID,
Name: rc.config.GroupName, GroupId: rc.config.GroupID,
RaftReplicaId: uint64(rc.config.ID)},
}
if oldwal {
- snapshot, _, err := rc.persistStorage.Load()
+ rc.Infof("loading from old wal: %s", walDir)
+ // Find a snapshot to start/restart a raft node
+ walSnaps, err := wal.ValidSnapshotEntries(walDir)
+ if err != nil {
+ return err
+ }
+ // snapshot files can be orphaned if the process crashes after writing them but before writing the corresponding
+ // wal log entries
+ snapshot, err := rc.persistStorage.LoadNewestAvailable(walSnaps)
if err != nil && err != snap.ErrNoSnapshot {
nodeLog.Warning(err)
return err
@@ -360,8 +390,21 @@ func (rc *raftNode) startRaft(ds DataStorage, standalone bool) error {
rc.Infof("loading snapshot at term %d and index %d, snap: %v",
snapshot.Metadata.Term,
snapshot.Metadata.Index, snapshot.Metadata.ConfState)
- if err := rc.ds.RestoreFromSnapshot(true, *snapshot); err != nil {
- nodeLog.Error(err)
+ // update the latest snapshot index for the state machine
+ rc.ds.UpdateSnapshotState(snapshot.Metadata.Term, snapshot.Metadata.Index)
+ err := rc.ds.PrepareSnapshot(*snapshot)
+ if err == nil {
+ if err := rc.ds.RestoreFromSnapshot(*snapshot); err != nil {
+ rc.Errorf("failed to restore from snapshot: %s", err)
+ return err
+ }
+ } else if err == errNobackupAvailable {
+ if common.IsConfSetted(common.ConfIgnoreStartupNoBackup) {
+ rc.Infof("ignore failed at startup for no any backup from anyware")
+ } else {
+ return err
+ }
+ } else {
return err
}
}
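
LoadNewestAvailable exists because a snapshot file on disk is only trustworthy if its (term, index) also made it into the WAL as a snapshot entry; otherwise it may be an orphan written just before a crash. A self-contained sketch of that selection rule, where snapMeta is a simplified stand-in for both the on-disk snapshot metadata and the walpb.Snapshot entries:

package snapdemo

import "errors"

// snapMeta is a simplified (term, index) pair standing in for both the
// snapshot files on disk and the snapshot entries recorded in the WAL.
type snapMeta struct {
	Term  uint64
	Index uint64
}

var errNoUsableSnapshot = errors.New("no snapshot matching the WAL entries")

// newestAvailable returns the newest on-disk snapshot whose meta is also
// recorded in the WAL, skipping orphans. onDisk is assumed to be sorted by
// index in ascending order.
func newestAvailable(onDisk []snapMeta, walSnaps []snapMeta) (snapMeta, error) {
	valid := make(map[snapMeta]bool, len(walSnaps))
	for _, ws := range walSnaps {
		valid[ws] = true
	}
	for i := len(onDisk) - 1; i >= 0; i-- {
		if valid[onDisk[i]] {
			return onDisk[i], nil
		}
	}
	return snapMeta{}, errNoUsableSnapshot
}
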
@@ -372,6 +415,7 @@ func (rc *raftNode) startRaft(ds DataStorage, standalone bool) error {
err = rc.restartNode(c, snapshot)
}
if err != nil {
+ rc.Infof("restarting node failed: %v", err.Error())
return err
}
} else {
@@ -401,7 +445,13 @@ func (rc *raftNode) startRaft(ds DataStorage, standalone bool) error {
if rc.join {
startPeers = nil
}
- rc.node = raft.StartNode(c, startPeers, isLearner)
+ if len(startPeers) == 0 {
+ rc.Infof("loading empty wal: %s without peers", walDir)
+ rc.node = raft.RestartNode(c)
+ } else {
+ rc.Infof("loading empty wal: %s with peers: %v", walDir, startPeers)
+ rc.node = raft.StartNode(c, startPeers, isLearner)
+ }
}
rc.initForTransport()
rc.wgServe.Add(1)
@@ -437,9 +487,9 @@ func (rc *raftNode) initForTransport() {
}
func (rc *raftNode) restartNode(c *raft.Config, snapshot *raftpb.Snapshot) error {
- var err error
- err = rc.replayWAL(snapshot, false)
+ err := rc.replayWAL(snapshot, false)
if err != nil {
+ rc.Infof("restarting node failed to replay wal: %v", err.Error())
return err
}
rc.node = raft.RestartNode(c)
@@ -448,8 +498,7 @@ func (rc *raftNode) restartNode(c *raft.Config, snapshot *raftpb.Snapshot) error
}
func (rc *raftNode) restartAsStandaloneNode(cfg *raft.Config, snapshot *raftpb.Snapshot) error {
- var err error
- err = rc.replayWAL(snapshot, true)
+ err := rc.replayWAL(snapshot, true)
if err != nil {
return err
}
@@ -462,6 +511,7 @@ func (rc *raftNode) restartAsStandaloneNode(cfg *raft.Config, snapshot *raftpb.S
// ID-related entry:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
+// - ConfChangeAddLearnerNode, in which case the contained ID will be added into the set.
func getIDsAndGroups(snap *raftpb.Snapshot, ents []raftpb.Entry) ([]uint64, map[uint64]raftpb.Group) {
ids := make(map[uint64]bool)
grps := make(map[uint64]raftpb.Group)
@@ -486,12 +536,16 @@ func getIDsAndGroups(snap *raftpb.Snapshot, ents []raftpb.Entry) ([]uint64, map[
var cc raftpb.ConfChange
cc.Unmarshal(e.Data)
switch cc.Type {
+ case raftpb.ConfChangeAddLearnerNode:
+ // https://github.com/etcd-io/etcd/pull/12288
+ ids[cc.ReplicaID] = true
+ grps[cc.NodeGroup.RaftReplicaId] = cc.NodeGroup
case raftpb.ConfChangeAddNode:
ids[cc.ReplicaID] = true
grps[cc.NodeGroup.RaftReplicaId] = cc.NodeGroup
case raftpb.ConfChangeRemoveNode:
delete(ids, cc.ReplicaID)
- delete(grps, cc.ReplicaID)
+ delete(grps, cc.NodeGroup.RaftReplicaId)
case raftpb.ConfChangeUpdateNode:
// do nothing
default:
@@ -582,14 +636,14 @@ func newSnapshotReaderCloser() io.ReadCloser {
func (rc *raftNode) handleSendSnapshot(np *nodeProgress) {
select {
case m := <-rc.msgSnapC:
- snapData, _, err := rc.persistStorage.Load()
+ snapData, err := rc.persistStorage.Load()
if err != nil {
rc.Infof("load snapshot error : %v", err)
rc.ReportSnapshot(m.To, m.ToGroup, raft.SnapshotFailure)
return
}
if snapData.Metadata.Index > np.appliedi {
- rc.Infof("load snapshot error, snapshot index should not great than applied: %v", snapData.Metadata, np)
+ rc.Infof("load snapshot error, snapshot index should not great than applied: %v, %v", snapData.Metadata, np)
rc.ReportSnapshot(m.To, m.ToGroup, raft.SnapshotFailure)
return
}
@@ -623,8 +677,6 @@ func (rc *raftNode) handleSendSnapshot(np *nodeProgress) {
func (rc *raftNode) beginSnapshot(snapTerm uint64, snapi uint64, confState raftpb.ConfState) error {
// here we can just begin snapshot, to freeze the state of storage
// and we can copy data async below
- // TODO: do we need the snapshot while we already make our data stable on disk?
- // maybe we can just same some meta data.
rc.Infof("begin get snapshot at: %v-%v", snapTerm, snapi)
sn, err := rc.ds.GetSnapshot(snapTerm, snapi)
if err != nil {
@@ -642,7 +694,7 @@ func (rc *raftNode) beginSnapshot(snapTerm uint64, snapi uint64, confState raftp
}
rc.Infof("snapshot data : %v\n", string(data))
rc.Infof("create snapshot with conf : %v\n", confState)
- // TODO: now we can do the actually snapshot for copy
+ // now we can do the actual snapshot data copy
snap, err := rc.raftStorage.CreateSnapshot(snapi, &confState, data)
if err != nil {
if err == raft.ErrSnapOutOfDate {
@@ -651,16 +703,28 @@ func (rc *raftNode) beginSnapshot(snapTerm uint64, snapi uint64, confState raftp
rc.Errorf("create snapshot at index %d failed: %v", snapi, err)
return
}
+ // SaveSnap saves the snapshot to file and appends the corresponding WAL entry.
if err := rc.persistStorage.SaveSnap(snap); err != nil {
rc.Errorf("save snapshot at index %v failed: %v", snap.Metadata, err)
return
}
- rc.Infof("saved snapshot at index %d", snap.Metadata.Index)
+ err = rc.persistStorage.Sync()
+ if err != nil {
+ rc.Errorf("failed to sync wal: %s", err)
+ return
+ }
+ if err = rc.persistStorage.Release(snap); err != nil {
+ rc.Errorf("failed to release wal: %s", err)
+ return
+ }
+ // update the latest snapshot index for the state machine
+ rc.ds.UpdateSnapshotState(snap.Metadata.Term, snap.Metadata.Index)
compactIndex := uint64(1)
if snapi > uint64(rc.config.SnapCatchup) {
compactIndex = snapi - uint64(rc.config.SnapCatchup)
}
+ rc.Infof("saved snapshot at index %d, compact to: %v", snap.Metadata.Index, compactIndex)
if err := rc.raftStorage.Compact(compactIndex); err != nil {
if err == raft.ErrCompacted {
return
@@ -678,8 +742,13 @@ func (rc *raftNode) beginSnapshot(snapTerm uint64, snapi uint64, confState raftp
func (rc *raftNode) publishEntries(ents []raftpb.Entry, snapshot raftpb.Snapshot, snapResult chan error,
raftDone chan struct{}, applyWaitDone chan struct{}) {
select {
- case rc.commitC <- applyInfo{ents: ents, snapshot: snapshot, applySnapshotResult: snapResult,
- raftDone: raftDone, applyWaitDone: applyWaitDone}:
+ case rc.commitC <- applyInfo{
+ ents: ents,
+ snapshot: snapshot,
+ applySnapshotResult: snapResult,
+ raftDone: raftDone,
+ applyWaitDone: applyWaitDone,
+ }:
case <-rc.stopc:
return
}
@@ -711,13 +780,13 @@ func (rc *raftNode) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Conf
var m common.MemberInfo
err := json.Unmarshal(cc.Context, &m)
if err != nil {
- nodeLog.Errorf("error conf context: %v", err)
+ rc.Errorf("error conf context: %v", err)
go rc.ds.Stop()
return false, false, err
} else {
m.ID = cc.ReplicaID
if m.NodeID == 0 {
- nodeLog.Errorf("invalid member info: %v", m)
+ rc.Errorf("invalid member info: %v", m)
go rc.ds.Stop()
return false, confChanged, errors.New("add member should include node id ")
}
@@ -775,12 +844,12 @@ func (rc *raftNode) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Conf
var m common.MemberInfo
err := json.Unmarshal(cc.Context, &m)
if err != nil {
- nodeLog.Errorf("error conf context: %v", err)
+ rc.Errorf("error conf context: %v", err)
return false, false, err
}
m.ID = cc.ReplicaID
if m.NodeID == 0 {
- nodeLog.Errorf("invalid member info: %v", m)
+ rc.Errorf("invalid member info: %v", m)
return false, confChanged, errors.New("add member should include node id ")
}
rc.memMutex.Lock()
@@ -796,11 +865,26 @@ func (rc *raftNode) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Conf
return false, confChanged, nil
}
+func (rc *raftNode) isServerRunning() bool {
+ return atomic.LoadInt32(&rc.loopServering) == 1
+}
+
func (rc *raftNode) serveChannels() {
purgeDone := make(chan struct{})
raftReadyLoopC := make(chan struct{})
go rc.purgeFile(purgeDone, raftReadyLoopC)
+ atomic.StoreInt32(&rc.loopServering, 1)
+ defer atomic.StoreInt32(&rc.loopServering, 0)
defer func() {
+ if e := recover(); e != nil {
+ buf := make([]byte, 4096)
+ n := runtime.Stack(buf, false)
+ buf = buf[0:n]
+ rc.Errorf("handle raft loop panic: %s:%v", buf, e)
+ go rc.ds.Stop()
+ <-rc.stopc
+ }
+
// wait until the purge goroutine stops, to avoid purging files after the wal is closed
close(raftReadyLoopC)
<-purgeDone
@@ -810,140 +894,295 @@ func (rc *raftNode) serveChannels() {
rc.wgAsync.Wait()
rc.node.Stop()
rc.persistStorage.Close()
+ rc.raftStorage.Close()
rc.raftStorage = nil
}()
// event loop on raft state machine updates
- isMeNewLeader := false
for {
select {
case <-rc.stopc:
return
- // store raft entries to wal, then publish over commit channel
- case rd, ok := <-rc.node.Ready():
- if !ok {
- rc.Errorf("raft loop stopped")
- return
- }
- if rd.SoftState != nil {
- isMeNewLeader = (rd.RaftState == raft.StateLeader)
- oldLead := atomic.LoadUint64(&rc.lead)
- isMeLosingLeader := (oldLead == uint64(rc.config.ID)) && !isMeNewLeader
- if rd.SoftState.Lead != raft.None && oldLead != rd.SoftState.Lead {
- rc.Infof("leader changed from %v to %v", oldLead, rd.SoftState)
- atomic.StoreInt64(&rc.lastLeaderChangedTs, time.Now().UnixNano())
- }
- if rd.SoftState.Lead == raft.None && oldLead != raft.None {
- // TODO: handle proposal drop if leader is lost
- //rc.triggerLeaderLost()
- }
- if isMeNewLeader || isMeLosingLeader {
- rc.triggerLeaderChanged()
+ case <-rc.node.EventNotifyCh():
+ metric.QueueLen.With(ps.Labels{
+ "namespace": rc.Descrp(),
+ "queue_name": "apply_commit_queue",
+ }).Set(float64(len(rc.commitC)))
+ moreEntriesToApply := cap(rc.commitC)-len(rc.commitC) > 3
+ // we should slow down receiving raft logs while applying is slow; otherwise we
+ // may keep too many logs in memory.
+ busy := rc.IsBusySnapshot()
+ if !busy {
+ // note: if LastIndex and FirstIndex are slow, we should avoid calling them in every step,
+ // and this throttling may cause the raft log to resend some overflowed messages, because raft
+ // pipelines as much as MaxInflights*MaxSizePerMsg (which may increase the network bandwidth), so if we replace the in-memory raft
+ // storage we can remove this check to allow receiving all logs from the leader.
+ last, err := rc.raftStorage.LastIndex()
+ if err == nil {
+ fi, _ := rc.raftStorage.FirstIndex()
+ fi = fi - 1
+ if last > fi && last-fi >= uint64(rc.config.SnapCatchup+rc.config.SnapCount)*10 {
+ busy = true
+ metric.EventCnt.With(ps.Labels{
+ "namespace": rc.Descrp(),
+ "event_name": "raft_too_much_logs_unapplied",
+ }).Inc()
+ }
}
- atomic.StoreUint64(&rc.lead, rd.SoftState.Lead)
}
- if len(rd.ReadStates) != 0 {
- select {
- case rc.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
- case <-time.After(time.Second):
- nodeLog.Infof("timeout sending read state")
- case <-rc.stopc:
- return
+ if !moreEntriesToApply && !busy {
+ // the apply buffer is nearly full; slow down and refuse some slow write proposals
+ if rc.slowLimiter != nil {
+ rc.slowLimiter.MarkHeavySlow()
}
+ metric.EventCnt.With(ps.Labels{
+ "namespace": rc.Descrp(),
+ "event_name": "raft_apply_buffer_full",
+ }).Inc()
+ } else if len(rc.commitC) <= 10 {
}
-
- raftDone := make(chan struct{}, 1)
- var applyWaitDone chan struct{}
- waitApply := false
- if !isMeNewLeader {
- // Candidate or follower needs to wait for all pending configuration
- // changes to be applied before sending messages.
- // Otherwise we might incorrectly count votes (e.g. votes from removed members).
- // Also slow machine's follower raft-layer could proceed to become the leader
- // on its own single-node cluster, before apply-layer applies the config change.
- // We simply wait for ALL pending entries to be applied for now.
- // We might improve this later on if it causes unnecessary long blocking issues.
- for _, ent := range rd.CommittedEntries {
- if ent.Type == raftpb.EntryConfChange {
- waitApply = true
- nodeLog.Infof("need wait apply for config changed: %v", ent.String())
- break
- }
- }
- if waitApply {
- applyWaitDone = make(chan struct{})
- }
+ rd, hasUpdate := rc.node.StepNode(moreEntriesToApply, busy)
+ if !hasUpdate {
+ continue
}
-
- var applySnapshotResult chan error
- if !raft.IsEmptySnap(rd.Snapshot) {
- applySnapshotResult = make(chan error, 1)
+ rc.processReady(rd)
+ if rd.MoreCommittedEntries {
+ rc.node.NotifyEventCh()
}
+ }
+ }
+}
- rc.publishEntries(rd.CommittedEntries, rd.Snapshot, applySnapshotResult, raftDone, applyWaitDone)
+func (rc *raftNode) processReady(rd raft.Ready) {
+ isMeNewLeader := false
+ if rd.SoftState != nil {
+ isMeNewLeader = (rd.RaftState == raft.StateLeader)
+ oldLead := atomic.LoadUint64(&rc.lead)
+ isMeLosingLeader := (oldLead == uint64(rc.config.ID)) && !isMeNewLeader
+ if rd.SoftState.Lead != raft.None && oldLead != rd.SoftState.Lead {
+ rc.Infof("leader changed from %v to %v", oldLead, rd.SoftState)
+ atomic.StoreInt64(&rc.lastLeaderChangedTs, time.Now().UnixNano())
+ metric.EventCnt.With(ps.Labels{
+ "namespace": rc.Descrp(),
+ "event_name": "raft_leader_changed",
+ }).Inc()
+ }
+ if rd.SoftState.Lead == raft.None && oldLead != raft.None {
+ // TODO: handle proposal drop if leader is lost
+ //rc.triggerLeaderLost()
+ }
+ if isMeNewLeader || isMeLosingLeader {
+ rc.triggerLeaderChanged()
+ }
+ atomic.StoreUint64(&rc.lead, rd.SoftState.Lead)
+ }
- if !raft.IsEmptySnap(rd.Snapshot) {
- // since the snapshot only has metadata, we need rsync the real snapshot data first.
- // if the real snapshot failed to pull, we need stop raft and retry restart later.
- rc.Infof("raft begin to apply incoming snapshot : %v", rd.Snapshot.String())
- select {
- case applyErr := <-applySnapshotResult:
- if applyErr != nil {
- rc.Errorf("wait apply snapshot error: %v", applyErr)
- go rc.ds.Stop()
- <-rc.stopc
- return
- }
- case <-rc.stopc:
- return
- }
+ rc.processReadStates(&rd)
+
+ raftDone := make(chan struct{}, 1)
+ var applyWaitDone chan struct{}
+ waitApply := false
+ if !isMeNewLeader {
+ // Candidate or follower needs to wait for all pending configuration
+ // changes to be applied before sending messages.
+ // Otherwise we might incorrectly count votes (e.g. votes from removed members).
+ // Also slow machine's follower raft-layer could proceed to become the leader
+ // on its own single-node cluster, before apply-layer applies the config change.
+ // We simply wait for ALL pending entries to be applied for now.
+ // We might improve this later on if it causes unnecessary long blocking issues.
+ for _, ent := range rd.CommittedEntries {
+ if ent.Type == raftpb.EntryConfChange {
+ waitApply = true
+ rc.Infof("need wait apply for config changed: %v", ent.String())
+ break
}
- if isMeNewLeader {
- rc.transport.Send(rc.processMessages(rd.Messages))
+ }
+ if waitApply {
+ applyWaitDone = make(chan struct{})
+ }
+ }
+
+ var applySnapshotTransferResult chan error
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ applySnapshotTransferResult = make(chan error, 1)
+ if !waitApply {
+ // this is only needed if the recovery from the snapshot is not atomic
+ waitApply = true
+ applyWaitDone = make(chan struct{})
+ }
+ }
+ processedMsgs, hasRequestSnapMsg := rc.processMessages(rd.Messages)
+ if len(rd.CommittedEntries) > 0 || !raft.IsEmptySnap(rd.Snapshot) || hasRequestSnapMsg {
+ var newPublished uint64
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ newPublished = rd.Snapshot.Metadata.Index
+ }
+ if len(rd.CommittedEntries) > 0 {
+ firsti := rd.CommittedEntries[0].Index
+ if rc.lastPublished != 0 && firsti > rc.lastPublished+1 {
+ e := fmt.Sprintf("%v first index of committed entry[%d] should <= last published[%d] + 1, snap: %v",
+ rc.Descrp(), firsti, rc.lastPublished, rd.Snapshot.Metadata.String())
+ rc.Errorf("%s", e)
+ rc.Errorf("raft node status: %v", rc.node.DebugString())
}
- if err := rc.persistStorage.Save(rd.HardState, rd.Entries); err != nil {
- nodeLog.Errorf("raft save wal error: %v", err)
+ newPublished = rd.CommittedEntries[len(rd.CommittedEntries)-1].Index
+ }
+ rc.lastPublished = newPublished
+ rc.publishEntries(rd.CommittedEntries, rd.Snapshot, applySnapshotTransferResult, raftDone, applyWaitDone)
+ }
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ // since the snapshot only has metadata, we need to rsync the real snapshot data first.
+ // if the real snapshot data fails to be pulled, we need to stop raft and retry the restart later.
+
+ // fixme: this is not the best way since it blocks the raft loop; ideally we should
+ // have the full snapshot data when receiving the snapshot request and save it to raft. Then
+ // we could apply it asynchronously on the local node without blocking the raft loop (just pausing the entry apply)
+ rc.Infof("raft begin to transfer incoming snapshot : %v", rd.Snapshot.String())
+ select {
+ case applyErr := <-applySnapshotTransferResult:
+ if applyErr != nil {
+ rc.Errorf("wait transfer snapshot error: %v", applyErr)
go rc.ds.Stop()
<-rc.stopc
return
}
- if !raft.IsEmptySnap(rd.Snapshot) {
- if err := rc.persistStorage.SaveSnap(rd.Snapshot); err != nil {
- rc.Errorf("raft save snap error: %v", err)
- go rc.ds.Stop()
- <-rc.stopc
+ case <-rc.stopc:
+ return
+ }
+ rc.Infof("raft transfer incoming snapshot done : %v", rd.Snapshot.String())
+ }
+ if isMeNewLeader {
+ rc.transport.Send(processedMsgs)
+ }
+
+ start := time.Now()
+ // TODO: saving entries, hardstate and snapshot should be atomic, or the raft state may be corrupted
+ if err := rc.persistRaftState(&rd); err != nil {
+ rc.Errorf("raft save states to disk error: %v", err)
+ go rc.ds.Stop()
+ <-rc.stopc
+ return
+ }
+ cost := time.Since(start)
+ if cost >= raftSlow/2 {
+ rc.Infof("raft persist state slow: %v, cost: %v", len(rd.Entries), cost)
+ }
+ if cost >= time.Millisecond {
+ metric.RaftWriteLatency.With(ps.Labels{
+ "namespace": rc.Descrp(),
+ "step": "raft_persist_commit_entries",
+ }).Observe(float64(cost.Milliseconds()))
+ }
+
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ // we need to notify that the snapshot has been persisted onto the disk
+ // Force WAL to fsync its hard state before Release() releases
+ // old data from the WAL. Otherwise could get an error like:
+ // panic: tocommit(107) is out of range [lastIndex(84)]. Was the raft log corrupted, truncated, or lost?
+ // See https://github.com/etcd-io/etcd/issues/10219 for more details.
+ if err := rc.persistStorage.Sync(); err != nil {
+ rc.Errorf("failed to sync Raft snapshot: %s", err)
+ go rc.ds.Stop()
+ <-rc.stopc
+ return
+ }
+ raftDone <- struct{}{}
+ rc.raftStorage.ApplySnapshot(rd.Snapshot)
+ rc.Infof("raft applied incoming snapshot done: %v", rd.Snapshot.String())
+ if rd.Snapshot.Metadata.Index >= rc.lastIndex {
+ if !rc.IsReplayFinished() {
+ rc.Infof("replay finished at snapshot index: %v\n", rd.Snapshot.String())
+ rc.MarkReplayFinished()
+ }
+ }
+ if err := rc.persistStorage.Release(rd.Snapshot); err != nil {
+ rc.Errorf("failed to release Raft wal: %s", err)
+ }
+ }
+ cost2 := time.Since(start)
+ rc.raftStorage.Append(rd.Entries)
+ cost3 := time.Since(start) - cost2
+ if cost3 > raftSlow/2 {
+ rc.Infof("raft append commit entries slow: %v, cost: %v", len(rd.Entries), cost3)
+ }
+ if cost3 >= time.Millisecond {
+ metric.RaftWriteLatency.With(ps.Labels{
+ "namespace": rc.Descrp(),
+ "step": "raft_append_commit_entries_to_storage",
+ }).Observe(float64(cost3.Milliseconds()))
+ }
+
+ if !isMeNewLeader {
+ raftDone <- struct{}{}
+ if waitApply {
+ rc.Infof("wait apply for pending configure or snapshot")
+ s := time.Now()
+ // wait and handle pending config change
+ done := false
+ for !done {
+ select {
+ case cc := <-rc.node.ConfChangedCh():
+ rc.node.HandleConfChanged(cc)
+ case <-applyWaitDone:
+ done = true
+ case <-rc.stopc:
return
}
- rc.raftStorage.ApplySnapshot(rd.Snapshot)
- rc.Infof("raft applied incoming snapshot done: %v", rd.Snapshot.String())
}
- rc.raftStorage.Append(rd.Entries)
- if !isMeNewLeader {
- msgs := rc.processMessages(rd.Messages)
- raftDone <- struct{}{}
- if waitApply {
- s := time.Now()
- select {
- case <-applyWaitDone:
- case <-rc.stopc:
- return
- }
- cost := time.Since(s)
- if cost > time.Second {
- nodeLog.Infof("wait apply %v msgs done cost: %v", len(msgs), cost.String())
- }
- }
- rc.transport.Send(msgs)
- } else {
- raftDone <- struct{}{}
+ cost := time.Since(s)
+ if cost > time.Second {
+ rc.Infof("wait apply %v msgs done cost: %v", len(processedMsgs), cost.String())
}
- rc.node.Advance()
}
+ rc.transport.Send(processedMsgs)
+ } else {
+ raftDone <- struct{}{}
+ }
+ rc.node.Advance(rd)
+}
+
+// persistRaftState should atomically save the Raft hard state, log entries and snapshot.
+func (rc *raftNode) persistRaftState(rd *raft.Ready) error {
+ // Must save the snapshot file and WAL snapshot entry before saving any other entries or hardstate to
+ // ensure that recovery after a snapshot restore is possible.
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ err := rc.persistStorage.SaveSnap(rd.Snapshot)
+ if err != nil {
+ rc.Errorf("raft save snap error: %v", err)
+ return err
+ }
+ rc.Infof("raft persist snapshot meta done : %v", rd.Snapshot.String())
+ // update the latest snapshot index for the state machine
+ rc.ds.UpdateSnapshotState(rd.Snapshot.Metadata.Term, rd.Snapshot.Metadata.Index)
}
+ if err := rc.persistStorage.Save(rd.HardState, rd.Entries); err != nil {
+ rc.Errorf("raft save wal error: %v", err)
+ return err
+ }
+ return nil
}
-func (rc *raftNode) processMessages(msgs []raftpb.Message) []raftpb.Message {
+func (rc *raftNode) processReadStates(rd *raft.Ready) {
+ if len(rd.ReadStates) != 0 {
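+ // try a non-blocking send first; if the reader is busy, wait up to 10ms and
+ // then drop the read state so the raft loop is never blocked for long.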
+ select {
+ case rc.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
+ default:
+ t := time.NewTimer(time.Millisecond * 10)
+ defer t.Stop()
+ select {
+ case rc.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
+ case <-t.C:
+ rc.Infof("timeout sending read state")
+ case <-rc.stopc:
+ return
+ }
+ }
+ }
+}
+
+func (rc *raftNode) processMessages(msgs []raftpb.Message) ([]raftpb.Message, bool) {
sentAppResp := false
+ hasSnapMsg := false
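+ // walk the messages from newest to oldest so that only the latest MsgAppResp
+ // needs to be delivered; older ones carry no extra information.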
for i := len(msgs) - 1; i >= 0; i-- {
if msgs[i].Type == raftpb.MsgAppResp {
if sentAppResp {
@@ -958,6 +1197,7 @@ func (rc *raftNode) processMessages(msgs []raftpb.Message) []raftpb.Message {
rc.Infof("some node request snapshot: %v", msgs[i].String())
select {
case rc.msgSnapC <- msgs[i]:
+ hasSnapMsg = true
default:
// drop msgSnap if the inflight chan if full.
}
@@ -968,7 +1208,7 @@ func (rc *raftNode) processMessages(msgs []raftpb.Message) []raftpb.Message {
rc.Infof("process vote/prevote :%v ", msgs[i].String())
}
}
- return msgs
+ return msgs, hasSnapMsg
}
func (rc *raftNode) Lead() uint64 { return atomic.LoadUint64(&rc.lead) }
@@ -1123,6 +1363,18 @@ func (rc *raftNode) triggerLeaderChanged() {
}
}
+func (rc *raftNode) SetPrepareSnapshot(busy bool) {
+ if busy {
+ atomic.StoreInt32(&rc.busySnapshot, 1)
+ } else {
+ atomic.StoreInt32(&rc.busySnapshot, 0)
+ }
+}
+
+func (rc *raftNode) IsBusySnapshot() bool {
+ return atomic.LoadInt32(&rc.busySnapshot) == 1
+}
+
func (rc *raftNode) ReportUnreachable(id uint64, group raftpb.Group) {
//rc.Infof("report node %v in group %v unreachable", id, group)
rc.node.ReportUnreachable(id, group)
@@ -1144,22 +1396,34 @@ func (rc *raftNode) purgeFile(done chan struct{}, stopC chan struct{}) {
rc.Infof("purge exit")
close(done)
}()
- keep := rc.config.KeepWAL
- if keep == 0 {
- keep = 20
+ if rc.config.nodeConfig == nil {
+ // maybe in test
+ return
}
- if keep < 10 {
- keep = 10
+ keepBackup := rc.config.nodeConfig.KeepBackup
+ keep := rc.config.nodeConfig.KeepWAL
+ if keep <= 1 {
+ keep = 80
+ }
+ if keepBackup <= 1 {
+ keepBackup = 10
}
var serrc, werrc <-chan error
- serrc = fileutil.PurgeFile(rc.config.SnapDir, "snap", 10, time.Minute*10, rc.stopc)
- werrc = fileutil.PurgeFile(rc.config.WALDir, "wal", uint(keep), time.Minute*10, rc.stopc)
+ var sdonec, wdonec <-chan struct{}
+ sdonec, serrc = fileutil.PurgeFileWithDoneNotify(rc.config.SnapDir, "snap", uint(keepBackup), time.Minute*10, stopC)
+ wdonec, werrc = fileutil.PurgeFileWithDoneNotify(rc.config.WALDir, "wal", uint(keep), time.Minute*10, stopC)
select {
case e := <-werrc:
rc.Infof("failed to purge wal file %v", e)
case e := <-serrc:
rc.Infof("failed to purge snap file %v", e)
case <-stopC:
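+ // on stop, wait for any in-flight purge to finish so no file is removed
+ // after shutdown has started.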
+ if sdonec != nil {
+ <-sdonec
+ }
+ if wdonec != nil {
+ <-wdonec
+ }
return
}
}
diff --git a/node/raft_internal.pb.go b/node/raft_internal.pb.go
index fe37b959..639c676e 100644
--- a/node/raft_internal.pb.go
+++ b/node/raft_internal.pb.go
@@ -1,26 +1,15 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: raft_internal.proto
-/*
- Package node is a generated protocol buffer package.
-
- It is generated from these files:
- raft_internal.proto
-
- It has these top-level messages:
- RequestHeader
- InternalRaftRequest
- BatchInternalRaftRequest
- SchemaChange
-*/
package node
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-
-import io "io"
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -31,7 +20,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type ReqSourceType int32
@@ -44,6 +33,7 @@ var ReqSourceType_name = map[int32]string{
0: "FromAPI",
1: "FromClusterSyncer",
}
+
var ReqSourceType_value = map[string]int32{
"FromAPI": 0,
"FromClusterSyncer": 1,
@@ -52,7 +42,10 @@ var ReqSourceType_value = map[string]int32{
func (x ReqSourceType) String() string {
return proto.EnumName(ReqSourceType_name, int32(x))
}
-func (ReqSourceType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{0} }
+
+func (ReqSourceType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_b4c9a9be0cfca103, []int{0}
+}
type SchemaChangeType int32
@@ -67,6 +60,7 @@ var SchemaChangeType_name = map[int32]string{
1: "SchemaChangeUpdateHsetIndex",
2: "SchemaChangeDeleteHsetIndex",
}
+
var SchemaChangeType_value = map[string]int32{
"SchemaChangeAddHsetIndex": 0,
"SchemaChangeUpdateHsetIndex": 1,
@@ -76,7 +70,10 @@ var SchemaChangeType_value = map[string]int32{
func (x SchemaChangeType) String() string {
return proto.EnumName(SchemaChangeType_name, int32(x))
}
-func (SchemaChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{1} }
+
+func (SchemaChangeType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_b4c9a9be0cfca103, []int{1}
+}
type RequestHeader struct {
ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
@@ -84,27 +81,83 @@ type RequestHeader struct {
Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
}
-func (m *RequestHeader) Reset() { *m = RequestHeader{} }
-func (m *RequestHeader) String() string { return proto.CompactTextString(m) }
-func (*RequestHeader) ProtoMessage() {}
-func (*RequestHeader) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{0} }
+func (m *RequestHeader) Reset() { *m = RequestHeader{} }
+func (m *RequestHeader) String() string { return proto.CompactTextString(m) }
+func (*RequestHeader) ProtoMessage() {}
+func (*RequestHeader) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b4c9a9be0cfca103, []int{0}
+}
+func (m *RequestHeader) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *RequestHeader) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RequestHeader.Merge(m, src)
+}
+func (m *RequestHeader) XXX_Size() int {
+ return m.Size()
+}
+func (m *RequestHeader) XXX_DiscardUnknown() {
+ xxx_messageInfo_RequestHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RequestHeader proto.InternalMessageInfo
type InternalRaftRequest struct {
- Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+ Header RequestHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header"`
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} }
+func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) }
+func (*InternalRaftRequest) ProtoMessage() {}
+func (*InternalRaftRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b4c9a9be0cfca103, []int{1}
+}
+func (m *InternalRaftRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *InternalRaftRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_InternalRaftRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *InternalRaftRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_InternalRaftRequest.Merge(m, src)
+}
+func (m *InternalRaftRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *InternalRaftRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_InternalRaftRequest.DiscardUnknown(m)
}
-func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} }
-func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) }
-func (*InternalRaftRequest) ProtoMessage() {}
-func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{1} }
+var xxx_messageInfo_InternalRaftRequest proto.InternalMessageInfo
type BatchInternalRaftRequest struct {
- ReqNum int32 `protobuf:"varint,1,opt,name=req_num,json=reqNum,proto3" json:"req_num,omitempty"`
- Reqs []*InternalRaftRequest `protobuf:"bytes,2,rep,name=reqs" json:"reqs,omitempty"`
- Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
- Type ReqSourceType `protobuf:"varint,4,opt,name=type,proto3,enum=node.ReqSourceType" json:"type,omitempty"`
- ReqId uint64 `protobuf:"varint,5,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
+ ReqNum int32 `protobuf:"varint,1,opt,name=req_num,json=reqNum,proto3" json:"req_num,omitempty"`
+ Reqs []InternalRaftRequest `protobuf:"bytes,2,rep,name=reqs,proto3" json:"reqs"`
+ Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ Type ReqSourceType `protobuf:"varint,4,opt,name=type,proto3,enum=node.ReqSourceType" json:"type,omitempty"`
+ ReqId uint64 `protobuf:"varint,5,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
// used for cluster log syncer
OrigTerm uint64 `protobuf:"varint,6,opt,name=orig_term,json=origTerm,proto3" json:"orig_term,omitempty"`
OrigIndex uint64 `protobuf:"varint,7,opt,name=orig_index,json=origIndex,proto3" json:"orig_index,omitempty"`
@@ -115,28 +168,122 @@ func (m *BatchInternalRaftRequest) Reset() { *m = BatchInternalRaftReque
func (m *BatchInternalRaftRequest) String() string { return proto.CompactTextString(m) }
func (*BatchInternalRaftRequest) ProtoMessage() {}
func (*BatchInternalRaftRequest) Descriptor() ([]byte, []int) {
- return fileDescriptorRaftInternal, []int{2}
+ return fileDescriptor_b4c9a9be0cfca103, []int{2}
+}
+func (m *BatchInternalRaftRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BatchInternalRaftRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_BatchInternalRaftRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *BatchInternalRaftRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BatchInternalRaftRequest.Merge(m, src)
+}
+func (m *BatchInternalRaftRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *BatchInternalRaftRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_BatchInternalRaftRequest.DiscardUnknown(m)
}
+var xxx_messageInfo_BatchInternalRaftRequest proto.InternalMessageInfo
+
type SchemaChange struct {
Type SchemaChangeType `protobuf:"varint,1,opt,name=Type,proto3,enum=node.SchemaChangeType" json:"Type,omitempty"`
Table string `protobuf:"bytes,2,opt,name=Table,proto3" json:"Table,omitempty"`
SchemaData []byte `protobuf:"bytes,3,opt,name=SchemaData,proto3" json:"SchemaData,omitempty"`
}
-func (m *SchemaChange) Reset() { *m = SchemaChange{} }
-func (m *SchemaChange) String() string { return proto.CompactTextString(m) }
-func (*SchemaChange) ProtoMessage() {}
-func (*SchemaChange) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{3} }
+func (m *SchemaChange) Reset() { *m = SchemaChange{} }
+func (m *SchemaChange) String() string { return proto.CompactTextString(m) }
+func (*SchemaChange) ProtoMessage() {}
+func (*SchemaChange) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b4c9a9be0cfca103, []int{3}
+}
+func (m *SchemaChange) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SchemaChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SchemaChange.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SchemaChange) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SchemaChange.Merge(m, src)
+}
+func (m *SchemaChange) XXX_Size() int {
+ return m.Size()
+}
+func (m *SchemaChange) XXX_DiscardUnknown() {
+ xxx_messageInfo_SchemaChange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SchemaChange proto.InternalMessageInfo
func init() {
+ proto.RegisterEnum("node.ReqSourceType", ReqSourceType_name, ReqSourceType_value)
+ proto.RegisterEnum("node.SchemaChangeType", SchemaChangeType_name, SchemaChangeType_value)
proto.RegisterType((*RequestHeader)(nil), "node.RequestHeader")
proto.RegisterType((*InternalRaftRequest)(nil), "node.InternalRaftRequest")
proto.RegisterType((*BatchInternalRaftRequest)(nil), "node.BatchInternalRaftRequest")
proto.RegisterType((*SchemaChange)(nil), "node.SchemaChange")
- proto.RegisterEnum("node.ReqSourceType", ReqSourceType_name, ReqSourceType_value)
- proto.RegisterEnum("node.SchemaChangeType", SchemaChangeType_name, SchemaChangeType_value)
}
+
+func init() { proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) }
+
+var fileDescriptor_b4c9a9be0cfca103 = []byte{
+ // 514 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0xcd, 0x6e, 0xda, 0x40,
+ 0x18, 0xf4, 0x1a, 0xf3, 0xf7, 0x41, 0x11, 0x5d, 0x92, 0xd6, 0x6d, 0x52, 0xc7, 0xe5, 0x52, 0x8b,
+ 0x03, 0x55, 0xe1, 0x09, 0x42, 0x50, 0x15, 0x5f, 0xaa, 0x6a, 0xa1, 0x97, 0xaa, 0x12, 0xda, 0xe0,
+ 0x2f, 0x80, 0x84, 0x7f, 0x58, 0x16, 0xa9, 0xbc, 0x45, 0x5f, 0xa2, 0xef, 0xc2, 0x91, 0x63, 0x4f,
+ 0x55, 0x03, 0x2f, 0x52, 0xed, 0xda, 0x12, 0x6e, 0x14, 0xf5, 0xb6, 0x3b, 0x33, 0xf2, 0xcc, 0x37,
+ 0x9f, 0x17, 0x5a, 0x82, 0xdf, 0xcb, 0xc9, 0x22, 0x92, 0x28, 0x22, 0xbe, 0xec, 0x26, 0x22, 0x96,
+ 0x31, 0xb5, 0xa2, 0x38, 0xc0, 0xd7, 0x67, 0xb3, 0x78, 0x16, 0x6b, 0xe0, 0xbd, 0x3a, 0xa5, 0x5c,
+ 0xfb, 0x2b, 0x3c, 0x63, 0xb8, 0xda, 0xe0, 0x5a, 0xde, 0x22, 0x0f, 0x50, 0xd0, 0x06, 0x98, 0xfe,
+ 0xd0, 0x26, 0x2e, 0xf1, 0x2c, 0x66, 0xfa, 0x43, 0x7a, 0x01, 0xd5, 0x80, 0x4b, 0x3e, 0x91, 0xdb,
+ 0x04, 0x6d, 0xd3, 0x25, 0x5e, 0x91, 0x55, 0x14, 0x30, 0xde, 0x26, 0x48, 0x2f, 0xa1, 0x2a, 0x17,
+ 0x21, 0xae, 0x25, 0x0f, 0x13, 0xbb, 0xe0, 0x12, 0xaf, 0xc0, 0x4e, 0x40, 0xfb, 0x1b, 0xb4, 0xfc,
+ 0x2c, 0x09, 0xe3, 0xf7, 0x32, 0xf3, 0xa1, 0x1f, 0xa0, 0x34, 0xd7, 0x5e, 0xda, 0xa5, 0xd6, 0x6b,
+ 0x75, 0x55, 0xbe, 0xee, 0x3f, 0x31, 0x06, 0xd6, 0xee, 0xf7, 0x95, 0xc1, 0x32, 0x21, 0xa5, 0x60,
+ 0x29, 0x4f, 0xed, 0x5f, 0x67, 0xfa, 0xdc, 0xfe, 0x69, 0x82, 0x3d, 0xe0, 0x72, 0x3a, 0x7f, 0xca,
+ 0xe3, 0x25, 0x94, 0x05, 0xae, 0x26, 0xd1, 0x26, 0xd4, 0x26, 0x45, 0x56, 0x12, 0xb8, 0xfa, 0xb4,
+ 0x09, 0x69, 0x1f, 0x2c, 0x81, 0xab, 0xb5, 0x6d, 0xba, 0x05, 0xaf, 0xd6, 0x7b, 0x95, 0x5a, 0x3f,
+ 0xf1, 0x85, 0x2c, 0x80, 0x16, 0xff, 0x7f, 0x4c, 0xfa, 0x0e, 0x2c, 0x5d, 0x8e, 0xe5, 0x12, 0xaf,
+ 0x91, 0x9b, 0x66, 0x14, 0x6f, 0xc4, 0x14, 0x55, 0x4f, 0x4c, 0x0b, 0xe8, 0x39, 0xa8, 0x14, 0x93,
+ 0x45, 0x60, 0x17, 0x75, 0xbd, 0x45, 0x81, 0x2b, 0x3f, 0x50, 0x0d, 0xc7, 0x62, 0x31, 0x9b, 0x48,
+ 0x14, 0xa1, 0x5d, 0xd2, 0x4c, 0x45, 0x01, 0x63, 0x14, 0x21, 0x7d, 0x03, 0xa0, 0xc9, 0x45, 0x14,
+ 0xe0, 0x77, 0xbb, 0xac, 0x59, 0x2d, 0xf7, 0x15, 0x40, 0xdf, 0x42, 0x5d, 0xd3, 0xd3, 0xe5, 0x66,
+ 0x2d, 0x51, 0xd8, 0x15, 0x97, 0x78, 0x55, 0x56, 0x53, 0xd8, 0x4d, 0x0a, 0xb5, 0x13, 0xa8, 0x8f,
+ 0xa6, 0x73, 0x0c, 0xf9, 0xcd, 0x9c, 0x47, 0x33, 0xa4, 0x1d, 0xb0, 0x54, 0x26, 0xdd, 0x4b, 0xa3,
+ 0xf7, 0x22, 0x8d, 0x9b, 0x57, 0xa4, 0x89, 0xf5, 0x7e, 0xcf, 0xa0, 0x38, 0xe6, 0x77, 0xcb, 0x74,
+ 0xf1, 0x55, 0x96, 0x5e, 0xa8, 0x03, 0x90, 0xea, 0x87, 0x6a, 0x27, 0x05, 0xbd, 0x93, 0x1c, 0xd2,
+ 0xe9, 0xeb, 0x7f, 0xea, 0x34, 0x3e, 0xad, 0x41, 0xf9, 0xa3, 0x88, 0xc3, 0xeb, 0xcf, 0x7e, 0xd3,
+ 0xa0, 0xe7, 0xf0, 0x5c, 0x5d, 0xb2, 0x78, 0xa3, 0x6d, 0x34, 0x45, 0xd1, 0x24, 0x1d, 0x01, 0xcd,
+ 0xc7, 0x21, 0xe8, 0x25, 0xd8, 0x79, 0xec, 0x3a, 0x08, 0x6e, 0xd7, 0x28, 0xf5, 0xe4, 0x4d, 0x83,
+ 0x5e, 0xc1, 0x45, 0x9e, 0xfd, 0x92, 0x04, 0x5c, 0xe2, 0x49, 0x40, 0x1e, 0x0b, 0x86, 0xb8, 0xc4,
+ 0xbc, 0xc0, 0x1c, 0xb8, 0xbb, 0x07, 0xc7, 0xd8, 0x3f, 0x38, 0xc6, 0xee, 0xe0, 0x90, 0xfd, 0xc1,
+ 0x21, 0x7f, 0x0e, 0x0e, 0xf9, 0x71, 0x74, 0x8c, 0xfd, 0xd1, 0x31, 0x7e, 0x1d, 0x1d, 0xe3, 0xae,
+ 0xa4, 0x5f, 0x49, 0xff, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3a, 0x85, 0x6c, 0x1a, 0x58, 0x03,
+ 0x00, 0x00,
+}
+
func (m *RequestHeader) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -185,16 +332,14 @@ func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- if m.Header != nil {
- dAtA[i] = 0xa
- i++
- i = encodeVarintRaftInternal(dAtA, i, uint64(m.Header.Size()))
- n1, err := m.Header.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n1
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.Header.Size()))
+ n1, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
}
+ i += n1
if len(m.Data) > 0 {
dAtA[i] = 0x12
i++
@@ -315,6 +460,9 @@ func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int {
return offset + 1
}
func (m *RequestHeader) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
if m.ID != 0 {
@@ -330,12 +478,13 @@ func (m *RequestHeader) Size() (n int) {
}
func (m *InternalRaftRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
+ l = m.Header.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
l = len(m.Data)
if l > 0 {
n += 1 + l + sovRaftInternal(uint64(l))
@@ -344,6 +493,9 @@ func (m *InternalRaftRequest) Size() (n int) {
}
func (m *BatchInternalRaftRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
if m.ReqNum != 0 {
@@ -378,6 +530,9 @@ func (m *BatchInternalRaftRequest) Size() (n int) {
}
func (m *SchemaChange) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
if m.Type != 0 {
@@ -422,7 +577,7 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -450,7 +605,7 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.ID |= (uint64(b) & 0x7F) << shift
+ m.ID |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -469,7 +624,7 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.DataType |= (int32(b) & 0x7F) << shift
+ m.DataType |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -488,7 +643,7 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Timestamp |= (int64(b) & 0x7F) << shift
+ m.Timestamp |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -502,6 +657,9 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRaftInternal
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -529,7 +687,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -557,7 +715,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -566,12 +724,12 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaftInternal
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Header == nil {
- m.Header = &RequestHeader{}
- }
if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@@ -590,7 +748,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -599,6 +757,9 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaftInternal
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -616,6 +777,9 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRaftInternal
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -643,7 +807,7 @@ func (m *BatchInternalRaftRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -671,7 +835,7 @@ func (m *BatchInternalRaftRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.ReqNum |= (int32(b) & 0x7F) << shift
+ m.ReqNum |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -690,7 +854,7 @@ func (m *BatchInternalRaftRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -699,10 +863,13 @@ func (m *BatchInternalRaftRequest) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaftInternal
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Reqs = append(m.Reqs, &InternalRaftRequest{})
+ m.Reqs = append(m.Reqs, InternalRaftRequest{})
if err := m.Reqs[len(m.Reqs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@@ -721,7 +888,7 @@ func (m *BatchInternalRaftRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Timestamp |= (int64(b) & 0x7F) << shift
+ m.Timestamp |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -740,7 +907,7 @@ func (m *BatchInternalRaftRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Type |= (ReqSourceType(b) & 0x7F) << shift
+ m.Type |= ReqSourceType(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -759,7 +926,7 @@ func (m *BatchInternalRaftRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.ReqId |= (uint64(b) & 0x7F) << shift
+ m.ReqId |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -778,7 +945,7 @@ func (m *BatchInternalRaftRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.OrigTerm |= (uint64(b) & 0x7F) << shift
+ m.OrigTerm |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -797,7 +964,7 @@ func (m *BatchInternalRaftRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.OrigIndex |= (uint64(b) & 0x7F) << shift
+ m.OrigIndex |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -816,7 +983,7 @@ func (m *BatchInternalRaftRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -826,6 +993,9 @@ func (m *BatchInternalRaftRequest) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaftInternal
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -840,6 +1010,9 @@ func (m *BatchInternalRaftRequest) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRaftInternal
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -867,7 +1040,7 @@ func (m *SchemaChange) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -895,7 +1068,7 @@ func (m *SchemaChange) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Type |= (SchemaChangeType(b) & 0x7F) << shift
+ m.Type |= SchemaChangeType(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -914,7 +1087,7 @@ func (m *SchemaChange) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -924,6 +1097,9 @@ func (m *SchemaChange) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaftInternal
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -943,7 +1119,7 @@ func (m *SchemaChange) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -952,6 +1128,9 @@ func (m *SchemaChange) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaftInternal
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -969,6 +1148,9 @@ func (m *SchemaChange) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRaftInternal
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -1035,10 +1217,13 @@ func skipRaftInternal(dAtA []byte) (n int, err error) {
break
}
}
- iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthRaftInternal
}
+ iNdEx += length
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthRaftInternal
+ }
return iNdEx, nil
case 3:
for {
@@ -1067,6 +1252,9 @@ func skipRaftInternal(dAtA []byte) (n int, err error) {
return 0, err
}
iNdEx = start + next
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthRaftInternal
+ }
}
return iNdEx, nil
case 4:
@@ -1085,41 +1273,3 @@ var (
ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow")
)
-
-func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftInternal) }
-
-var fileDescriptorRaftInternal = []byte{
- // 500 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0xcd, 0x6e, 0xda, 0x4c,
- 0x14, 0x65, 0x8c, 0x21, 0x70, 0xe1, 0x43, 0x7c, 0x43, 0xd2, 0x4e, 0x9b, 0xd4, 0x75, 0xd9, 0xd4,
- 0xa2, 0x2a, 0x95, 0xc8, 0x13, 0x24, 0x41, 0x55, 0xbc, 0xa9, 0xaa, 0x81, 0x76, 0xd1, 0x0d, 0x9a,
- 0xe0, 0x1b, 0x40, 0xc2, 0x3f, 0x0c, 0x83, 0x54, 0xde, 0xa4, 0xbb, 0xbe, 0x4e, 0x96, 0x79, 0x84,
- 0x86, 0xbe, 0x48, 0x35, 0xd7, 0x48, 0xb8, 0x51, 0xd4, 0xdd, 0xcc, 0x39, 0x47, 0x73, 0xce, 0x3d,
- 0xd7, 0x86, 0x8e, 0x56, 0xb7, 0x66, 0xb2, 0x48, 0x0c, 0xea, 0x44, 0x2d, 0xfb, 0x99, 0x4e, 0x4d,
- 0xca, 0xdd, 0x24, 0x8d, 0xf0, 0xe5, 0xf1, 0x2c, 0x9d, 0xa5, 0x04, 0x7c, 0xb0, 0xa7, 0x9c, 0xeb,
- 0x7e, 0x83, 0xff, 0x24, 0xae, 0x36, 0xb8, 0x36, 0xd7, 0xa8, 0x22, 0xd4, 0xbc, 0x05, 0x4e, 0x38,
- 0x14, 0xcc, 0x67, 0x81, 0x2b, 0x9d, 0x70, 0xc8, 0x4f, 0xa1, 0x1e, 0x29, 0xa3, 0x26, 0x66, 0x9b,
- 0xa1, 0x70, 0x7c, 0x16, 0x54, 0x64, 0xcd, 0x02, 0xe3, 0x6d, 0x86, 0xfc, 0x0c, 0xea, 0x66, 0x11,
- 0xe3, 0xda, 0xa8, 0x38, 0x13, 0x65, 0x9f, 0x05, 0x65, 0x79, 0x00, 0xba, 0x5f, 0xa1, 0x13, 0xee,
- 0x93, 0x48, 0x75, 0x6b, 0xf6, 0x3e, 0xfc, 0x1d, 0x54, 0xe7, 0xe4, 0x45, 0x2e, 0x8d, 0x41, 0xa7,
- 0x6f, 0xf3, 0xf5, 0xff, 0x8a, 0x21, 0xf7, 0x12, 0xce, 0xc1, 0xb5, 0x6e, 0xe4, 0xdc, 0x94, 0x74,
- 0xee, 0xfe, 0x74, 0x40, 0x5c, 0x2a, 0x33, 0x9d, 0x3f, 0xf5, 0xfa, 0x73, 0x38, 0xd2, 0xb8, 0x9a,
- 0x24, 0x9b, 0x98, 0x9e, 0xaf, 0xc8, 0xaa, 0xc6, 0xd5, 0xa7, 0x4d, 0xcc, 0xdf, 0x83, 0xab, 0x71,
- 0xb5, 0x16, 0x8e, 0x5f, 0x0e, 0x1a, 0x83, 0x17, 0xb9, 0xe9, 0x13, 0x2f, 0x48, 0x92, 0xfd, 0x7b,
- 0x34, 0xfe, 0x16, 0x5c, 0x2a, 0xc4, 0xf5, 0x59, 0xd0, 0x2a, 0x4c, 0x30, 0x4a, 0x37, 0x7a, 0x8a,
- 0xb6, 0x1b, 0x49, 0x02, 0x7e, 0x02, 0xd6, 0x7f, 0xb2, 0x88, 0x44, 0x85, 0x2a, 0xad, 0x68, 0x5c,
- 0x85, 0x91, 0x6d, 0x35, 0xd5, 0x8b, 0xd9, 0xc4, 0xa0, 0x8e, 0x45, 0x95, 0x98, 0x9a, 0x05, 0xc6,
- 0xa8, 0x63, 0xfe, 0x0a, 0x80, 0xc8, 0x45, 0x12, 0xe1, 0x77, 0x71, 0x44, 0x2c, 0xc9, 0x43, 0x0b,
- 0xf0, 0x37, 0xd0, 0x24, 0x7a, 0xba, 0xdc, 0xac, 0x0d, 0x6a, 0x51, 0xf3, 0x59, 0x50, 0x97, 0x0d,
- 0x8b, 0x5d, 0xe5, 0x50, 0x37, 0x83, 0xe6, 0x68, 0x3a, 0xc7, 0x58, 0x5d, 0xcd, 0x55, 0x32, 0x43,
- 0xde, 0x03, 0xd7, 0x66, 0xa2, 0x46, 0x5a, 0x83, 0x67, 0x79, 0xdc, 0xa2, 0x22, 0x4f, 0x4c, 0x3b,
- 0x3d, 0x86, 0xca, 0x58, 0xdd, 0x2c, 0xf3, 0x65, 0xd7, 0x65, 0x7e, 0xe1, 0x1e, 0x40, 0xae, 0x1f,
- 0xda, 0x6d, 0x94, 0x69, 0x1b, 0x05, 0xa4, 0x77, 0x4e, 0xdf, 0xd1, 0x61, 0x7c, 0xde, 0x80, 0xa3,
- 0x8f, 0x3a, 0x8d, 0x2f, 0x3e, 0x87, 0xed, 0x12, 0x3f, 0x81, 0xff, 0xed, 0x65, 0x1f, 0x6f, 0xb4,
- 0x4d, 0xa6, 0xa8, 0xdb, 0xac, 0xa7, 0xa1, 0xfd, 0x38, 0x04, 0x3f, 0x03, 0x51, 0xc4, 0x2e, 0xa2,
- 0xe8, 0x7a, 0x8d, 0x86, 0x26, 0x6f, 0x97, 0xf8, 0x6b, 0x38, 0x2d, 0xb2, 0x5f, 0xb2, 0x48, 0x19,
- 0x3c, 0x08, 0xd8, 0x63, 0xc1, 0x10, 0x97, 0x58, 0x14, 0x38, 0x97, 0xe2, 0xee, 0xc1, 0x2b, 0xdd,
- 0x3f, 0x78, 0xa5, 0xbb, 0x9d, 0xc7, 0xee, 0x77, 0x1e, 0xfb, 0xb5, 0xf3, 0xd8, 0x8f, 0xdf, 0x5e,
- 0xe9, 0xa6, 0x4a, 0x7f, 0xc4, 0xf9, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0xf8, 0x37, 0x42,
- 0x44, 0x03, 0x00, 0x00,
-}
diff --git a/node/raft_internal.proto b/node/raft_internal.proto
index 444b895b..81fb4b29 100644
--- a/node/raft_internal.proto
+++ b/node/raft_internal.proto
@@ -21,13 +21,13 @@ message RequestHeader {
}
message InternalRaftRequest {
- RequestHeader header = 1;
+ RequestHeader header = 1 [(gogoproto.nullable) = false];
bytes data = 2;
}
message BatchInternalRaftRequest {
int32 req_num = 1 ;
- repeated InternalRaftRequest reqs = 2;
+ repeated InternalRaftRequest reqs = 2 [(gogoproto.nullable) = false];
int64 timestamp = 3;
ReqSourceType type = 4;
uint64 req_id = 5;
diff --git a/node/raft_storage.go b/node/raft_storage.go
index 47e549bb..eaeb6dfc 100644
--- a/node/raft_storage.go
+++ b/node/raft_storage.go
@@ -1,10 +1,12 @@
package node
import (
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/snap"
- "github.com/absolute8511/ZanRedisDB/wal"
- "github.com/absolute8511/ZanRedisDB/wal/walpb"
+ "errors"
+
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/snap"
+ "github.com/youzan/ZanRedisDB/wal"
+ "github.com/youzan/ZanRedisDB/wal/walpb"
)
type raftPersistStorage struct {
@@ -19,17 +21,29 @@ func NewRaftPersistStorage(w *wal.WAL, s *snap.Snapshotter) IRaftPersistStorage
// SaveSnap saves the snapshot to disk and release the locked
// wal files since they will not be used.
func (st *raftPersistStorage) SaveSnap(snap raftpb.Snapshot) error {
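+ // enableSnapSaveTest is a test-only switch used to inject a snapshot save failure.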
+ if enableSnapSaveTest {
+ return errors.New("failed to save snapshot to raft in failed test")
+ }
walsnap := walpb.Snapshot{
Index: snap.Metadata.Index,
Term: snap.Metadata.Term,
}
- err := st.WAL.SaveSnapshot(walsnap)
+ // save the snapshot file before writing the snapshot to the wal.
+ // This makes it possible for the snapshot file to become orphaned, but prevents
+ // a WAL snapshot entry from having no corresponding snapshot file.
+ err := st.Snapshotter.SaveSnap(snap)
if err != nil {
return err
}
- err = st.Snapshotter.SaveSnap(snap)
- if err != nil {
+ return st.WAL.SaveSnapshot(walsnap)
+}
+
+// Release releases resources that are older than the given snap and no longer needed:
+// - releases the locks to the wal files that are older than the provided wal for the given snap.
+// - deletes any .snap.db files that are older than the given snap.
+func (st *raftPersistStorage) Release(snap raftpb.Snapshot) error {
+ if err := st.WAL.ReleaseLockTo(snap.Metadata.Index); err != nil {
return err
}
- return st.WAL.ReleaseLockTo(snap.Metadata.Index)
+ return st.Snapshotter.ReleaseSnapDBs(snap)
}
diff --git a/node/raft_test.go b/node/raft_test.go
index 355b4c81..64639738 100644
--- a/node/raft_test.go
+++ b/node/raft_test.go
@@ -21,12 +21,13 @@ import (
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/pkg/pbutil"
- "github.com/absolute8511/ZanRedisDB/pkg/testutil"
- "github.com/absolute8511/ZanRedisDB/raft"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/transport/rafthttp"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/pkg/pbutil"
+ "github.com/youzan/ZanRedisDB/pkg/testutil"
+ "github.com/youzan/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/transport/rafthttp"
+ "github.com/youzan/ZanRedisDB/wal/walpb"
"golang.org/x/net/context"
)
@@ -36,7 +37,10 @@ func newNodeRecorder() *nodeRecorder { return &nodeRecorder{&testutil.Reco
func newNodeRecorderStream() *nodeRecorder { return &nodeRecorder{testutil.NewRecorderStream()} }
func newNodeNop() raft.Node { return newNodeRecorder() }
-func (n *nodeRecorder) Tick() { n.Record(testutil.Action{Name: "Tick"}) }
+func (n *nodeRecorder) Tick() bool {
+ n.Record(testutil.Action{Name: "Tick"})
+ return true
+}
func (n *nodeRecorder) Campaign(ctx context.Context) error {
n.Record(testutil.Action{Name: "Campaign"})
return nil
@@ -51,6 +55,11 @@ func (n *nodeRecorder) ProposeWithDrop(ctx context.Context, data []byte, cancel
return nil
}
+func (n *nodeRecorder) ProposeEntryWithDrop(ctx context.Context, e raftpb.Entry, cancel context.CancelFunc) error {
+ n.Record(testutil.Action{Name: "Propose", Params: []interface{}{e.Data}})
+ return nil
+}
+
func (n *nodeRecorder) ProposeConfChange(ctx context.Context, conf raftpb.ConfChange) error {
n.Record(testutil.Action{Name: "ProposeConfChange"})
return nil
@@ -59,11 +68,16 @@ func (n *nodeRecorder) Step(ctx context.Context, msg raftpb.Message) error {
n.Record(testutil.Action{Name: "Step"})
return nil
}
+func (n *nodeRecorder) ConfChangedCh() <-chan raftpb.ConfChange { return nil }
+func (n *nodeRecorder) HandleConfChanged(cc raftpb.ConfChange) { return }
+func (n *nodeRecorder) EventNotifyCh() chan bool { return nil }
+func (n *nodeRecorder) NotifyEventCh() { return }
+func (n *nodeRecorder) StepNode(bool, bool) (raft.Ready, bool) { return raft.Ready{}, true }
func (n *nodeRecorder) Status() raft.Status { return raft.Status{} }
func (n *nodeRecorder) Ready() <-chan raft.Ready { return nil }
func (n *nodeRecorder) TransferLeadership(ctx context.Context, lead, transferee uint64) {}
func (n *nodeRecorder) ReadIndex(ctx context.Context, rctx []byte) error { return nil }
-func (n *nodeRecorder) Advance() {}
+func (n *nodeRecorder) Advance(rd raft.Ready) {}
func (n *nodeRecorder) ApplyConfChange(conf raftpb.ConfChange) *raftpb.ConfState {
n.Record(testutil.Action{Name: "ApplyConfChange", Params: []interface{}{conf}})
return &raftpb.ConfState{}
@@ -72,6 +86,7 @@ func (n *nodeRecorder) ApplyConfChange(conf raftpb.ConfChange) *raftpb.ConfState
func (n *nodeRecorder) Stop() {
n.Record(testutil.Action{Name: "Stop"})
}
+func (n *nodeRecorder) DebugString() string { return "" }
func (n *nodeRecorder) ReportUnreachable(id uint64, g raftpb.Group) {}
@@ -85,18 +100,40 @@ func (n *nodeRecorder) Compact(index uint64, nodes []uint64, d []byte) {
type readyNode struct {
nodeRecorder
readyc chan raft.Ready
+ c chan bool
}
func newReadyNode() *readyNode {
return &readyNode{
- nodeRecorder{testutil.NewRecorderStream()},
- make(chan raft.Ready, 1)}
+ nodeRecorder: nodeRecorder{testutil.NewRecorderStream()},
+ c: make(chan bool, 1),
+ readyc: make(chan raft.Ready, 1)}
}
+
func newNopReadyNode() *readyNode {
- return &readyNode{*newNodeRecorder(), make(chan raft.Ready, 1)}
+ return &readyNode{*newNodeRecorder(), make(chan raft.Ready, 1), make(chan bool, 1)}
+}
+
+func (n *readyNode) EventNotifyCh() chan bool { return n.c }
+
+func (n *readyNode) pushReady(rd raft.Ready) {
+ select {
+ case n.readyc <- rd:
+ }
+ select {
+ case n.c <- true:
+ default:
+ }
}
-func (n *readyNode) Ready() <-chan raft.Ready { return n.readyc }
+func (n *readyNode) StepNode(bool, bool) (raft.Ready, bool) {
+ select {
+ case rd := <-n.readyc:
+ return rd, true
+ default:
+ }
+ return raft.Ready{}, false
+}
type storageRecorder struct {
testutil.Recorder
@@ -116,9 +153,14 @@ func (p *storageRecorder) Save(st raftpb.HardState, ents []raftpb.Entry) error {
return nil
}
-func (p *storageRecorder) Load() (*raftpb.Snapshot, string, error) {
+func (p *storageRecorder) Load() (*raftpb.Snapshot, error) {
p.Record(testutil.Action{Name: "Load"})
- return nil, "", nil
+ return nil, nil
+}
+
+func (p *storageRecorder) LoadNewestAvailable(walSnaps []walpb.Snapshot) (*raftpb.Snapshot, error) {
+ p.Record(testutil.Action{Name: "LoadNewestAvailable"})
+ return nil, nil
}
func (p *storageRecorder) SaveSnap(st raftpb.Snapshot) error {
@@ -128,6 +170,18 @@ func (p *storageRecorder) SaveSnap(st raftpb.Snapshot) error {
return nil
}
+func (p *storageRecorder) Release(st raftpb.Snapshot) error {
+ if !raft.IsEmptySnap(st) {
+ p.Record(testutil.Action{Name: "Release"})
+ }
+ return nil
+}
+
+func (p *storageRecorder) Sync() error {
+ p.Record(testutil.Action{Name: "Sync"})
+ return nil
+}
+
func (p *storageRecorder) DBFilePath(id uint64) (string, error) {
p.Record(testutil.Action{Name: "DBFilePath"})
path := p.dbPath
@@ -357,7 +411,14 @@ func TestStopRaftWhenWaitingForApplyDone(t *testing.T) {
r.serveChannels()
close(done)
}()
- n.readyc <- raft.Ready{}
+ n.pushReady(raft.Ready{
+ Snapshot: raftpb.Snapshot{
+ Metadata: raftpb.SnapshotMetadata{
+ Term: 1,
+ Index: 1,
+ },
+ },
+ })
select {
case <-commitC:
case <-time.After(time.Second):
@@ -398,10 +459,10 @@ func TestConfgChangeBlocksApply(t *testing.T) {
}()
defer close(r.stopc)
- n.readyc <- raft.Ready{
+ n.pushReady(raft.Ready{
SoftState: &raft.SoftState{RaftState: raft.StateFollower},
CommittedEntries: []raftpb.Entry{{Type: raftpb.EntryConfChange}},
- }
+ })
blockingEnt := <-commitC
if blockingEnt.applyWaitDone == nil {
t.Fatalf("unexpected nil chan, should init wait channel for waiting apply conf change event")
@@ -409,7 +470,14 @@ func TestConfgChangeBlocksApply(t *testing.T) {
continueC := make(chan struct{})
go func() {
- n.readyc <- raft.Ready{}
+ n.pushReady(raft.Ready{
+ Snapshot: raftpb.Snapshot{
+ Metadata: raftpb.SnapshotMetadata{
+ Term: 1,
+ Index: 2,
+ },
+ },
+ })
<-commitC
close(continueC)
}()
@@ -429,3 +497,107 @@ func TestConfgChangeBlocksApply(t *testing.T) {
t.Fatalf("unexpected blocking on execution")
}
}
+
+type fakeDataStorage struct {
+}
+
+func (*fakeDataStorage) CleanData() error { return nil }
+func (*fakeDataStorage) RestoreFromSnapshot(raftpb.Snapshot) error { return nil }
+func (*fakeDataStorage) PrepareSnapshot(raftpb.Snapshot) error { return nil }
+func (*fakeDataStorage) GetSnapshot(term uint64, index uint64) (Snapshot, error) { return nil, nil }
+func (*fakeDataStorage) UpdateSnapshotState(term uint64, index uint64) {}
+func (*fakeDataStorage) Stop() {}
+func TestSnapshotApplyingShouldBlock(t *testing.T) {
+ // TODO: a node applying a slow snapshot should not become leader when campaigning
+ n := newNopReadyNode()
+ config := &RaftConfig{
+ GroupID: 1,
+ GroupName: "testgroup",
+ ID: 1,
+ RaftAddr: "127.0.0.1:1239",
+ }
+ commitC := make(chan applyInfo, 10)
+ r := &raftNode{
+ ds: &fakeDataStorage{},
+ config: config,
+ commitC: commitC,
+ node: n,
+ persistStorage: NewStorageRecorder(""),
+ raftStorage: raft.NewMemoryStorage(),
+ transport: rafthttp.NewNopTransporter(),
+ stopc: make(chan struct{}),
+ }
+
+ go func() {
+ r.serveChannels()
+ }()
+ defer close(r.stopc)
+
+ n.pushReady(raft.Ready{
+ SoftState: &raft.SoftState{RaftState: raft.StateFollower},
+ Snapshot: raftpb.Snapshot{
+ Metadata: raftpb.SnapshotMetadata{
+ Term: 1,
+ Index: 2,
+ },
+ },
+ })
+ blockingEnt := <-commitC
+ if blockingEnt.applyWaitDone == nil {
+ t.Fatalf("unexpected nil chan, should init wait channel for waiting apply snapshot event")
+ }
+ if blockingEnt.applySnapshotResult == nil {
+ t.Fatalf("unexpected nil chan, should init wait channel for waiting apply snapshot event")
+ }
+ continueC := make(chan struct{})
+ go func() {
+ n.pushReady(raft.Ready{
+ Snapshot: raftpb.Snapshot{
+ Metadata: raftpb.SnapshotMetadata{
+ Term: 1,
+ Index: 3,
+ },
+ },
+ })
+ select {
+ case blockingEnt2 := <-commitC:
+ if blockingEnt2.applyWaitDone == nil {
+ t.Fatalf("unexpected nil chan, should init wait channel for waiting apply conf change event")
+ }
+ // finish apply, unblock raft routine
+ blockingEnt2.applySnapshotResult <- nil
+ <-blockingEnt2.raftDone
+ close(blockingEnt2.applyWaitDone)
+ case <-time.After(time.Second * 3):
+ t.Fatalf("unexpected blocking on execution")
+ }
+
+ close(continueC)
+ }()
+
+ select {
+ case <-continueC:
+ t.Fatalf("unexpected execution: raft routine should block waiting for apply")
+ case <-time.After(time.Second):
+ }
+
+ // finish apply, unblock raft routine
+ blockingEnt.applySnapshotResult <- nil
+ <-blockingEnt.raftDone
+ <-blockingEnt.raftDone
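+	// raftDone is signaled twice when a follower handles a snapshot: once after the
+	// snapshot has been persisted and synced, and again after the entries have been
+	// appended to the raft storage.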
+ close(blockingEnt.applyWaitDone)
+
+ select {
+ case <-continueC:
+ case <-time.After(time.Second * 4):
+ t.Fatalf("unexpected blocking on execution")
+ }
+}
+
+func TestSnapshotPreTransferRetryOnFail(t *testing.T) {
+ // TODO: a failed snapshot receive should retry after report failed
+}
+
+func TestSlowApplyingShouldPauseRaftStep(t *testing.T) {
+ // TODO: slow in state machine should pause raft step to avoid too much memory for raft logs
+}
diff --git a/node/remote_sync_mgr.go b/node/remote_sync_mgr.go
index 2fe794b4..5fea12f4 100644
--- a/node/remote_sync_mgr.go
+++ b/node/remote_sync_mgr.go
@@ -6,9 +6,11 @@ import (
"sync"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/metric"
)
+const syncStateTimeout = time.Minute * 5
+
type SyncedState struct {
SyncedTerm uint64 `json:"synced_term,omitempty"`
SyncedIndex uint64 `json:"synced_index,omitempty"`
@@ -86,6 +88,7 @@ func (rss *remoteSyncedStateMgr) RemoveApplyingSnap(name string, state SyncedSta
func (rss *remoteSyncedStateMgr) AddApplyingSnap(name string, state SyncedState) (*SnapApplyStatus, bool) {
added := false
rss.Lock()
+ defer rss.Unlock()
sas, ok := rss.remoteSnapshotsApplying[name]
canAdd := false
if !ok {
@@ -96,7 +99,14 @@ func (rss *remoteSyncedStateMgr) AddApplyingSnap(name string, state SyncedState)
} else if sas.StatusCode == ApplySnapBegin || sas.StatusCode == ApplySnapFailed || sas.StatusCode == ApplySnapApplying {
// begin -> transferring, may lost if proposal dropped,
// so we check the time and restart
- if time.Since(sas.UpdatedTime) > proposeTimeout*10 {
+ if time.Since(sas.UpdatedTime) > syncStateTimeout {
+ delete(rss.remoteSnapshotsApplying, name)
+ canAdd = true
+ }
+ } else if !sas.SS.IsNewer(&state) {
+ if time.Since(sas.UpdatedTime) > syncStateTimeout {
+ nodeLog.Infof("%v got newer snapshot %v, old is %v", name,
+ state, sas)
delete(rss.remoteSnapshotsApplying, name)
canAdd = true
}
@@ -105,13 +115,12 @@ func (rss *remoteSyncedStateMgr) AddApplyingSnap(name string, state SyncedState)
sas = &SnapApplyStatus{
SS: state,
StatusCode: ApplySnapBegin,
- Status: applyStatusMsgs[1],
+ Status: applyStatusMsgs[ApplySnapBegin],
UpdatedTime: time.Now(),
}
rss.remoteSnapshotsApplying[name] = sas
added = true
}
- rss.Unlock()
return sas, added
}
@@ -140,20 +149,23 @@ func (rss *remoteSyncedStateMgr) UpdateState(name string, state SyncedState) {
rss.remoteSyncedStates[name] = state
rss.Unlock()
}
+
func (rss *remoteSyncedStateMgr) GetState(name string) (SyncedState, bool) {
rss.RLock()
state, ok := rss.remoteSyncedStates[name]
rss.RUnlock()
return state, ok
}
+
func (rss *remoteSyncedStateMgr) RestoreStates(ss map[string]SyncedState) {
rss.Lock()
rss.remoteSyncedStates = make(map[string]SyncedState, len(ss))
- for k, v := range rss.remoteSyncedStates {
+ for k, v := range ss {
rss.remoteSyncedStates[k] = v
}
rss.Unlock()
}
+
func (rss *remoteSyncedStateMgr) Clone() map[string]SyncedState {
rss.RLock()
clone := make(map[string]SyncedState, len(rss.remoteSyncedStates))
@@ -164,6 +176,19 @@ func (rss *remoteSyncedStateMgr) Clone() map[string]SyncedState {
return clone
}
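+// isContinueCommit reports whether the synced request from the origin cluster
+// follows the last synced index without a gap; a gap means some entries were
+// missed and the commit should not continue.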
+func (nd *KVNode) isContinueCommit(reqList BatchInternalRaftRequest) bool {
+ oldState, ok := nd.remoteSyncedStates.GetState(reqList.OrigCluster)
+ if ok {
+ if reqList.OrigIndex > oldState.SyncedIndex+1 {
+ nd.rn.Infof("request %v is not continue while sync : %v",
+ reqList.OrigIndex, oldState)
+ return false
+ }
+ }
+ // not found, we consider this the first init
+ return true
+}
+
func (nd *KVNode) isAlreadyApplied(reqList BatchInternalRaftRequest) bool {
oldState, ok := nd.remoteSyncedStates.GetState(reqList.OrigCluster)
if ok {
@@ -207,8 +232,11 @@ func (nd *KVNode) preprocessRemoteSnapApply(reqList BatchInternalRaftRequest) (b
return false, false
}
-func (nd *KVNode) postprocessRemoteSnapApply(reqList BatchInternalRaftRequest,
+func (nd *KVNode) postprocessRemoteApply(reqList BatchInternalRaftRequest,
isRemoteSnapTransfer bool, isRemoteSnapApply bool, retErr error) {
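+	// requests without an origin term/index were not produced by the cluster
+	// syncer, so there is no remote synced state to update.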
+ if reqList.OrigTerm == 0 && reqList.OrigIndex == 0 {
+ return
+ }
ss := SyncedState{SyncedTerm: reqList.OrigTerm, SyncedIndex: reqList.OrigIndex, Timestamp: reqList.Timestamp}
// for remote snapshot transfer, we need wait apply success before update sync state
if !isRemoteSnapTransfer {
@@ -239,7 +267,7 @@ func (nd *KVNode) GetRemoteClusterSyncedRaft(name string) (uint64, uint64, int64
return state.SyncedTerm, state.SyncedIndex, state.Timestamp
}
-func (nd *KVNode) GetLogSyncStatsInSyncLearner() (*common.LogSyncStats, *common.LogSyncStats) {
+func (nd *KVNode) GetLogSyncStatsInSyncLearner() (*metric.LogSyncStats, *metric.LogSyncStats) {
logSyncer, ok := nd.sm.(*logSyncerSM)
if !ok {
return nil, nil
@@ -271,7 +299,12 @@ func (nd *KVNode) ApplyRemoteSnapshot(skip bool, name string, term uint64, index
}
if oldS.StatusCode != ApplySnapTransferred {
nd.rn.Infof("remote cluster %v snapshot not ready for apply: %v", name, oldS)
- return errors.New("apply remote snapshot status invalid")
+ // it may have been changed to applying but the proposal failed, so we need to propose again
+ if oldS.StatusCode == ApplySnapApplying && time.Since(oldS.UpdatedTime) > syncStateTimeout {
+ nd.rn.Infof("remote cluster %v snapshot waiting applying too long: %v", name, oldS)
+ } else {
+ return errors.New("apply remote snapshot status invalid")
+ }
}
// set the snap status to applying and the snap status will be updated if apply done or failed
nd.remoteSyncedStates.UpdateApplyingSnapStatus(name, oldS.SS, ApplySnapApplying)
@@ -290,7 +323,7 @@ func (nd *KVNode) ApplyRemoteSnapshot(skip bool, name string, term uint64, index
p.ProposeOp = ProposeOp_ApplySkippedRemoteSnap
}
d, _ := json.Marshal(p)
- h := &RequestHeader{
+ h := RequestHeader{
ID: 0,
DataType: int32(CustomReq),
}
@@ -298,9 +331,8 @@ func (nd *KVNode) ApplyRemoteSnapshot(skip bool, name string, term uint64, index
Header: h,
Data: d,
}
- reqList.Reqs = append(reqList.Reqs, &raftReq)
- buf, _ := reqList.Marshal()
- err := nd.ProposeRawAndWait(buf, term, index, reqList.Timestamp)
+ reqList.Reqs = append(reqList.Reqs, raftReq)
+ err := nd.ProposeRawAndWaitFromSyncer(&reqList, term, index, reqList.Timestamp)
if err != nil {
nd.rn.Infof("cluster %v applying snap %v-%v failed", name, term, index)
// just wait next retry
@@ -311,13 +343,21 @@ func (nd *KVNode) ApplyRemoteSnapshot(skip bool, name string, term uint64, index
}
func (nd *KVNode) BeginTransferRemoteSnap(name string, term uint64, index uint64, syncAddr string, syncPath string) error {
+ // we should disallow transferring a remote snap while we are running as the master cluster
+ if !IsSyncerOnly() {
+ nd.rn.Infof("cluster %v snapshot is not allowed: %v-%v", name, term, index)
+ return errors.New("remote snapshot is not allowed while not in syncer only mode")
+ }
+
ss := SyncedState{SyncedTerm: term, SyncedIndex: index}
// set the snap status to begin and the snap status will be updated if transfer begin
// if transfer failed to propose, after some timeout it will be removed while adding
old, added := nd.remoteSyncedStates.AddApplyingSnap(name, ss)
if !added {
nd.rn.Infof("cluster %v applying snap %v already running while apply %v", name, old, ss)
- return nil
+ if !old.SS.IsSame(&ss) {
+ return errors.New("another snapshot applying")
+ }
}
p := &customProposeData{
@@ -334,7 +374,7 @@ func (nd *KVNode) BeginTransferRemoteSnap(name string, term uint64, index uint64
reqList.ReqNum = 1
reqList.Timestamp = time.Now().UnixNano()
- h := &RequestHeader{
+ h := RequestHeader{
ID: 0,
DataType: int32(CustomReq),
}
@@ -342,15 +382,14 @@ func (nd *KVNode) BeginTransferRemoteSnap(name string, term uint64, index uint64
Header: h,
Data: d,
}
- reqList.Reqs = append(reqList.Reqs, &raftReq)
- buf, _ := reqList.Marshal()
- err := nd.ProposeRawAndWait(buf, term, index, reqList.Timestamp)
+ reqList.Reqs = append(reqList.Reqs, raftReq)
+ err := nd.ProposeRawAndWaitFromSyncer(&reqList, term, index, reqList.Timestamp)
if err != nil {
nd.rn.Infof("cluster %v applying transfer snap %v failed", name, ss)
} else {
nd.rn.Infof("cluster %v applying transfer snap %v done", name, ss)
}
- return nil
+ return err
}
func (nd *KVNode) GetApplyRemoteSnapStatus(name string) (*SnapApplyStatus, bool) {
diff --git a/node/scan.go b/node/scan.go
index 30fb30cf..342d55e1 100644
--- a/node/scan.go
+++ b/node/scan.go
@@ -6,8 +6,8 @@ import (
"strconv"
"strings"
- "github.com/absolute8511/ZanRedisDB/common"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
)
func parseScanArgs(args [][]byte) (cursor []byte, match string, count int, err error) {
@@ -54,6 +54,11 @@ func parseScanArgs(args [][]byte) (cursor []byte, match string, count int, err e
// TODO: for scan we act like the prefix scan, if the prefix changed , we should stop scan
func (nd *KVNode) scanCommand(cmd redcon.Command) (interface{}, error) {
+ scanName := strings.ToLower(string(cmd.Args[0]))
+ reverse := false
+ if scanName == "revscan" {
+ reverse = true
+ }
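+	// revscan reuses the scan handler and only flips the iteration direction
+	// passed down to the store (the same pattern is used for the other *revscan
+	// commands below).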
args := cmd.Args[1:]
cursor, match, count, err := parseScanArgs(args)
@@ -66,7 +71,7 @@ func (nd *KVNode) scanCommand(cmd redcon.Command) (interface{}, error) {
return nil, common.ErrInvalidScanCursor
}
- ay, err := nd.store.Scan(common.KV, cursor, count, match)
+ ay, err := nd.store.Scan(common.KV, cursor, count, match, reverse)
if err != nil {
return &common.ScanResult{Keys: nil, NextCursor: nil, PartionId: "", Error: err}, err
}
@@ -126,10 +131,15 @@ func (nd *KVNode) advanceScanCommand(cmd redcon.Command) (interface{}, error) {
default:
return &common.ScanResult{Keys: nil, NextCursor: nil, Error: common.ErrInvalidScanType}, common.ErrInvalidScanType
}
- _, key, err := common.ExtractNamesapce(cmd.Args[1])
+ key, err := common.CutNamesapce(cmd.Args[1])
if err != nil {
return &common.ScanResult{Keys: nil, NextCursor: nil, PartionId: "", Error: err}, err
}
+ scanName := strings.ToLower(string(cmd.Args[0]))
+ reverse := false
+ if scanName == "advrevscan" {
+ reverse = true
+ }
cmd.Args[1] = key
cmd.Args[1], cmd.Args[2] = cmd.Args[2], cmd.Args[1]
@@ -145,7 +155,7 @@ func (nd *KVNode) advanceScanCommand(cmd redcon.Command) (interface{}, error) {
var ay [][]byte
- ay, err = nd.store.Scan(dataType, cursor, count, match)
+ ay, err = nd.store.Scan(dataType, cursor, count, match, reverse)
if err != nil {
return &common.ScanResult{Keys: nil, NextCursor: nil, PartionId: "", Error: err}, err
@@ -193,6 +203,11 @@ func (nd *KVNode) hscanCommand(conn redcon.Conn, cmd redcon.Command) {
conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
return
}
+ scanName := strings.ToLower(string(cmd.Args[0]))
+ reverse := false
+ if scanName == "hrevscan" {
+ reverse = true
+ }
args := cmd.Args[1:]
key := args[0]
cursor, match, count, err := parseScanArgs(args[1:])
@@ -204,7 +219,7 @@ func (nd *KVNode) hscanCommand(conn redcon.Conn, cmd redcon.Command) {
var ay []common.KVRecord
- ay, err = nd.store.HScan(key, cursor, count, match)
+ ay, err = nd.store.HScan(key, cursor, count, match, reverse)
if err != nil {
conn.WriteError(err.Error())
return
@@ -234,6 +249,11 @@ func (nd *KVNode) sscanCommand(conn redcon.Conn, cmd redcon.Command) {
conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
return
}
+ scanName := strings.ToLower(string(cmd.Args[0]))
+ reverse := false
+ if scanName == "srevscan" {
+ reverse = true
+ }
args := cmd.Args[1:]
key := args[0]
@@ -245,7 +265,7 @@ func (nd *KVNode) sscanCommand(conn redcon.Conn, cmd redcon.Command) {
}
var ay [][]byte
- ay, err = nd.store.SScan(key, cursor, count, match)
+ ay, err = nd.store.SScan(key, cursor, count, match, reverse)
if err != nil {
conn.WriteError(err.Error())
return
@@ -272,6 +292,11 @@ func (nd *KVNode) zscanCommand(conn redcon.Conn, cmd redcon.Command) {
conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
return
}
+ scanName := strings.ToLower(string(cmd.Args[0]))
+ reverse := false
+ if scanName == "zrevscan" {
+ reverse = true
+ }
args := cmd.Args[1:]
key := args[0]
@@ -284,7 +309,7 @@ func (nd *KVNode) zscanCommand(conn redcon.Conn, cmd redcon.Command) {
var ay []common.ScorePair
- ay, err = nd.store.ZScan(key, cursor, count, match)
+ ay, err = nd.store.ZScan(key, cursor, count, match, reverse)
if err != nil {
conn.WriteError(err.Error())
@@ -338,7 +363,7 @@ func (nd *KVNode) fullScanCommand(cmd redcon.Command) (interface{}, error) {
default:
return nil, common.ErrInvalidScanType
}
- _, key, err := common.ExtractNamesapce(cmd.Args[1])
+ key, err := common.CutNamesapce(cmd.Args[1])
if err != nil {
return nil, err
}
diff --git a/node/scan_test.go b/node/scan_test.go
index 58fb09ca..11d1a4d7 100644
--- a/node/scan_test.go
+++ b/node/scan_test.go
@@ -18,8 +18,13 @@ func TestKVNode_scanCommand(t *testing.T) {
{"hscan", buildCommand([][]byte{[]byte("hscan"), testKey, []byte("")})},
{"sscan", buildCommand([][]byte{[]byte("sscan"), testKey, []byte("")})},
{"zscan", buildCommand([][]byte{[]byte("zscan"), testKey, []byte("")})},
+ {"hrevscan", buildCommand([][]byte{[]byte("hscan"), testKey, []byte("")})},
+ {"srevscan", buildCommand([][]byte{[]byte("sscan"), testKey, []byte("")})},
+ {"zrevscan", buildCommand([][]byte{[]byte("zscan"), testKey, []byte("")})},
{"scan", buildCommand([][]byte{[]byte("scan"), testKey})},
{"scan", buildCommand([][]byte{[]byte("scan"), testKey, []byte("match"), []byte("test"), []byte("count"), []byte("1")})},
+ {"revscan", buildCommand([][]byte{[]byte("scan"), testKey})},
+ {"revscan", buildCommand([][]byte{[]byte("scan"), testKey, []byte("match"), []byte("test"), []byte("count"), []byte("1")})},
{"advscan", buildCommand([][]byte{[]byte("advscan"), testKey, []byte("kv")})},
{"advscan", buildCommand([][]byte{[]byte("advscan"), testKey, []byte("hash")})},
{"advscan", buildCommand([][]byte{[]byte("advscan"), testKey, []byte("list")})},
@@ -37,14 +42,22 @@ func TestKVNode_scanCommand(t *testing.T) {
c := &fakeRedisConn{}
for _, cmd := range tests {
c.Reset()
- handler, _, _ := nd.router.GetCmdHandler(cmd.name)
+ handler, _ := nd.router.GetCmdHandler(cmd.name)
if handler != nil {
handler(c, cmd.args)
assert.Nil(t, c.GetError())
} else {
- mhandler, _, _ := nd.router.GetMergeCmdHandler(cmd.name)
- _, err := mhandler(cmd.args)
- assert.Nil(t, err)
+ whandler, _ := nd.router.GetWCmdHandler(cmd.name)
+ if whandler != nil {
+ rsp, err := whandler(cmd.args)
+ assert.Nil(t, err)
+ _, ok := rsp.(error)
+ assert.True(t, !ok)
+ } else {
+ mhandler, _, _ := nd.router.GetMergeCmdHandler(cmd.name)
+ _, err := mhandler(cmd.args)
+ assert.Nil(t, err)
+ }
}
}
}
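The updated test looks a handler up in three stages: GetCmdHandler, then GetWCmdHandler, then GetMergeCmdHandler. A rough sketch of that fallback order with simplified stand-in types (the `miniRouter` type and the handler signatures are illustrative, not the real router API):

```go
package main

import "fmt"

// simplified stand-ins for the three handler kinds the test looks up in order
type cmdHandler func(args [][]byte)
type wcmdHandler func(args [][]byte) (interface{}, error)
type mergeHandler func(args [][]byte) (interface{}, error)

type miniRouter struct {
	cmds   map[string]cmdHandler
	wcmds  map[string]wcmdHandler
	merged map[string]mergeHandler
}

// dispatch mirrors the fallback order used in the updated tests:
// plain command handler first, then write-command handler, then merge handler.
func (r *miniRouter) dispatch(name string, args [][]byte) error {
	if h, ok := r.cmds[name]; ok {
		h(args)
		return nil
	}
	if wh, ok := r.wcmds[name]; ok {
		_, err := wh(args)
		return err
	}
	if mh, ok := r.merged[name]; ok {
		_, err := mh(args)
		return err
	}
	return fmt.Errorf("no handler for %q", name)
}

func main() {
	r := &miniRouter{
		cmds:   map[string]cmdHandler{"smembers": func([][]byte) {}},
		wcmds:  map[string]wcmdHandler{"sadd": func([][]byte) (interface{}, error) { return int64(1), nil }},
		merged: map[string]mergeHandler{"scan": func([][]byte) (interface{}, error) { return nil, nil }},
	}
	for _, name := range []string{"smembers", "sadd", "scan", "unknown"} {
		fmt.Println(name, "->", r.dispatch(name, nil))
	}
}
```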
diff --git a/node/schema.go b/node/schema.go
index 1cc9fb4a..7a24a228 100644
--- a/node/schema.go
+++ b/node/schema.go
@@ -4,7 +4,7 @@ import (
"encoding/json"
"errors"
- "github.com/absolute8511/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/common"
)
func (nd *KVNode) GetIndexSchema(table string) (map[string]*common.IndexSchema, error) {
diff --git a/node/secondary_index.go b/node/secondary_index.go
index ece0d52c..498f00e7 100644
--- a/node/secondary_index.go
+++ b/node/secondary_index.go
@@ -5,9 +5,9 @@ import (
"strconv"
"strings"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/rockredis"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/rockredis"
)
type HindexSearchResults struct {
@@ -101,7 +101,7 @@ func (nd *KVNode) hindexSearchCommand(cmd redcon.Command) (interface{}, error) {
if len(cmd.Args) < 4 {
return nil, common.ErrInvalidArgs
}
- _, table, err := common.ExtractNamesapce(cmd.Args[1])
+ table, err := common.CutNamesapce(cmd.Args[1])
if err != nil {
return nil, err
}
@@ -173,12 +173,12 @@ func (nd *KVNode) hindexSearchCommand(cmd redcon.Command) (interface{}, error) {
}
case "hgetall":
for _, pk := range pkList {
- _, valCh, err := nd.store.HGetAll(pk.PKey)
+ _, vals, err := nd.store.HGetAll(pk.PKey)
if err != nil {
continue
}
vv := [][]byte{}
- for v := range valCh {
+ for _, v := range vals {
vv = append(vv, v.Rec.Key, v.Rec.Value)
}
rspV := common.HIndexRespWithValues{PKey: pk.PKey, IndexV: pk.IndexValue, HsetValues: vv}
diff --git a/node/set.go b/node/set.go
index f2dcf489..c8982b7f 100644
--- a/node/set.go
+++ b/node/set.go
@@ -1,9 +1,12 @@
package node
import (
+ "errors"
+ "fmt"
"strconv"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
)
func (nd *KVNode) scardCommand(conn redcon.Conn, cmd redcon.Command) {
@@ -38,22 +41,17 @@ func (nd *KVNode) smembersCommand(conn redcon.Conn, cmd redcon.Command) {
}
}
-func (nd *KVNode) saddCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
- }
-}
-
-func (nd *KVNode) spopCommand(conn redcon.Conn, cmd redcon.Command) {
+func (nd *KVNode) srandmembersCommand(conn redcon.Conn, cmd redcon.Command) {
if len(cmd.Args) != 2 && len(cmd.Args) != 3 {
- conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ conn.WriteError(err.Error())
return
}
hasCount := len(cmd.Args) == 3
+ cnt := 1
+ var err error
if hasCount {
- cnt, err := strconv.Atoi(string(cmd.Args[2]))
+ cnt, err = strconv.Atoi(string(cmd.Args[2]))
if err != nil {
conn.WriteError(err.Error())
return
@@ -63,60 +61,128 @@ func (nd *KVNode) spopCommand(conn redcon.Conn, cmd redcon.Command) {
return
}
}
- _, v, ok := rebuildFirstKeyAndPropose(nd, conn, cmd)
- if !ok {
+ v, err := nd.store.SRandMembers(cmd.Args[1], int64(cnt))
+ if err != nil {
+ conn.WriteError(err.Error())
return
}
- // without the count argument, it is bulk string
- if !hasCount {
- if v == nil {
- conn.WriteNull()
- return
- }
- if rsp, ok := v.(string); ok {
- conn.WriteBulkString(string(rsp[0]))
- return
- }
- if !ok {
- conn.WriteError("Invalid response type")
- return
- }
- } else {
- rsp, ok := v.([][]byte)
- if !ok {
- conn.WriteError("Invalid response type")
- return
+ conn.WriteArray(len(v))
+ for _, vv := range v {
+ conn.WriteBulk(vv)
+ }
+}
+
+func (nd *KVNode) saddCommand(cmd redcon.Command) (interface{}, error) {
+ // optimize sadd: check membership before proposing to raft
+ if len(cmd.Args) < 3 {
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
+ }
+ key, err := common.CutNamesapce(cmd.Args[1])
+ if err != nil {
+ return nil, err
+ }
+
+ needChange := false
+ for _, m := range cmd.Args[2:] {
+ if err := common.CheckKeySubKey(key, m); err != nil {
+ return nil, err
}
- conn.WriteArray(len(rsp))
- for _, d := range rsp {
- conn.WriteBulk(d)
+ n, _ := nd.store.SIsMember(key, m)
+ if n == 0 {
+ // found a member that does not exist yet, so we need a raft proposal
+ needChange = true
+ break
}
}
+ if !needChange {
+ return int64(0), nil
+ }
+ rsp, err := rebuildFirstKeyAndPropose(nd, cmd, checkAndRewriteIntRsp)
+ return rsp, err
}
-func (nd *KVNode) sremCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
+func (nd *KVNode) sremCommand(cmd redcon.Command) (interface{}, error) {
+ // optimize srem: check membership before proposing to raft
+ if len(cmd.Args) < 3 {
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
+ }
+ key, err := common.CutNamesapce(cmd.Args[1])
+ if err != nil {
+ return nil, err
}
-}
-func (nd *KVNode) sclearCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
+ needChange := false
+ for _, m := range cmd.Args[2:] {
+ n, _ := nd.store.SIsMember(key, m)
+ if n != 0 {
+ // found an existing member, so we need a raft proposal
+ needChange = true
+ break
+ }
+ }
+ if !needChange {
+ return int64(0), nil
}
+ rsp, err := rebuildFirstKeyAndPropose(nd, cmd, checkAndRewriteIntRsp)
+ return rsp, err
}
-func (nd *KVNode) smclearCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
+func (nd *KVNode) spopCommand(cmd redcon.Command) (interface{}, error) {
+ if len(cmd.Args) != 2 && len(cmd.Args) != 3 {
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
+ }
+ hasCount := len(cmd.Args) == 3
+ if hasCount {
+ cnt, err := strconv.Atoi(string(cmd.Args[2]))
+ if err != nil {
+ return nil, err
+ }
+ if cnt < 1 {
+ return nil, errors.New("Invalid count")
+ }
+ }
+ key, err := common.CutNamesapce(cmd.Args[1])
+ if err != nil {
+ return nil, err
+ }
+ n, err := nd.store.SCard(key)
+ if err != nil {
+ return nil, err
+ }
+ // check if empty set
+ if n == 0 {
+ if !hasCount {
+ return nil, nil
+ } else {
+ return [][]byte{}, nil
+ }
+ }
+ v, err := rebuildFirstKeyAndPropose(nd, cmd, func(cmd redcon.Command, r interface{}) (interface{}, error) {
+ // without the count argument, it is bulk string
+ if !hasCount {
+ if r == nil {
+ return nil, nil
+ }
+ if rsp, ok := r.([]byte); ok {
+ return rsp, nil
+ }
+ return nil, errInvalidResponse
+ } else {
+ rsp, ok := r.([][]byte)
+ if !ok {
+ return nil, errInvalidResponse
+ }
+ return rsp, nil
+ }
+ })
+ if err != nil {
+ return nil, err
}
+ return v, nil
}
func (kvsm *kvStoreSM) localSadd(cmd redcon.Command, ts int64) (interface{}, error) {
@@ -140,13 +206,13 @@ func (kvsm *kvStoreSM) localSpop(cmd redcon.Command, ts int64) (interface{}, err
return vals, nil
}
if len(vals) > 0 {
- return string(vals[0]), nil
+ return vals[0], nil
}
return nil, nil
}
func (kvsm *kvStoreSM) localSclear(cmd redcon.Command, ts int64) (interface{}, error) {
- return kvsm.store.SClear(cmd.Args[1])
+ return kvsm.store.SClear(ts, cmd.Args[1])
}
func (kvsm *kvStoreSM) localSmclear(cmd redcon.Command, ts int64) (interface{}, error) {
return kvsm.store.SMclear(cmd.Args[1:]...)
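The rewritten saddCommand/sremCommand skip the raft proposal when the write would be a no-op by checking SIsMember first. A minimal sketch of that read-before-propose idea (`fakeStore` and `saddNeedsProposal` are illustrative placeholders, not the real store API):

```go
package main

import "fmt"

// fakeStore stands in for the membership check the real handler does with SIsMember.
type fakeStore struct{ members map[string]bool }

func (s *fakeStore) SIsMember(key, member string) bool { return s.members[member] }

// saddNeedsProposal mirrors the optimization in saddCommand: only propose to raft
// when at least one member is not already in the set.
func saddNeedsProposal(s *fakeStore, key string, members []string) bool {
	for _, m := range members {
		if !s.SIsMember(key, m) {
			return true // a new member: the write actually changes state
		}
	}
	return false // all members already present: skip the raft round-trip
}

func main() {
	s := &fakeStore{members: map[string]bool{"a": true, "b": true}}
	fmt.Println(saddNeedsProposal(s, "set1", []string{"a", "b"})) // false, no proposal
	fmt.Println(saddNeedsProposal(s, "set1", []string{"a", "c"})) // true, propose
}
```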
diff --git a/node/set_test.go b/node/set_test.go
index 984f341e..43910d31 100644
--- a/node/set_test.go
+++ b/node/set_test.go
@@ -2,6 +2,8 @@ package node
import (
"os"
+ "strconv"
+ "sync"
"testing"
"github.com/absolute8511/redcon"
@@ -20,12 +22,19 @@ func TestKVNode_setCommand(t *testing.T) {
{"scard", buildCommand([][]byte{[]byte("scard"), testKey})},
{"sismember", buildCommand([][]byte{[]byte("sismember"), testKey, testMember})},
{"smembers", buildCommand([][]byte{[]byte("smembers"), testKey})},
+ {"srandmember", buildCommand([][]byte{[]byte("srandmember"), testKey})},
+ {"srandmember", buildCommand([][]byte{[]byte("srandmember"), testKey, []byte("2")})},
{"sadd", buildCommand([][]byte{[]byte("sadd"), testKey, testMember})},
{"sismember", buildCommand([][]byte{[]byte("sismember"), testKey, testMember})},
{"smembers", buildCommand([][]byte{[]byte("smembers"), testKey})},
{"scard", buildCommand([][]byte{[]byte("scard"), testKey})},
{"spop", buildCommand([][]byte{[]byte("spop"), testKey})},
+ {"spop", buildCommand([][]byte{[]byte("spop"), testKey, []byte("2")})},
{"srem", buildCommand([][]byte{[]byte("srem"), testKey, testMember})},
+ {"sttl", buildCommand([][]byte{[]byte("sttl"), testKey})},
+ {"skeyexist", buildCommand([][]byte{[]byte("skeyexist"), testKey})},
+ {"sexpire", buildCommand([][]byte{[]byte("sexpire"), testKey, []byte("10")})},
+ {"spersist", buildCommand([][]byte{[]byte("spersist"), testKey})},
{"sclear", buildCommand([][]byte{[]byte("sclear"), testKey})},
}
defer os.RemoveAll(dataDir)
@@ -34,8 +43,65 @@ func TestKVNode_setCommand(t *testing.T) {
c := &fakeRedisConn{}
for _, cmd := range tests {
c.Reset()
- handler, _, _ := nd.router.GetCmdHandler(cmd.name)
- handler(c, cmd.args)
- assert.Nil(t, c.GetError())
+ origCmd := append([]byte{}, cmd.args.Raw...)
+ handler, ok := nd.router.GetCmdHandler(cmd.name)
+ if ok {
+ handler(c, cmd.args)
+ assert.Nil(t, c.GetError())
+ } else {
+ whandler, _ := nd.router.GetWCmdHandler(cmd.name)
+ rsp, err := whandler(cmd.args)
+ assert.Nil(t, err)
+ _, ok := rsp.(error)
+ assert.True(t, !ok)
+ }
+ assert.Equal(t, origCmd, cmd.args.Raw)
}
}
+
+func TestKVNode_setCommandConcurrent(t *testing.T) {
+ nd, dataDir, stopC := getTestKVNode(t)
+
+ defer os.RemoveAll(dataDir)
+ defer nd.Stop()
+ defer close(stopC)
+ var wg sync.WaitGroup
+ for index := 0; index < 3; index++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for loop := 0; loop < 100; loop++ {
+ c := &fakeRedisConn{}
+ testKey := []byte("default:test:1")
+ testMember := []byte("1" + strconv.Itoa(loop))
+ tests := []struct {
+ name string
+ args redcon.Command
+ }{
+ {"sismember", buildCommand([][]byte{[]byte("sismember"), testKey, testMember})},
+ {"smembers", buildCommand([][]byte{[]byte("smembers"), testKey})},
+ {"sadd", buildCommand([][]byte{[]byte("sadd"), testKey, testMember})},
+ {"spop", buildCommand([][]byte{[]byte("spop"), testKey})},
+ {"spop", buildCommand([][]byte{[]byte("spop"), testKey, []byte("2")})},
+ }
+ for _, cmd := range tests {
+ c.Reset()
+ origCmd := append([]byte{}, cmd.args.Raw...)
+ handler, ok := nd.router.GetCmdHandler(cmd.name)
+ if ok {
+ handler(c, cmd.args)
+ assert.Nil(t, c.GetError())
+ } else {
+ whandler, _ := nd.router.GetWCmdHandler(cmd.name)
+ rsp, err := whandler(cmd.args)
+ assert.Nil(t, err)
+ _, ok := rsp.(error)
+ assert.True(t, !ok)
+ }
+ assert.Equal(t, origCmd, cmd.args.Raw)
+ }
+ }
+ }()
+ }
+ wg.Wait()
+}
diff --git a/node/slow_limiter.go b/node/slow_limiter.go
new file mode 100644
index 00000000..8f23a653
--- /dev/null
+++ b/node/slow_limiter.go
@@ -0,0 +1,458 @@
+package node
+
+import (
+ "context"
+ "errors"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/metric"
+
+ ps "github.com/prometheus/client_golang/prometheus"
+)
+
+var enableSlowLimiterTest = false
+
+func EnableSlowLimiterTest(t bool) {
+ enableSlowLimiterTest = t
+}
+
+// ErrSlowLimiterRefused indicates the write request was found slow while applying, so it is refused to avoid
+// slowing down other writes.
+var ErrSlowLimiterRefused = errors.New("refused by slow limiter")
+
+type slowLevelT int
+
+const (
+ minSlowLevel slowLevelT = iota
+ midSlowLevel
+ verySlowLevel
+ maxSlowLevel
+)
+
+const (
+ maxSlowThreshold = 300
+ heavySlowThreshold = 250
+ midSlowThreshold = 60
+ smallSlowThreshold = 20
+ slowQueueThreshold = 5
+)
+
+var SlowRefuseCostMs = int64(800)
+var SlowHalfOpenSec = int64(10)
+var maybeSlowCmd map[string]bool
+var slowQueueCostMs = int64(250)
+
+func init() {
+ maybeSlowCmd = make(map[string]bool, 100)
+ maybeSlowCmd["spop"] = true
+ maybeSlowCmd["zremrangebyrank"] = true
+ maybeSlowCmd["zremrangebyscore"] = true
+ maybeSlowCmd["zremrangebylex"] = true
+ maybeSlowCmd["ltrim"] = true
+ // remove below if compact ttl is enabled by default
+ maybeSlowCmd["sclear"] = true
+ maybeSlowCmd["zclear"] = true
+ maybeSlowCmd["lclear"] = true
+ maybeSlowCmd["hclear"] = true
+}
+
+func IsMaybeSlowWriteCmd(cmd string) bool {
+ _, ok := maybeSlowCmd[cmd]
+ return ok
+}
+
+func ChangeSlowRefuseCost(v int64) {
+ atomic.StoreInt64(&SlowRefuseCostMs, v)
+}
+
+func RegisterSlowConfChanged() {
+ common.RegisterConfChangedHandler(common.ConfSlowLimiterRefuseCostMs, func(v interface{}) {
+ iv, ok := v.(int)
+ if ok {
+ atomic.StoreInt64(&SlowRefuseCostMs, int64(iv))
+ }
+ })
+ common.RegisterConfChangedHandler(common.ConfSlowLimiterHalfOpenSec, func(v interface{}) {
+ iv, ok := v.(int)
+ if ok {
+ atomic.StoreInt64(&SlowHalfOpenSec, int64(iv))
+ }
+ })
+}
+
+type SlowWaitDone struct {
+ c chan struct{}
+}
+
+func (swd *SlowWaitDone) Done() {
+ if swd.c == nil {
+ return
+ }
+ select {
+ case <-swd.c:
+ default:
+ }
+}
+
+// SlowLimiter is used to limit some slow write command to avoid raft blocking
+type SlowLimiter struct {
+ ns string
+ slowCounter int64
+ slowQueueCounter int64
+
+ limiterOn int32
+ mutex sync.RWMutex
+ slowHistorys [maxSlowLevel]map[string]int64
+ lastSlowTs int64
+ stopC chan struct{}
+ wg sync.WaitGroup
+ // some slow writes should wait in the queue until others have returned from raft apply
+ slowWaitQueue [maxSlowLevel]chan struct{}
+}
+
+func NewSlowLimiter(ns string) *SlowLimiter {
+ var his [maxSlowLevel]map[string]int64
+ var q [maxSlowLevel]chan struct{}
+ l := 100
+ for i := 0; i < len(his); i++ {
+ his[i] = make(map[string]int64)
+ q[i] = make(chan struct{}, l+3)
+ l = l / 5
+ }
+
+ sl := &SlowLimiter{
+ ns: ns,
+ limiterOn: int32(common.GetIntDynamicConf(common.ConfSlowLimiterSwitch)),
+ slowWaitQueue: q,
+ slowHistorys: his,
+ }
+ return sl
+}
+
+func (sl *SlowLimiter) Start() {
+ sl.stopC = make(chan struct{})
+ sl.wg.Add(1)
+ go sl.run(sl.stopC)
+}
+
+func (sl *SlowLimiter) Stop() {
+ if sl.stopC != nil {
+ close(sl.stopC)
+ sl.stopC = nil
+ }
+ sl.wg.Wait()
+}
+
+func (sl *SlowLimiter) run(stopC chan struct{}) {
+ defer sl.wg.Done()
+ checkInterval := time.Second * 2
+ if enableSlowLimiterTest {
+ checkInterval = checkInterval / 4
+ }
+ ticker := time.NewTicker(checkInterval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ // decrease the slow counter more quickly to reduce the time spent
+ // in the mid or heavy slow state, so we avoid refusing too many lower-cost writes
+ old := atomic.LoadInt64(&sl.slowCounter)
+ nodeLog.Debugf("current slow %v , last slow ts: %v",
+ old, atomic.LoadInt64(&sl.lastSlowTs))
+ decr := -1
+ if old >= heavySlowThreshold {
+ decr = -10
+ } else if old >= midSlowThreshold {
+ decr = -2
+ }
+ // speed up for test
+ if enableSlowLimiterTest && old > 10 {
+ decr *= 3
+ }
+ n := atomic.AddInt64(&sl.slowCounter, int64(decr))
+ if old >= smallSlowThreshold && n < smallSlowThreshold {
+ // we only clear slow stats when we change from really slow to not slow
+ nodeLog.Infof("the apply limiter is changed from slow %v to no slow: %v , last slow ts: %v",
+ old, n, atomic.LoadInt64(&sl.lastSlowTs))
+ sl.clearSlows()
+ }
+ if n < 0 {
+ atomic.AddInt64(&sl.slowCounter, int64(-1*decr))
+ }
+ case <-stopC:
+ return
+ }
+ }
+}
+
+func (sl *SlowLimiter) testSlowWrite1s(cmd redcon.Command, ts int64) (interface{}, error) {
+ time.Sleep(time.Second)
+ return nil, nil
+}
+func (sl *SlowLimiter) testSlowWrite100ms(cmd redcon.Command, ts int64) (interface{}, error) {
+ time.Sleep(time.Millisecond * 100)
+ return nil, nil
+}
+func (sl *SlowLimiter) testSlowWrite50ms(cmd redcon.Command, ts int64) (interface{}, error) {
+ time.Sleep(time.Millisecond * 50)
+ return nil, nil
+}
+func (sl *SlowLimiter) testSlowWrite5ms(cmd redcon.Command, ts int64) (interface{}, error) {
+ time.Sleep(time.Millisecond * 5)
+ return nil, nil
+}
+
+func (sl *SlowLimiter) TurnOn() {
+ atomic.StoreInt32(&sl.limiterOn, 1)
+}
+
+func (sl *SlowLimiter) TurnOff() {
+ atomic.StoreInt32(&sl.limiterOn, 0)
+}
+
+func (sl *SlowLimiter) isOn() bool {
+ return atomic.LoadInt32(&sl.limiterOn) > 0
+}
+
+func (sl *SlowLimiter) MarkHeavySlow() {
+ atomic.StoreInt64(&sl.slowCounter, maxSlowThreshold)
+ atomic.StoreInt64(&sl.lastSlowTs, time.Now().UnixNano())
+}
+
+func (sl *SlowLimiter) clearSlows() {
+ if !sl.isOn() {
+ return
+ }
+ sl.mutex.Lock()
+ defer sl.mutex.Unlock()
+ atomic.StoreInt64(&sl.slowQueueCounter, 0)
+ for i := 0; i < len(sl.slowHistorys); i++ {
+ if len(sl.slowHistorys[i]) > 0 {
+ sl.slowHistorys[i] = make(map[string]int64)
+ }
+ }
+}
+
+func (sl *SlowLimiter) MaybeAddSlow(ts int64, cost time.Duration, cmd string, prefix string) {
+ if cost < time.Millisecond*time.Duration(atomic.LoadInt64(&SlowRefuseCostMs)) {
+ // while we are in a slowed-down state, slow writes will be refused;
+ // while in half-open, some historically slow writes will be let through to be
+ // checked for slowness again, so we need to check the history to
+ // identify possible slow writes faster.
+ if cost >= time.Millisecond*time.Duration(slowQueueCostMs) {
+ atomic.AddInt64(&sl.slowQueueCounter, 1)
+ }
+ if cost < getCostThresholdForSlowLevel(midSlowLevel) {
+ return
+ }
+ cnt := atomic.LoadInt64(&sl.slowCounter)
+ if cnt < smallSlowThreshold {
+ return
+ }
+ isSlow, _ := sl.isHistorySlow(cmd, prefix, cnt, minSlowLevel)
+ if !isSlow {
+ return
+ }
+ }
+ sl.AddSlow(ts)
+}
+
+// returns whether the write is slow and the matched slow level
+func (sl *SlowLimiter) isHistorySlow(cmd, prefix string, sc int64, ignoreSlowLevel slowLevelT) (bool, slowLevelT) {
+ feat := cmd + " " + prefix
+ sl.mutex.RLock()
+ defer sl.mutex.RUnlock()
+ for lv := minSlowLevel; lv < maxSlowLevel; lv++ {
+ if lv <= ignoreSlowLevel {
+ continue
+ }
+ slow := sl.slowHistorys[lv]
+ cnt, ok := slow[feat]
+ if lv >= verySlowLevel {
+ if ok && cnt > 2 {
+ return true, lv
+ }
+ } else if sc >= midSlowThreshold && lv >= midSlowLevel {
+ if ok && cnt > 4 {
+ return true, lv
+ }
+ } else if sc >= heavySlowThreshold && lv >= minSlowLevel {
+ if ok && cnt > 16 {
+ return true, lv
+ }
+ }
+ }
+ return false, 0
+}
+
+func (sl *SlowLimiter) AddSlow(ts int64) {
+ atomic.StoreInt64(&sl.lastSlowTs, ts)
+ sl.addCounterOnly()
+}
+
+func (sl *SlowLimiter) addCounterOnly() {
+ cnt := atomic.AddInt64(&sl.slowCounter, 1)
+ atomic.AddInt64(&sl.slowQueueCounter, 1)
+ if cnt > maxSlowThreshold {
+ atomic.AddInt64(&sl.slowCounter, -1)
+ }
+}
+
+func (sl *SlowLimiter) PreWaitQueue(ctx context.Context, cmd string, prefix string) (*SlowWaitDone, error) {
+ feat := cmd + " " + prefix
+ slv := slowLevelT(-1)
+ if IsMaybeSlowWriteCmd(cmd) {
+ slv = verySlowLevel
+ } else {
+ sl.mutex.RLock()
+ for lv := verySlowLevel; lv >= 0; lv-- {
+ slow := sl.slowHistorys[lv]
+ cnt, ok := slow[feat]
+ if ok && cnt > 2 {
+ slv = lv
+ break
+ }
+ }
+ sl.mutex.RUnlock()
+ }
+ if slv >= maxSlowLevel || slv < 0 {
+ return nil, nil
+ }
+ wq := sl.slowWaitQueue[slv]
+ begin := time.Now()
+ select {
+ case <-ctx.Done():
+ metric.SlowLimiterRefusedCnt.With(ps.Labels{
+ "table": prefix,
+ "cmd": cmd,
+ }).Inc()
+ return nil, ctx.Err()
+ case wq <- struct{}{}:
+ }
+ cost := time.Since(begin)
+ if cost >= time.Millisecond {
+ metric.SlowLimiterQueuedCost.With(ps.Labels{
+ "namespace": sl.ns,
+ "table": prefix,
+ "cmd": cmd,
+ }).Observe(float64(cost.Milliseconds()))
+ }
+ metric.SlowLimiterQueuedCnt.With(ps.Labels{
+ "table": prefix,
+ "cmd": cmd,
+ "slow_level": getSlowLevelDesp(slv),
+ }).Inc()
+ metric.QueueLen.With(ps.Labels{
+ "namespace": sl.ns,
+ "queue_name": "slow_wait_queue_" + getSlowLevelDesp(slv),
+ }).Set(float64(len(wq)))
+ return &SlowWaitDone{wq}, nil
+}
+
+func (sl *SlowLimiter) CanPass(ts int64, cmd string, prefix string) bool {
+ if prefix == "" {
+ return true
+ }
+ if !sl.isOn() {
+ return true
+ }
+ sc := atomic.LoadInt64(&sl.slowCounter)
+ if sc < smallSlowThreshold {
+ return true
+ }
+ if ts > atomic.LoadInt64(&sl.lastSlowTs)+time.Second.Nanoseconds()*SlowHalfOpenSec {
+ return true
+ }
+ if isSlow, _ := sl.isHistorySlow(cmd, prefix, sc, -1); isSlow {
+ // the write is refused, which means it might slow down the raft loop if we let it pass,
+ // so we add to the counter here even though we refused it.
+ // However, we do not update the slow timestamp, so we can still clear the state if it
+ // turns out not to be slow while in the half-open state.
+ sl.addCounterOnly()
+ metric.SlowLimiterRefusedCnt.With(ps.Labels{
+ "table": prefix,
+ "cmd": cmd,
+ }).Inc()
+ return false
+ }
+ return true
+}
+
+func getCostThresholdForSlowLevel(slv slowLevelT) time.Duration {
+ if slv >= verySlowLevel {
+ return time.Millisecond * 100
+ }
+ if slv >= midSlowLevel {
+ return time.Millisecond * 50
+ }
+ if slv >= minSlowLevel {
+ return time.Millisecond * 10
+ }
+ return 0
+}
+
+func getSlowLevelDesp(slv slowLevelT) string {
+ return strconv.Itoa(int(slv))
+}
+
+func getSlowLevelFromCost(cost time.Duration) slowLevelT {
+ if cost >= time.Millisecond*100 {
+ return verySlowLevel
+ }
+ if cost >= time.Millisecond*50 {
+ return midSlowLevel
+ }
+ if cost >= time.Millisecond*10 {
+ return minSlowLevel
+ }
+ return -1
+}
+
+func (sl *SlowLimiter) RecordSlowCmd(cmd string, prefix string, cost time.Duration) {
+ if prefix == "" || cmd == "" {
+ return
+ }
+ slv := getSlowLevelFromCost(cost)
+ if slv < minSlowLevel || slv >= maxSlowLevel {
+ return
+ }
+ if slv == verySlowLevel {
+ metric.SlowWrite100msCnt.With(ps.Labels{
+ "table": prefix,
+ "cmd": cmd,
+ }).Inc()
+ } else if slv == midSlowLevel {
+ metric.SlowWrite50msCnt.With(ps.Labels{
+ "table": prefix,
+ "cmd": cmd,
+ }).Inc()
+ } else if slv == minSlowLevel {
+ metric.SlowWrite10msCnt.With(ps.Labels{
+ "table": prefix,
+ "cmd": cmd,
+ }).Inc()
+ }
+ if !sl.isOn() {
+ return
+ }
+ sc := atomic.LoadInt64(&sl.slowCounter)
+ qc := atomic.LoadInt64(&sl.slowQueueCounter)
+ if sc < smallSlowThreshold && qc < slowQueueThreshold {
+ return
+ }
+ feat := cmd + " " + prefix
+ sl.mutex.Lock()
+ slow := sl.slowHistorys[slv]
+ old, ok := slow[feat]
+ if !ok {
+ old = 0
+ }
+ old++
+ slow[feat] = old
+ sl.mutex.Unlock()
+}
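Putting the SlowLimiter pieces together, a write path would presumably call CanPass, then PreWaitQueue, and feed the measured cost back via RecordSlowCmd/MaybeAddSlow. A sketch of that assumed call order, placed in the same node package so it can use the types above; `execWrite` stands in for the real propose/apply step and the exact wiring in the node may differ:

```go
package node

import (
	"context"
	"time"
)

// handleWriteWithLimiter sketches the assumed call order around SlowLimiter for a
// single write: refuse early, wait in the slow queue if needed, then record the cost.
func handleWriteWithLimiter(ctx context.Context, sl *SlowLimiter, ts int64, cmd, table string,
	execWrite func() error) error {
	if !sl.CanPass(ts, cmd, table) {
		// refused while the namespace is slow and this cmd+table is known slow
		return ErrSlowLimiterRefused
	}
	done, err := sl.PreWaitQueue(ctx, cmd, table)
	if err != nil {
		return err
	}
	start := time.Now()
	err = execWrite()
	if done != nil {
		done.Done() // release the slow-wait queue slot
	}
	cost := time.Since(start)
	// feed the observed cost back so future slow writes can be identified
	sl.RecordSlowCmd(cmd, table, cost)
	sl.MaybeAddSlow(time.Now().UnixNano(), cost, cmd, table)
	return err
}
```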
diff --git a/node/slow_limiter_test.go b/node/slow_limiter_test.go
new file mode 100644
index 00000000..d90bc3c1
--- /dev/null
+++ b/node/slow_limiter_test.go
@@ -0,0 +1,147 @@
+package node
+
+import (
+ "sync/atomic"
+ "testing"
+ "time"
+
+ ps "github.com/prometheus/client_golang/prometheus"
+ io_prometheus_clients "github.com/prometheus/client_model/go"
+ "github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/metric"
+)
+
+var slowRefuseCost = time.Millisecond * time.Duration(SlowRefuseCostMs)
+
+func TestSlowLimiter_CanPass(t *testing.T) {
+ type fields struct {
+ slowCounter int64
+ limiterOn int32
+ slowHistorys [maxSlowLevel]map[string]int64
+ lastSlowTs int64
+ }
+ type args struct {
+ cmd string
+ prefix string
+ }
+ var emptySlows [maxSlowLevel]map[string]int64
+ slow100s := make(map[string]int64)
+ slow50s := make(map[string]int64)
+ slow10s := make(map[string]int64)
+ emptySlows[0] = slow10s
+ emptySlows[1] = slow50s
+ emptySlows[2] = slow100s
+ var allSlows [maxSlowLevel]map[string]int64
+ slow100sTestTable := make(map[string]int64)
+ slow100sTestTable["set test_table"] = 10
+ slow50sTestTable := make(map[string]int64)
+ slow50sTestTable["set test_table"] = 20
+ slow10sTestTable := make(map[string]int64)
+ slow10sTestTable["set test_table"] = 30
+ allSlows[0] = slow10sTestTable
+ allSlows[1] = slow50sTestTable
+ allSlows[2] = slow100sTestTable
+ var slow50s_10sHist [maxSlowLevel]map[string]int64
+ slow50s_10sHist[0] = slow10sTestTable
+ slow50s_10sHist[1] = slow50sTestTable
+ var slow100sHist [maxSlowLevel]map[string]int64
+ slow100sHist[2] = slow100sTestTable
+ var slow50sHist [maxSlowLevel]map[string]int64
+ slow50sHist[1] = slow50sTestTable
+ var slow10sHist [maxSlowLevel]map[string]int64
+ slow10sHist[0] = slow10sTestTable
+ tn := time.Now()
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ want bool
+ }{
+ // real no slow
+ {"canpass_noslow1", fields{0, 1, emptySlows, 0}, args{"set", "test_table"}, true},
+ // no recorded table
+ {"canpass_noslow_record", fields{maxSlowThreshold, 1, emptySlows, tn.UnixNano()}, args{"set", "test_table"}, true},
+ // last slow is long ago
+ {"canpass_slow_last_long_ago", fields{maxSlowThreshold, 1, allSlows, tn.Add(-1 * time.Hour).UnixNano()}, args{"set", "test_table"}, true},
+ // small slow should only refuse 100ms writes
+ {"canpass_below100ms_in_small_slow", fields{smallSlowThreshold, 1, slow50s_10sHist, tn.UnixNano()}, args{"set", "test_table"}, true},
+ {"cannotpass_100ms_in_small_slow", fields{smallSlowThreshold, 1, slow100sHist, tn.UnixNano()}, args{"set", "test_table"}, false},
+ {"canpass_below50ms_in_mid_slow", fields{midSlowThreshold, 1, slow10sHist, tn.UnixNano()}, args{"set", "test_table"}, true},
+ {"cannotpass_50ms_in_mid_slow", fields{midSlowThreshold, 1, slow50sHist, tn.UnixNano()}, args{"set", "test_table"}, false},
+ {"cannotpass_100ms_in_mid_slow", fields{midSlowThreshold, 1, slow100sHist, tn.UnixNano()}, args{"set", "test_table"}, false},
+ {"canpass_below10ms_in_heavy_slow", fields{heavySlowThreshold, 1, emptySlows, tn.UnixNano()}, args{"set", "test_table"}, true},
+ {"cannotpass_10ms_in_heavy_slow", fields{heavySlowThreshold, 1, slow10sHist, tn.UnixNano()}, args{"set", "test_table"}, false},
+ {"cannotpass_50ms_in_heavy_slow", fields{heavySlowThreshold, 1, slow50sHist, tn.UnixNano()}, args{"set", "test_table"}, false},
+ {"cannotpass_100ms_in_heavy_slow", fields{heavySlowThreshold, 1, slow100sHist, tn.UnixNano()}, args{"set", "test_table"}, false},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ sl := &SlowLimiter{
+ slowCounter: tt.fields.slowCounter,
+ limiterOn: tt.fields.limiterOn,
+ slowHistorys: tt.fields.slowHistorys,
+ lastSlowTs: tt.fields.lastSlowTs,
+ }
+ if got := sl.CanPass(tn.UnixNano(), tt.args.cmd, tt.args.prefix); got != tt.want {
+ t.Errorf("SlowLimiter.CanPass() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+ counter := metric.SlowLimiterRefusedCnt.With(ps.Labels{
+ "table": "test_table",
+ "cmd": "set",
+ })
+ out := io_prometheus_clients.Metric{}
+ counter.Write(&out)
+ assert.Equal(t, float64(6), *out.Counter.Value)
+}
+
+func TestSlowLimiter_SlowToNoSlow(t *testing.T) {
+ enableSlowLimiterTest = true
+ defer func() {
+ enableSlowLimiterTest = false
+ }()
+ sl := NewSlowLimiter("test")
+ sl.Start()
+ defer sl.Stop()
+ cnt := 0
+ atomic.StoreInt64(&sl.slowCounter, midSlowThreshold)
+ oldTs := time.Now().UnixNano()
+ atomic.StoreInt64(&sl.lastSlowTs, oldTs)
+ sl.RecordSlowCmd("test", "test_table", slowRefuseCost)
+ sl.RecordSlowCmd("test", "test_table", slowRefuseCost)
+ sl.RecordSlowCmd("test", "test_table", slowRefuseCost)
+ assert.True(t, !sl.CanPass(time.Now().UnixNano(), "test", "test_table"))
+ // also check pass with the old ts to make sure we pass because the slow record was cleared
+ for {
+ cnt++
+ if sl.CanPass(time.Now().UnixNano(), "test", "test_table") && sl.CanPass(oldTs, "test", "test_table") {
+ break
+ }
+ // should sleep longer than the ticker interval;
+ // in test the slow-down ticker is much faster
+ time.Sleep(time.Second)
+ }
+ t.Logf("slow to noslow cnt : %v", cnt)
+ // in test the slow-down ticker is much faster
+ assert.True(t, cnt >= smallSlowThreshold/4)
+ assert.True(t, cnt < heavySlowThreshold)
+}
+
+func TestSlowLimiter_NoSlowToSlow(t *testing.T) {
+ sl := NewSlowLimiter("test")
+ sl.Start()
+ defer sl.Stop()
+ cnt := 0
+ for {
+ sl.RecordSlowCmd("test", "test_table", slowRefuseCost)
+ sl.MaybeAddSlow(time.Now().UnixNano(), slowRefuseCost, "test", "test_table")
+ cnt++
+ if !sl.CanPass(time.Now().UnixNano(), "test", "test_table") {
+ break
+ }
+ }
+ t.Logf("noslow to slow cnt : %v", cnt)
+ assert.True(t, cnt >= smallSlowThreshold)
+ assert.True(t, cnt < heavySlowThreshold)
+}
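The run loop above decays slowCounter every check interval (2s by default) by 1, 2, or 10 depending on the thresholds, and clears slow stats once the counter drops below smallSlowThreshold. A small illustrative simulation of how many ticks that decay takes from a fully marked-slow counter (constants copied from slow_limiter.go; the simulation itself is only a sketch):

```go
package main

import "fmt"

// constants copied from slow_limiter.go for illustration
const (
	maxSlowThreshold   = 300
	heavySlowThreshold = 250
	midSlowThreshold   = 60
	smallSlowThreshold = 20
)

// ticksToClear simulates the decay applied every check interval in SlowLimiter.run:
// -10 per tick while >= heavySlowThreshold, -2 while >= midSlowThreshold, else -1.
func ticksToClear(counter int) int {
	ticks := 0
	for counter >= smallSlowThreshold {
		switch {
		case counter >= heavySlowThreshold:
			counter -= 10
		case counter >= midSlowThreshold:
			counter -= 2
		default:
			counter--
		}
		ticks++
	}
	return ticks
}

func main() {
	// a namespace marked fully slow (counter = 300) needs roughly this many
	// ticks before its slow stats get cleared
	ticks := ticksToClear(maxSlowThreshold)
	fmt.Printf("ticks=%d (~%ds at 2s per tick)\n", ticks, ticks*2)
}
```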
diff --git a/node/state_machine.go b/node/state_machine.go
index 712dd9c6..fa569809 100644
--- a/node/state_machine.go
+++ b/node/state_machine.go
@@ -5,57 +5,79 @@ import (
"encoding/json"
"errors"
"fmt"
+ "io/ioutil"
"net/http"
"os"
"path"
+ "path/filepath"
+ "strconv"
"strings"
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/pkg/wait"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/rockredis"
"github.com/absolute8511/redcon"
+ ps "github.com/prometheus/client_golang/prometheus"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/metric"
+ "github.com/youzan/ZanRedisDB/pkg/wait"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/rockredis"
+ "github.com/youzan/ZanRedisDB/slow"
)
const (
maxDBBatchCmdNum = 100
- dbWriteSlow = time.Millisecond * 200
+ dbWriteSlow = time.Millisecond * 100
)
// this error is used while the raft is applying the remote raft logs and notify we should
// not update the remote raft term-index state.
var errIgnoredRemoteApply = errors.New("remote raft apply should be ignored")
var errRemoteSnapTransferFailed = errors.New("remote raft snapshot transfer failed")
+var errNobackupAvailable = errors.New("no backup available from others")
+
+func isUnrecoveryError(err error) bool {
+ if strings.HasPrefix(err.Error(), "IO error: No space left on device") {
+ return true
+ }
+ return false
+}
type StateMachine interface {
- ApplyRaftRequest(isReplaying bool, req BatchInternalRaftRequest, term uint64, index uint64, stop chan struct{}) (bool, error)
+ ApplyRaftRequest(isReplaying bool, b IBatchOperator, req BatchInternalRaftRequest, term uint64, index uint64, stop chan struct{}) (bool, error)
ApplyRaftConfRequest(req raftpb.ConfChange, term uint64, index uint64, stop chan struct{}) error
GetSnapshot(term uint64, index uint64) (*KVSnapInfo, error)
- RestoreFromSnapshot(startup bool, raftSnapshot raftpb.Snapshot, stop chan struct{}) error
+ UpdateSnapshotState(term uint64, index uint64)
+ PrepareSnapshot(raftSnapshot raftpb.Snapshot, stop chan struct{}) error
+ RestoreFromSnapshot(raftSnapshot raftpb.Snapshot, stop chan struct{}) error
Destroy()
CleanData() error
Optimize(string)
- GetStats() common.NamespaceStats
+ OptimizeExpire()
+ OptimizeAnyRange(CompactAPIRange)
+ DisableOptimize(bool)
+ GetStats(table string, needDetail bool) metric.NamespaceStats
+ EnableTopn(on bool)
+ ClearTopn()
Start() error
Close()
- CheckExpiredData(buffer common.ExpiredDataBuffer, stop chan struct{}) error
+ GetBatchOperator() IBatchOperator
}
func NewStateMachine(opts *KVOptions, machineConfig MachineConfig, localID uint64,
- fullNS string, clusterInfo common.IClusterInfo, w wait.Wait) (StateMachine, error) {
+ fullNS string, clusterInfo common.IClusterInfo, w wait.Wait, sl *SlowLimiter) (StateMachine, error) {
if machineConfig.LearnerRole == "" {
if machineConfig.StateMachineType == "empty_sm" {
+ nodeLog.Infof("%v using empty sm for test", fullNS)
return &emptySM{w: w}, nil
}
- kvsm, err := NewKVStoreSM(opts, machineConfig, localID, fullNS, clusterInfo)
+ kvsm, err := NewKVStoreSM(opts, machineConfig, localID, fullNS, clusterInfo, sl)
if err != nil {
return nil, err
}
kvsm.w = w
return kvsm, err
- } else if machineConfig.LearnerRole == common.LearnerRoleLogSyncer {
+ } else if common.IsRoleLogSyncer(machineConfig.LearnerRole) {
lssm, err := NewLogSyncerSM(opts, machineConfig, localID, fullNS, clusterInfo)
if err != nil {
return nil, err
@@ -67,13 +89,149 @@ func NewStateMachine(opts *KVOptions, machineConfig MachineConfig, localID uint6
}
}
+type IBatchOperator interface {
+ SetBatched(bool)
+ IsBatched() bool
+ BeginBatch() error
+ AddBatchKey(string)
+ AddBatchRsp(uint64, interface{})
+ IsBatchable(string, string, [][]byte) bool
+ CommitBatch()
+ AbortBatchForError(err error)
+}
+
+type kvbatchOperator struct {
+ batchReqIDList []uint64
+ batchReqRspList []interface{}
+ batchStart time.Time
+ batching bool
+ dupCheckMap map[string]bool
+ kvsm *kvStoreSM
+}
+
+func (bo *kvbatchOperator) SetBatched(b bool) {
+ bo.batching = b
+ if b {
+ bo.batchStart = time.Now()
+ }
+}
+
+func (bo *kvbatchOperator) IsBatched() bool {
+ return bo.batching
+}
+
+func (bo *kvbatchOperator) BeginBatch() error {
+ err := bo.kvsm.store.BeginBatchWrite()
+ if err != nil {
+ return err
+ }
+ bo.SetBatched(true)
+ return nil
+}
+
+func (bo *kvbatchOperator) AddBatchKey(pk string) {
+ bo.dupCheckMap[string(pk)] = true
+}
+
+func (bo *kvbatchOperator) AddBatchRsp(reqID uint64, v interface{}) {
+ bo.batchReqIDList = append(bo.batchReqIDList, reqID)
+ bo.batchReqRspList = append(bo.batchReqRspList, v)
+}
+
+func (bo *kvbatchOperator) IsBatchable(cmdName string, pk string, args [][]byte) bool {
+ if cmdName == "del" && len(args) > 2 {
+ // del for multi keys, no batch
+ return false
+ }
+ _, ok := bo.dupCheckMap[string(pk)]
+ if rockredis.IsBatchableWrite(cmdName) &&
+ len(bo.batchReqIDList) < maxDBBatchCmdNum &&
+ !ok {
+ return true
+ }
+ return false
+}
+
+func (bo *kvbatchOperator) AbortBatchForError(err error) {
+ // we need to clean the write batch even if nothing was batched
+ bo.kvsm.store.AbortBatch()
+ if !bo.IsBatched() {
+ return
+ }
+ bo.SetBatched(false)
+ batchCost := time.Since(bo.batchStart)
+ // write the future response or error
+ for _, rid := range bo.batchReqIDList {
+ bo.kvsm.w.Trigger(rid, err)
+ }
+ slow.LogSlowDBWrite(batchCost, slow.NewSlowLogInfo(bo.kvsm.fullNS, "batched", strconv.Itoa(len(bo.batchReqIDList))))
+ bo.dupCheckMap = make(map[string]bool)
+ bo.batchReqIDList = bo.batchReqIDList[:0]
+ bo.batchReqRspList = bo.batchReqRspList[:0]
+}
+
+func (bo *kvbatchOperator) CommitBatch() {
+ if !bo.IsBatched() {
+ return
+ }
+ err := bo.kvsm.store.CommitBatchWrite()
+ bo.SetBatched(false)
+ batchCost := time.Since(bo.batchStart)
+ if nodeLog.Level() >= common.LOG_DETAIL && len(bo.batchReqIDList) > 1 {
+ bo.kvsm.Infof("batching command number: %v", len(bo.batchReqIDList))
+ }
+ // write the future response or error
+ for idx, rid := range bo.batchReqIDList {
+ if err != nil {
+ bo.kvsm.w.Trigger(rid, err)
+ } else {
+ bo.kvsm.w.Trigger(rid, bo.batchReqRspList[idx])
+ }
+ }
+ if len(bo.batchReqIDList) > 0 {
+ bk := "batched: "
+ // just use one of the batched keys as log
+ for k := range bo.dupCheckMap {
+ bk += k + ","
+ break
+ }
+ slow.LogSlowDBWrite(batchCost, slow.NewSlowLogInfo(bo.kvsm.fullNS, bk, strconv.Itoa(len(bo.batchReqIDList))))
+ bo.kvsm.dbWriteStats.BatchUpdateLatencyStats(batchCost.Microseconds(), int64(len(bo.batchReqIDList)))
+ if batchCost >= time.Millisecond {
+ metric.DBWriteLatency.With(ps.Labels{
+ "namespace": bo.kvsm.fullNS,
+ }).Observe(float64(batchCost.Milliseconds()))
+ }
+ }
+ if len(bo.dupCheckMap) >= 10 {
+ bo.dupCheckMap = make(map[string]bool)
+ } else {
+ for k := range bo.dupCheckMap {
+ delete(bo.dupCheckMap, k)
+ }
+ }
+ bo.batchReqIDList = bo.batchReqIDList[:0]
+ bo.batchReqRspList = bo.batchReqRspList[:0]
+}
+
type emptySM struct {
w wait.Wait
}
-func (esm *emptySM) ApplyRaftRequest(isReplaying bool, reqList BatchInternalRaftRequest, term uint64, index uint64, stop chan struct{}) (bool, error) {
+func (esm *emptySM) ApplyRaftRequest(isReplaying bool, batch IBatchOperator, reqList BatchInternalRaftRequest, term uint64, index uint64, stop chan struct{}) (bool, error) {
+ ts := reqList.Timestamp
+ tn := time.Now()
+ if ts > 0 && !isReplaying {
+ cost := tn.UnixNano() - ts
+ if cost > raftSlow.Nanoseconds()*2 {
+ //nodeLog.Infof("receive raft requests in state machine slow cost: %v, %v, %v", reqList.ReqId, len(reqList.Reqs), cost)
+ }
+ }
for _, req := range reqList.Reqs {
reqID := req.Header.ID
+ if reqID == 0 {
+ reqID = reqList.ReqId
+ }
esm.w.Trigger(reqID, nil)
}
return false, nil
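The IBatchOperator added above is driven by the apply loop through a begin/accumulate/commit cycle. A condensed sketch of that lifecycle, assumed to live in the node package; `pendingReq` and the error handling are simplified stand-ins (the real loop also triggers the wait future per request and aborts only on specific storage errors):

```go
package node

// pendingReq is an illustrative stand-in for one raft request in the apply loop.
type pendingReq struct {
	id      uint64
	cmdName string
	pk      string
	args    [][]byte
	apply   func() (interface{}, error) // placeholder for the internal command handler
}

// applyWithBatch sketches how ApplyRaftRequest drives an IBatchOperator:
// begin a batch for batchable commands, accumulate responses, then commit or abort.
func applyWithBatch(batch IBatchOperator, reqs []pendingReq) {
	for _, req := range reqs {
		if batch.IsBatchable(req.cmdName, req.pk, req.args) {
			if !batch.IsBatched() {
				if err := batch.BeginBatch(); err != nil {
					continue
				}
			}
		} else {
			// a non-batchable command flushes whatever has been accumulated so far
			batch.CommitBatch()
		}
		v, err := req.apply()
		if err != nil {
			batch.AbortBatchForError(err)
			continue
		}
		if batch.IsBatched() {
			batch.AddBatchKey(req.pk)
			batch.AddBatchRsp(req.id, v)
		}
	}
	// commit anything still pending at the end of the request list
	batch.CommitBatch()
}
```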
@@ -87,20 +245,42 @@ func (esm *emptySM) GetSnapshot(term uint64, index uint64) (*KVSnapInfo, error)
var s KVSnapInfo
return &s, nil
}
-func (esm *emptySM) RestoreFromSnapshot(startup bool, raftSnapshot raftpb.Snapshot, stop chan struct{}) error {
+
+func (esm *emptySM) UpdateSnapshotState(term uint64, index uint64) {
+}
+
+func (esm *emptySM) PrepareSnapshot(raftSnapshot raftpb.Snapshot, stop chan struct{}) error {
+ return nil
+}
+
+func (esm *emptySM) RestoreFromSnapshot(raftSnapshot raftpb.Snapshot, stop chan struct{}) error {
return nil
}
-func (esm *emptySM) Destroy() {
+func (esm *emptySM) GetBatchOperator() IBatchOperator {
+ return nil
}
+
+func (esm *emptySM) Destroy() {
+}
+
func (esm *emptySM) CleanData() error {
return nil
}
func (esm *emptySM) Optimize(t string) {
-
}
-func (esm *emptySM) GetStats() common.NamespaceStats {
- return common.NamespaceStats{}
+func (esm *emptySM) OptimizeExpire() {
+}
+func (esm *emptySM) OptimizeAnyRange(CompactAPIRange) {
+}
+func (esm *emptySM) DisableOptimize(bool) {
+}
+func (esm *emptySM) EnableTopn(on bool) {
+}
+func (esm *emptySM) ClearTopn() {
+}
+func (esm *emptySM) GetStats(table string, needDetail bool) metric.NamespaceStats {
+ return metric.NamespaceStats{}
}
func (esm *emptySM) Start() error {
return nil
@@ -119,15 +299,17 @@ type kvStoreSM struct {
fullNS string
machineConfig MachineConfig
ID uint64
- dbWriteStats common.WriteStats
+ dbWriteStats metric.WriteStats
w wait.Wait
router *common.SMCmdRouter
stopping int32
cRouter *conflictRouter
+ slowLimiter *SlowLimiter
+ topnWrites *metric.TopNHot
}
func NewKVStoreSM(opts *KVOptions, machineConfig MachineConfig, localID uint64, ns string,
- clusterInfo common.IClusterInfo) (*kvStoreSM, error) {
+ clusterInfo common.IClusterInfo, sl *SlowLimiter) (*kvStoreSM, error) {
store, err := NewKVStore(opts)
if err != nil {
return nil, err
@@ -140,6 +322,8 @@ func NewKVStoreSM(opts *KVOptions, machineConfig MachineConfig, localID uint64,
store: store,
router: common.NewSMCmdRouter(),
cRouter: NewConflictRouter(),
+ slowLimiter: sl,
+ topnWrites: metric.NewTopNHot(),
}
sm.registerHandlers()
sm.registerConflictHandlers()
@@ -172,9 +356,28 @@ func (kvsm *kvStoreSM) Close() {
kvsm.store.Close()
}
+func (kvsm *kvStoreSM) GetBatchOperator() IBatchOperator {
+ return &kvbatchOperator{
+ dupCheckMap: make(map[string]bool),
+ kvsm: kvsm,
+ }
+}
+
+func (kvsm *kvStoreSM) OptimizeExpire() {
+ kvsm.store.CompactOldExpireData()
+}
+
+func (kvsm *kvStoreSM) OptimizeAnyRange(r CompactAPIRange) {
+ kvsm.store.CompactRange(r.StartFrom, r.EndTo)
+}
+
+func (kvsm *kvStoreSM) DisableOptimize(disable bool) {
+ kvsm.store.DisableManualCompact(disable)
+}
+
func (kvsm *kvStoreSM) Optimize(table string) {
if table == "" {
- kvsm.store.CompactRange()
+ kvsm.store.CompactAllRange()
} else {
kvsm.store.CompactTableRange(table)
}
@@ -184,25 +387,50 @@ func (kvsm *kvStoreSM) GetDBInternalStats() string {
return kvsm.store.GetStatistics()
}
-func (kvsm *kvStoreSM) GetStats() common.NamespaceStats {
- tbs := kvsm.store.GetTables()
- var ns common.NamespaceStats
+func (kvsm *kvStoreSM) EnableTopn(on bool) {
+ if kvsm.topnWrites == nil {
+ return
+ }
+ kvsm.topnWrites.Enable(on)
+}
+
+func (kvsm *kvStoreSM) ClearTopn() {
+ if kvsm.topnWrites == nil {
+ return
+ }
+ kvsm.topnWrites.Clear()
+}
+
+func (kvsm *kvStoreSM) GetStats(table string, needDetail bool) metric.NamespaceStats {
+ var ns metric.NamespaceStats
ns.InternalStats = kvsm.store.GetInternalStatus()
+ ns.DBCompactStats = kvsm.store.GetCompactFilterStats()
ns.DBWriteStats = kvsm.dbWriteStats.Copy()
- diskUsages := kvsm.store.GetBTablesSizes(tbs)
- for i, t := range tbs {
- cnt, _ := kvsm.store.GetTableKeyCount(t)
- var ts common.TableStats
- ts.ApproximateKeyNum = kvsm.store.GetTableApproximateNumInRange(string(t), nil, nil)
- if cnt <= 0 {
- cnt = ts.ApproximateKeyNum
+ if needDetail || len(table) > 0 {
+ var tbs [][]byte
+ if len(table) > 0 {
+ tbs = [][]byte{[]byte(table)}
+ } else {
+ tbs = kvsm.store.GetTables()
+ }
+ diskUsages := kvsm.store.GetBTablesSizes(tbs)
+ for i, t := range tbs {
+ cnt, _ := kvsm.store.GetTableKeyCount(t)
+ var ts metric.TableStats
+ ts.ApproximateKeyNum = kvsm.store.GetTableApproximateNumInRange(string(t), nil, nil)
+ if cnt <= 0 {
+ cnt = ts.ApproximateKeyNum
+ }
+ ts.Name = string(t)
+ ts.KeyNum = cnt
+ ts.DiskBytesUsage = diskUsages[i]
+ ns.TStats = append(ns.TStats, ts)
}
- ts.Name = string(t)
- ts.KeyNum = cnt
- ts.DiskBytesUsage = diskUsages[i]
- ns.TStats = append(ns.TStats, ts)
+ if kvsm.topnWrites != nil {
+ ns.TopNWriteKeys = kvsm.topnWrites.GetTopNWrites()
+ }
+ ns.TopNLargeCollKeys = kvsm.store.GetTopLargeKeys()
}
-
return ns
}
@@ -214,8 +442,10 @@ func (kvsm *kvStoreSM) Destroy() {
kvsm.store.Destroy()
}
-func (kvsm *kvStoreSM) CheckExpiredData(buffer common.ExpiredDataBuffer, stop chan struct{}) error {
- return kvsm.store.CheckExpiredData(buffer, stop)
+func (kvsm *kvStoreSM) UpdateSnapshotState(term uint64, index uint64) {
+ if kvsm.store != nil {
+ kvsm.store.SetLatestSnapIndex(index)
+ }
}
func (kvsm *kvStoreSM) GetSnapshot(term uint64, index uint64) (*KVSnapInfo, error) {
@@ -230,14 +460,47 @@ func (kvsm *kvStoreSM) GetSnapshot(term uint64, index uint64) (*KVSnapInfo, erro
}
func checkLocalBackup(store *KVStore, rs raftpb.Snapshot) (bool, error) {
- var si KVSnapInfo
- err := json.Unmarshal(rs.Data, &si)
- if err != nil {
- return false, err
- }
return store.IsLocalBackupOK(rs.Metadata.Term, rs.Metadata.Index)
}
+func handleReuseOldCheckpoint(srcInfo string, localPath string, term uint64, index uint64, skipReuseN int) (string, string) {
+ newPath := path.Join(localPath, rockredis.GetCheckpointDir(term, index))
+ reused := ""
+
+ latest := rockredis.GetLatestCheckpoint(localPath, skipReuseN, func(dir string) bool {
+ // reuse the last synced checkpoint to speed up rsync;
+ // check whether the source node info matches the current snapshot source
+ d, _ := ioutil.ReadFile(path.Join(dir, "source_node_info"))
+ if d != nil && string(d) == srcInfo {
+ return true
+ } else if dir == newPath {
+ // this dir is the same as the transferring snap but has different node info,
+ // so we should clean it to avoid reusing data from a different node
+ os.RemoveAll(dir)
+ nodeLog.Infof("clean old path: %v since node info mismatch and same with new", dir)
+ }
+ return false
+ })
+ if latest != "" && latest != newPath {
+ nodeLog.Infof("transfer reuse old path: %v to new: %v", latest, newPath)
+ reused = latest
+ // we use hard links to avoid changing the old stable checkpoint files, which should stay unchanged in case
+ // we crash during the new checkpoint transfer
+ files, _ := filepath.Glob(path.Join(latest, "*.sst"))
+ for _, fn := range files {
+ nfn := path.Join(newPath, filepath.Base(fn))
+ nodeLog.Infof("hard link for: %v, %v", fn, nfn)
+ common.CopyFileForHardLink(fn, nfn)
+ }
+ }
+ return reused, newPath
+}
+
+func postFileSync(newPath string, srcInfo string) {
+ // write source node info to allow reuse next time
+ ioutil.WriteFile(path.Join(newPath, "source_node_info"), []byte(srcInfo), common.FILE_PERM)
+}
+
func prepareSnapshotForStore(store *KVStore, machineConfig MachineConfig,
clusterInfo common.IClusterInfo, fullNS string,
localID uint64, stopChan chan struct{},
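handleReuseOldCheckpoint speeds up rsync by hard-linking the .sst files of a matching older checkpoint into the new checkpoint directory, so unchanged files need not be transferred again. A standalone sketch of that hard-link step (paths are illustrative; the real code goes through common.CopyFileForHardLink, which may behave differently from a plain os.Link):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// linkSSTFiles hard-links every *.sst file from an older checkpoint into the new
// checkpoint dir, so rsync only has to transfer files that actually changed.
// This mirrors the loop in handleReuseOldCheckpoint; error handling is simplified.
func linkSSTFiles(oldDir, newDir string) error {
	if err := os.MkdirAll(newDir, 0755); err != nil {
		return err
	}
	files, err := filepath.Glob(filepath.Join(oldDir, "*.sst"))
	if err != nil {
		return err
	}
	for _, fn := range files {
		target := filepath.Join(newDir, filepath.Base(fn))
		// a hard link shares the data blocks without copying them and leaves the
		// old stable checkpoint untouched if the transfer crashes halfway
		if err := os.Link(fn, target); err != nil && !os.IsExist(err) {
			return err
		}
	}
	return nil
}

func main() {
	if err := linkSSTFiles("/tmp/backup/checkpoint-old", "/tmp/backup/checkpoint-new"); err != nil {
		fmt.Println("link failed:", err)
	}
}
```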
@@ -252,21 +515,33 @@ func prepareSnapshotForStore(store *KVStore, machineConfig MachineConfig,
}
syncAddr, syncDir := GetValidBackupInfo(machineConfig, clusterInfo, fullNS, localID, stopChan, raftSnapshot, retry, false)
if syncAddr == "" && syncDir == "" {
- return errors.New("no backup available from others")
+ return errNobackupAvailable
}
select {
case <-stopChan:
return common.ErrStopped
default:
}
+ localPath := store.GetBackupDir()
+ srcInfo := syncAddr + syncDir
+ srcPath := path.Join(rockredis.GetBackupDir(syncDir),
+ rockredis.GetCheckpointDir(raftSnapshot.Metadata.Term, raftSnapshot.Metadata.Index))
+
+ // since most backup on local is not transferred by others,
+ // if we need reuse we need check all backups that has source node info,
+ // and skip the latest snap file in snap dir.
+ _, newPath := handleReuseOldCheckpoint(srcInfo, localPath,
+ raftSnapshot.Metadata.Term, raftSnapshot.Metadata.Index,
+ 0)
+
// copy backup data from the remote leader node, and recovery backup from it
// if local has some old backup data, we should use rsync to sync the data file
// use the rocksdb backup/checkpoint interface to backup data
err := common.RunFileSync(syncAddr,
- path.Join(rockredis.GetBackupDir(syncDir),
- rockredis.GetCheckpointDir(raftSnapshot.Metadata.Term, raftSnapshot.Metadata.Index)),
- store.GetBackupDir(), stopChan)
+ srcPath,
+ localPath, stopChan)
+ postFileSync(newPath, srcInfo)
return err
}
@@ -311,7 +586,10 @@ func GetValidBackupInfo(machineConfig MachineConfig,
uri := "http://" + ssi.RemoteAddr + ":" +
ssi.HttpAPIPort + common.APICheckBackup + "/" + fullNS
- sc, err := common.APIRequest("GET", uri, bytes.NewBuffer(body), time.Second*3, nil)
+ // the check may take a long time, so we need a large timeout here; a slow disk
+ // may take up to 10 minutes to check
+ to := time.Second * time.Duration(common.GetIntDynamicConf(common.ConfCheckSnapTimeout))
+ sc, err := common.APIRequest("GET", uri, bytes.NewBuffer(body), to, nil)
if err != nil {
nodeLog.Infof("request %v error: %v", uri, err)
continue
@@ -348,21 +626,25 @@ func GetValidBackupInfo(machineConfig MachineConfig,
return syncAddr, syncDir
}
-func (kvsm *kvStoreSM) RestoreFromSnapshot(startup bool, raftSnapshot raftpb.Snapshot, stop chan struct{}) error {
+func (kvsm *kvStoreSM) PrepareSnapshot(raftSnapshot raftpb.Snapshot, stop chan struct{}) error {
// while startup we can use the local snapshot to restart,
// but while running, we should install the leader's snapshot,
// so we need remove local and sync from leader
retry := 0
+ var finalErr error
for retry < 3 {
err := prepareSnapshotForStore(kvsm.store, kvsm.machineConfig, kvsm.clusterInfo, kvsm.fullNS,
kvsm.ID, stop, raftSnapshot, retry)
if err != nil {
kvsm.Infof("failed to prepare snapshot: %v", err)
- } else {
- err = kvsm.store.Restore(raftSnapshot.Metadata.Term, raftSnapshot.Metadata.Index)
- if err == nil {
- return nil
+ if err == common.ErrTransferOutofdate ||
+ err == common.ErrRsyncFailed ||
+ err == common.ErrStopped {
+ return err
}
+ finalErr = err
+ } else {
+ return nil
}
retry++
kvsm.Infof("failed to restore snapshot: %v", err)
@@ -372,42 +654,57 @@ func (kvsm *kvStoreSM) RestoreFromSnapshot(startup bool, raftSnapshot raftpb.Sna
case <-time.After(time.Second):
}
}
- return errors.New("failed to restore from snapshot")
+ if finalErr == errNobackupAvailable {
+ kvsm.Infof("failed to restore snapshot at startup since no any backup from anyware")
+ return finalErr
+ }
+ return finalErr
+}
+
+func (kvsm *kvStoreSM) RestoreFromSnapshot(raftSnapshot raftpb.Snapshot, stop chan struct{}) error {
+ if enableSnapApplyRestoreStorageTest {
+ return errors.New("failed to restore from snapshot in failed test")
+ }
+ return kvsm.store.Restore(raftSnapshot.Metadata.Term, raftSnapshot.Metadata.Index)
}
func (kvsm *kvStoreSM) ApplyRaftConfRequest(req raftpb.ConfChange, term uint64, index uint64, stop chan struct{}) error {
return nil
}
-func (kvsm *kvStoreSM) preCheckConflict(cmd redcon.Command, reqTs int64) bool {
+func (kvsm *kvStoreSM) preCheckConflict(cmd redcon.Command, reqTs int64) ConflictState {
cmdName := strings.ToLower(string(cmd.Args[0]))
h, ok := kvsm.cRouter.GetHandler(cmdName)
if !ok {
- return true
+ return Conflict
}
return h(cmd, reqTs)
}
-func (kvsm *kvStoreSM) ApplyRaftRequest(isReplaying bool, reqList BatchInternalRaftRequest, term uint64, index uint64, stop chan struct{}) (bool, error) {
+func (kvsm *kvStoreSM) ApplyRaftRequest(isReplaying bool, batch IBatchOperator, reqList BatchInternalRaftRequest, term uint64, index uint64, stop chan struct{}) (bool, error) {
forceBackup := false
start := time.Now()
- batching := false
- var batchReqIDList []uint64
- var batchReqRspList []interface{}
- var batchStart time.Time
- dupCheckMap := make(map[string]bool, len(reqList.Reqs))
- lastBatchCmd := ""
ts := reqList.Timestamp
if reqList.Type == FromClusterSyncer {
if nodeLog.Level() >= common.LOG_DETAIL {
kvsm.Debugf("recv write from cluster syncer at (%v-%v): %v", term, index, reqList.String())
}
- // TODO: here we need compare the key timestamp in this cluster and the timestamp from raft request to handle
- // the conflict change between two cluster.
- //
}
+ if ts > 0 {
+ cost := start.UnixNano() - ts
+ if cost > raftSlow.Nanoseconds()/2 && nodeLog.Level() >= common.LOG_DETAIL {
+ kvsm.Debugf("receive raft requests in state machine slow cost: %v, %v", len(reqList.Reqs), cost)
+ }
+ if cost >= time.Millisecond.Nanoseconds() {
+ metric.RaftWriteLatency.With(ps.Labels{
+ "namespace": kvsm.fullNS,
+ "step": "raft_commit_sm_received",
+ }).Observe(float64(cost / time.Millisecond.Nanoseconds()))
+ }
+ }
+ // TODO: maybe we can merge writes with the same key and value to avoid too many hot writes on the same key-value
var retErr error
- for reqIndex, req := range reqList.Reqs {
+ for _, req := range reqList.Reqs {
reqTs := ts
if reqTs == 0 {
reqTs = req.Header.Timestamp
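preCheckConflict now returns a tri-state ConflictState instead of a bool, so the apply path can ignore definite conflicts and merely log possible ones. A self-contained sketch of a timestamp-based conflict check; the NoConflict name, the local-timestamp lookup, and the one-second maybe-conflict window are assumptions for illustration only:

```go
package main

import (
	"fmt"
	"time"
)

// ConflictState mirrors the tri-state result preCheckConflict now returns;
// only Conflict and MaybeConflict appear in this patch, the rest is assumed.
type ConflictState int

const (
	NoConflict ConflictState = iota
	MaybeConflict
	Conflict
)

// checkByTimestamp is an illustrative conflict handler: a synced write older than
// the locally stored value is a conflict, one close to it is only a maybe-conflict.
func checkByTimestamp(localTs, reqTs int64) ConflictState {
	switch {
	case reqTs < localTs:
		return Conflict // the local cluster already has a newer write for this key
	case reqTs-localTs < time.Second.Nanoseconds():
		return MaybeConflict // too close to decide; log it for later inspection
	default:
		return NoConflict
	}
}

func main() {
	now := time.Now().UnixNano()
	fmt.Println(checkByTimestamp(now, now-time.Minute.Nanoseconds())) // Conflict
	fmt.Println(checkByTimestamp(now, now+time.Hour.Nanoseconds()))   // NoConflict
}
```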
@@ -416,105 +713,110 @@ func (kvsm *kvStoreSM) ApplyRaftRequest(isReplaying bool, reqList BatchInternalR
if reqID == 0 {
reqID = reqList.ReqId
}
- if req.Header.DataType == int32(RedisReq) {
+ if req.Header.DataType == int32(RedisReq) || req.Header.DataType == int32(RedisV2Req) {
cmd, err := redcon.Parse(req.Data)
if err != nil {
kvsm.w.Trigger(reqID, err)
} else {
+ if req.Header.DataType == int32(RedisV2Req) {
+ // the old redis request cut the namespace before propose; the new redis v2 keeps the namespace in the raft proposal
+ key, _ := common.CutNamesapce(cmd.Args[1])
+ cmd.Args[1] = key
+ }
+ // we need to compare the key timestamp in this cluster with the timestamp from the raft request to handle
+ // conflicting changes between the two clusters.
+ //
if !isReplaying && reqList.Type == FromClusterSyncer && !IsSyncerOnly() {
// syncer only no need check conflict since it will be no write from redis api
conflict := kvsm.preCheckConflict(cmd, reqTs)
- if conflict {
+ if conflict == Conflict {
kvsm.Infof("conflict sync: %v, %v, %v", string(cmd.Raw), req.String(), reqTs)
+ // just ignore the sync; we should not return an error because the syncer would block retrying on a sync error
+ kvsm.w.Trigger(reqID, nil)
+ metric.EventCnt.With(ps.Labels{
+ "namespace": kvsm.fullNS,
+ "event_name": "cluster_syncer_conflicted",
+ }).Inc()
+ continue
+ }
+ if (reqTs > GetSyncedOnlyChangedTs() || conflict == MaybeConflict) && !MaybeConflictLogDisabled() {
+ // the data may be inconsistent if writes happened on the slave after the cluster switched,
+ // so we log the write here to know which writes were synced after we
+ // became the master cluster.
+ kvsm.Infof("write from syncer after syncer state changed, conflict state:%v: %v, %v, %v", conflict, string(cmd.Raw), req.String(), reqTs)
}
}
cmdStart := time.Now()
cmdName := strings.ToLower(string(cmd.Args[0]))
- _, pk, _ := common.ExtractNamesapce(cmd.Args[1])
- _, ok := dupCheckMap[string(pk)]
- handled := false
- if rockredis.IsBatchableWrite(cmdName) &&
- len(batchReqIDList) < maxDBBatchCmdNum &&
- !ok {
- if !batching {
- err := kvsm.store.BeginBatchWrite()
+ pk := cmd.Args[1]
+ if batch.IsBatchable(cmdName, string(pk), cmd.Args) {
+ if !batch.IsBatched() {
+ err := batch.BeginBatch()
if err != nil {
kvsm.Infof("begin batch command %v failed: %v, %v", cmdName, string(cmd.Raw), err)
kvsm.w.Trigger(reqID, err)
continue
}
- batchStart = time.Now()
- batching = true
}
- handled = true
- lastBatchCmd = cmdName
- h, ok := kvsm.router.GetInternalCmdHandler(cmdName)
- if !ok {
- kvsm.Infof("unsupported redis command: %v", cmdName)
- kvsm.w.Trigger(reqID, common.ErrInvalidCommand)
- } else {
- if pk != nil {
- dupCheckMap[string(pk)] = true
- }
- v, err := h(cmd, reqTs)
- if err != nil {
- kvsm.Infof("redis command %v error: %v, cmd: %v", cmdName, err, cmd)
- kvsm.w.Trigger(reqID, err)
- continue
- }
- if nodeLog.Level() > common.LOG_DETAIL {
- kvsm.Infof("batching write command: %v", string(cmd.Raw))
- }
- batchReqIDList = append(batchReqIDList, reqID)
- batchReqRspList = append(batchReqRspList, v)
- kvsm.dbWriteStats.UpdateSizeStats(int64(len(cmd.Raw)))
- }
- if nodeLog.Level() > common.LOG_DETAIL {
- kvsm.Infof("batching redis command: %v", cmdName)
- }
- if reqIndex < len(reqList.Reqs)-1 {
- continue
- }
- }
- if batching {
- batching = false
- batchReqIDList, batchReqRspList, dupCheckMap = kvsm.processBatching(lastBatchCmd, reqList, batchStart,
- batchReqIDList, batchReqRspList, dupCheckMap)
- }
- if handled {
- continue
+ } else {
+ batch.CommitBatch()
}
-
h, ok := kvsm.router.GetInternalCmdHandler(cmdName)
if !ok {
- kvsm.Infof("unsupported redis command: %v", cmd)
+ kvsm.Infof("unsupported redis command: %v", cmdName)
kvsm.w.Trigger(reqID, common.ErrInvalidCommand)
} else {
- v, err := h(cmd, reqTs)
- cmdCost := time.Since(cmdStart)
- if cmdCost > dbWriteSlow || nodeLog.Level() > common.LOG_DETAIL ||
- (nodeLog.Level() >= common.LOG_DEBUG && cmdCost > dbWriteSlow/2) {
- kvsm.Infof("slow write command: %v, cost: %v", string(cmd.Raw), cmdCost)
+ if pk != nil && batch.IsBatched() {
+ batch.AddBatchKey(string(pk))
}
-
- kvsm.dbWriteStats.UpdateWriteStats(int64(len(cmd.Raw)), cmdCost.Nanoseconds()/1000)
- // write the future response or error
+ if kvsm.topnWrites != nil {
+ kvsm.topnWrites.HitWrite(pk)
+ }
+ v, err := h(cmd, reqTs)
if err != nil {
- kvsm.Infof("redis command %v error: %v, cmd: %v", cmdName, err, string(cmd.Raw))
+ kvsm.Errorf("redis command %v error: %v, cmd: %v", cmdName, err, string(cmd.Raw))
kvsm.w.Trigger(reqID, err)
+ if isUnrecoveryError(err) {
+ panic(err)
+ }
+ if rockredis.IsNeedAbortError(err) {
+ batch.AbortBatchForError(err)
+ }
} else {
- kvsm.w.Trigger(reqID, v)
+ if len(cmd.Raw) >= 100 {
+ metric.WriteByteSize.With(ps.Labels{
+ "namespace": kvsm.fullNS,
+ }).Observe(float64(len(cmd.Raw)))
+ }
+ if batch.IsBatched() {
+ batch.AddBatchRsp(reqID, v)
+ if nodeLog.Level() > common.LOG_DETAIL {
+ kvsm.Infof("batching write command:%v, %v", cmdName, string(cmd.Raw))
+ }
+ kvsm.dbWriteStats.UpdateSizeStats(int64(len(cmd.Raw)))
+ } else {
+ kvsm.w.Trigger(reqID, v)
+ cmdCost := time.Since(cmdStart)
+ slow.LogSlowDBWrite(cmdCost, slow.NewSlowLogInfo(kvsm.fullNS, string(cmd.Raw), ""))
+ kvsm.dbWriteStats.UpdateWriteStats(int64(len(cmd.Raw)), cmdCost.Microseconds())
+ if cmdCost >= time.Millisecond {
+ metric.DBWriteLatency.With(ps.Labels{
+ "namespace": kvsm.fullNS,
+ }).Observe(float64(cmdCost.Milliseconds()))
+ }
+ if kvsm.slowLimiter != nil {
+ table, _, _ := common.ExtractTable(pk)
+ kvsm.slowLimiter.RecordSlowCmd(cmdName, string(table), cmdCost)
+ }
+ }
}
}
}
} else {
- if batching {
- batching = false
- batchReqIDList, batchReqRspList, dupCheckMap = kvsm.processBatching(lastBatchCmd, reqList, batchStart,
- batchReqIDList, batchReqRspList, dupCheckMap)
- }
+ batch.CommitBatch()
+
if req.Header.DataType == int32(CustomReq) {
- forceBackup, retErr = kvsm.handleCustomRequest(req, reqID)
+ forceBackup, retErr = kvsm.handleCustomRequest(reqList.Type == FromClusterSyncer, &req, reqID, stop)
} else if req.Header.DataType == int32(SchemaChangeReq) {
kvsm.Infof("handle schema change: %v", string(req.Data))
var sc SchemaChange
@@ -532,20 +834,23 @@ func (kvsm *kvStoreSM) ApplyRaftRequest(isReplaying bool, reqList BatchInternalR
}
}
// TODO: add test case for this
- if batching {
- kvsm.processBatching(lastBatchCmd, reqList, batchStart,
- batchReqIDList, batchReqRspList, dupCheckMap)
+ if reqList.ReqId > 0 {
+ // reqid only be used for cluster sync grpc.
+ // we commit here to allow grpc get notify earlier.
+ batch.CommitBatch()
}
- for _, req := range reqList.Reqs {
- if kvsm.w.IsRegistered(req.Header.ID) {
- kvsm.Infof("missing process request: %v", req.String())
- kvsm.w.Trigger(req.Header.ID, errUnknownData)
+ if !batch.IsBatched() {
+ for _, req := range reqList.Reqs {
+ if kvsm.w.IsRegistered(req.Header.ID) {
+ kvsm.Infof("missing process request: %v", req.String())
+ kvsm.w.Trigger(req.Header.ID, errUnknownData)
+ }
}
}
cost := time.Since(start)
- if cost >= time.Second {
- kvsm.Infof("slow for batch write db: %v, cost %v", len(reqList.Reqs), cost)
+ if cost >= raftSlow {
+ slow.LogSlowDBWrite(cost, slow.NewSlowLogInfo(kvsm.fullNS, "batched", strconv.Itoa(len(reqList.Reqs))))
}
// used for grpc raft proposal, will notify that all the raft logs in this batch is done.
if reqList.ReqId > 0 {
@@ -554,7 +859,7 @@ func (kvsm *kvStoreSM) ApplyRaftRequest(isReplaying bool, reqList BatchInternalR
return forceBackup, retErr
}
-func (kvsm *kvStoreSM) handleCustomRequest(req *InternalRaftRequest, reqID uint64) (bool, error) {
+func (kvsm *kvStoreSM) handleCustomRequest(fromClusterSyncer bool, req *InternalRaftRequest, reqID uint64, stop chan struct{}) (bool, error) {
var p customProposeData
var forceBackup bool
var retErr error
@@ -574,7 +879,11 @@ func (kvsm *kvStoreSM) handleCustomRequest(req *InternalRaftRequest, reqID uint6
if err != nil {
kvsm.Infof("invalid delete table range data: %v", string(p.Data))
} else {
- err = kvsm.store.DeleteTableRange(dr.Dryrun, dr.Table, dr.StartFrom, dr.EndTo)
+ if dr.NoReplayToRemoteCluster && fromClusterSyncer {
+ kvsm.Infof("ignore delete table range since noreplay: %v, %v", string(p.Data), dr)
+ } else {
+ err = kvsm.store.DeleteTableRange(dr.Dryrun, dr.Table, dr.StartFrom, dr.EndTo)
+ }
}
kvsm.w.Trigger(reqID, err)
} else if p.ProposeOp == ProposeOp_RemoteConfChange {
@@ -582,76 +891,53 @@ func (kvsm *kvStoreSM) handleCustomRequest(req *InternalRaftRequest, reqID uint6
cc.Unmarshal(p.Data)
kvsm.Infof("remote config changed: %v, %v ", p, cc.String())
kvsm.w.Trigger(reqID, nil)
+ if kvsm.topnWrites != nil {
+ kvsm.topnWrites.Clear()
+ }
} else if p.ProposeOp == ProposeOp_TransferRemoteSnap {
- localPath := path.Join(kvsm.store.GetBackupDir(), "remote")
+ localPath := kvsm.store.GetBackupDirForRemote()
kvsm.Infof("transfer remote snap request: %v to local: %v", p, localPath)
retErr = errRemoteSnapTransferFailed
err := os.MkdirAll(localPath, common.DIR_PERM)
+ if !IsSyncerOnly() {
+ err = retErr
+ kvsm.Errorf("invalid state for remote snap request: %v to local: %v", p, localPath)
+ }
// trigger early to allow client api return quickly
// the transfer status already be saved.
kvsm.w.Trigger(reqID, err)
if err == nil {
- // how to make sure the client is not timeout while transferring
- err = common.RunFileSync(p.SyncAddr,
- path.Join(rockredis.GetBackupDir(p.SyncPath),
- rockredis.GetCheckpointDir(p.RemoteTerm, p.RemoteIndex)),
- localPath, nil,
- )
+ srcInfo := p.SyncAddr + p.SyncPath
+ srcPath := path.Join(rockredis.GetBackupDir(p.SyncPath),
+ rockredis.GetCheckpointDir(p.RemoteTerm, p.RemoteIndex))
+
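+ // reuse a previous checkpoint transferred from the same source if one exists, so the file sync below should only need to transfer changed files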
+ _, newPath := handleReuseOldCheckpoint(srcInfo, localPath, p.RemoteTerm, p.RemoteIndex, 0)
+
+ if common.IsConfSetted(common.ConfIgnoreRemoteFileSync) {
+ err = nil
+ } else {
+ err = common.RunFileSync(p.SyncAddr,
+ srcPath,
+ localPath, stop,
+ )
+ postFileSync(newPath, srcInfo)
+ }
if err != nil {
kvsm.Infof("transfer remote snap request: %v to local: %v failed: %v", p, localPath, err)
} else {
- // TODO: check the transferred snapshot file
- //rockredis.IsLocalBackupOK()
retErr = nil
}
}
} else if p.ProposeOp == ProposeOp_ApplyRemoteSnap {
kvsm.Infof("begin apply remote snap : %v", p)
retErr = errIgnoredRemoteApply
- // check if there is the same term-index backup on local
- // if not, we can just rename remote snap to this name.
- // if already exist, we need handle rename
- backupDir := kvsm.store.GetBackupDir()
- checkpointDir := rockredis.GetCheckpointDir(p.RemoteTerm, p.RemoteIndex)
- fullPath := path.Join(backupDir, checkpointDir)
- remotePath := path.Join(backupDir, "remote", checkpointDir)
- tmpLocalPath := path.Join(fullPath, "tmplocal")
- _, err := os.Stat(remotePath)
- if err != nil {
- kvsm.Infof("apply remote snap %v failed since backup data error: %v", p, err)
- kvsm.w.Trigger(reqID, err)
- return forceBackup, retErr
- }
- oldOK, err := kvsm.store.IsLocalBackupOK(p.RemoteTerm, p.RemoteIndex)
- if oldOK {
- err = os.Rename(fullPath, tmpLocalPath)
- if err != nil {
- kvsm.Infof("apply remote snap %v failed to rename path : %v", p, err)
- kvsm.w.Trigger(reqID, err)
- return forceBackup, retErr
- }
- defer os.Rename(tmpLocalPath, fullPath)
- }
- err = os.Rename(remotePath, fullPath)
- if err != nil {
- kvsm.Infof("apply remote snap %v failed : %v", p, err)
- kvsm.w.Trigger(reqID, err)
- return forceBackup, retErr
- }
- defer os.Rename(fullPath, remotePath)
- newOK, err := kvsm.store.IsLocalBackupOK(p.RemoteTerm, p.RemoteIndex)
- if err != nil || !newOK {
- kvsm.Errorf("apply remote snap failed since remote backup is not ok: %v", err)
- kvsm.w.Trigger(reqID, err)
- return forceBackup, retErr
- }
- err = kvsm.store.Restore(p.RemoteTerm, p.RemoteIndex)
+ err := kvsm.store.RestoreFromRemoteBackup(p.RemoteTerm, p.RemoteIndex)
kvsm.w.Trigger(reqID, err)
if err != nil {
- kvsm.Errorf("apply remote snap failed to restore backup: %v", err)
+ kvsm.Infof("apply remote snap %v failed : %v", p, err)
} else {
- forceBackup = true
retErr = nil
+ forceBackup = true
}
} else if p.ProposeOp == ProposeOp_ApplySkippedRemoteSnap {
kvsm.Infof("apply remote skip snap %v ", p)
@@ -661,33 +947,3 @@ func (kvsm *kvStoreSM) handleCustomRequest(req *InternalRaftRequest, reqID uint6
}
return forceBackup, retErr
}
-
-// return if configure changed and whether need force backup
-func (kvsm *kvStoreSM) processBatching(cmdName string, reqList BatchInternalRaftRequest, batchStart time.Time, batchReqIDList []uint64, batchReqRspList []interface{},
- dupCheckMap map[string]bool) ([]uint64, []interface{}, map[string]bool) {
-
- err := kvsm.store.CommitBatchWrite()
- dupCheckMap = make(map[string]bool, len(reqList.Reqs))
- batchCost := time.Since(batchStart)
- if nodeLog.Level() > common.LOG_DETAIL {
- kvsm.Infof("batching command number: %v", len(batchReqIDList))
- }
- // write the future response or error
- for idx, rid := range batchReqIDList {
- if err != nil {
- kvsm.w.Trigger(rid, err)
- } else {
- kvsm.w.Trigger(rid, batchReqRspList[idx])
- }
- }
- if batchCost > dbWriteSlow || (nodeLog.Level() >= common.LOG_DEBUG && batchCost > dbWriteSlow/2) {
- kvsm.Infof("slow batch write db, command: %v, batch: %v, cost: %v",
- cmdName, len(batchReqIDList), batchCost)
- }
- if len(batchReqIDList) > 0 {
- kvsm.dbWriteStats.BatchUpdateLatencyStats(batchCost.Nanoseconds()/1000, int64(len(batchReqIDList)))
- }
- batchReqIDList = batchReqIDList[:0]
- batchReqRspList = batchReqRspList[:0]
- return batchReqIDList, batchReqRspList, dupCheckMap
-}
diff --git a/node/state_machine_test.go b/node/state_machine_test.go
new file mode 100644
index 00000000..254b0899
--- /dev/null
+++ b/node/state_machine_test.go
@@ -0,0 +1,104 @@
+package node
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_handleReuseOldCheckpoint(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", fmt.Sprintf("sm-test-%d", time.Now().UnixNano()))
+ assert.Nil(t, err)
+ defer os.RemoveAll(tmpDir)
+ t.Logf("dir:%v\n", tmpDir)
+ terms := []uint64{
+ 0x0001,
+ 0x0002,
+ 0x0003,
+ 0x011a,
+ 0x021a,
+ 0x031a,
+ }
+ indexs := []uint64{
+ 0x01000,
+ 0x02000,
+ 0x03000,
+ 0x0a000,
+ 0x0b000,
+ 0x0c000,
+ }
+ cntIdx := 10
+ type args struct {
+ srcInfo string
+ localPath string
+ term uint64
+ index uint64
+ skipReuseN int
+ }
+ destPath1 := path.Join(tmpDir, fmt.Sprintf("%016x-%016x", 1, 1))
+ destPath2 := path.Join(tmpDir, fmt.Sprintf("%016x-%016x", 2, 2))
+ lastPath := path.Join(tmpDir, fmt.Sprintf("%04x-%05x", terms[len(terms)-1], int(indexs[len(indexs)-1])+cntIdx-1))
+ firstPath := path.Join(tmpDir, fmt.Sprintf("%04x-%05x", terms[0], indexs[0]))
+ mid1Path := path.Join(tmpDir, fmt.Sprintf("%04x-%05x", terms[len(terms)-1], int(indexs[len(indexs)-1])+cntIdx/2))
+ mid2Path := path.Join(tmpDir, fmt.Sprintf("%04x-%05x", terms[len(terms)-2], int(indexs[len(indexs)-2])+cntIdx/2))
+ mid3Path := path.Join(tmpDir, fmt.Sprintf("%04x-%05x", terms[len(terms)-3], int(indexs[len(indexs)-3])+cntIdx/2))
+ tests := []struct {
+ name string
+ args args
+ want string
+ want1 string
+ }{
+ {"src match in first", args{"matchfirst", tmpDir, 1, 1, 0}, lastPath, destPath1},
+ {"src match in last", args{"matchlast", tmpDir, 1, 1, 0}, firstPath, destPath1},
+ {"src not match", args{"notmatch", tmpDir, 1, 1, 0}, "", destPath1},
+ {"src match in middle", args{"matchmiddle", tmpDir, 1, 1, 0}, mid1Path, destPath1},
+ {"src match in same with old", args{"matchfirst", tmpDir, 0x01ba, 0x02ab46c3, 0}, lastPath, path.Join(tmpDir, "00000000000001ba-0000000002ab46c3")},
+ {"src match first skip 1", args{"matchfirst", tmpDir, 2, 2, 1}, "", destPath2},
+ {"src match last skip 1", args{"matchlast", tmpDir, 2, 2, 1}, "", destPath2},
+ {"src match middle skip 1", args{"matchmiddle", tmpDir, 2, 2, 1}, mid2Path, destPath2},
+ {"src match middle skip 2", args{"matchmiddle", tmpDir, 2, 2, 2}, mid3Path, destPath2},
+ {"src match middle skip all", args{"matchmiddle", tmpDir, 2, 2, len(terms)}, "", destPath2},
+ }
+
+ // prepare checkpoint directories and tag some of them with source info markers
+ for i := 0; i < len(terms); i++ {
+ for j := 0; j < cntIdx; j++ {
+ idx := indexs[i] + uint64(j)
+ p := path.Join(tmpDir, fmt.Sprintf("%04x-%05x", terms[i], idx))
+ err := os.MkdirAll(p, 0777)
+ assert.Nil(t, err)
+ if i == 0 && j == 0 {
+ postFileSync(p, "matchlast")
+ t.Logf("gen: %v, %v", p, "matchlast")
+ } else if j == cntIdx/2 {
+ postFileSync(p, "matchmiddle")
+ t.Logf("gen: %v, %v", p, "matchmiddle")
+ } else if i == len(terms)-1 && j == cntIdx-1 {
+ postFileSync(p, "matchfirst")
+ t.Logf("gen: %v, %v", p, "matchfirst")
+ } else {
+ t.Logf("gen: %v, no src info", p)
+ }
+ }
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, got1 := handleReuseOldCheckpoint(tt.args.srcInfo, tt.args.localPath, tt.args.term, tt.args.index, tt.args.skipReuseN)
+ if got != tt.want {
+ t.Errorf("handleReuseOldCheckpoint() got = %v, want %v", got, tt.want)
+ }
+ if got1 != tt.want1 {
+ t.Errorf("handleReuseOldCheckpoint() got1 = %v, want %v", got1, tt.want1)
+ }
+ // rename back so the next case sees the original directory layout
+ if got != "" {
+ os.Rename(got1, got)
+ }
+ })
+ }
+}
diff --git a/node/syncer_learner.go b/node/syncer_learner.go
index 6d460f21..941902df 100644
--- a/node/syncer_learner.go
+++ b/node/syncer_learner.go
@@ -4,13 +4,16 @@ import (
"encoding/json"
"errors"
"fmt"
+ "math/rand"
"sync"
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/pkg/wait"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/metric"
+ "github.com/youzan/ZanRedisDB/pkg/wait"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/syncerpb"
)
var enableTest = false
@@ -20,17 +23,27 @@ func EnableForTest() {
}
const (
- logSendBufferLen = 64
+ logSendBufferLen = 512
)
-var syncerNormalInit = false
+var syncerNormalInit int32
-func SetSyncerNormalInit() {
- syncerNormalInit = true
+var remoteSnapRecoverCnt int64
+
+func SetSyncerNormalInit(enable bool) {
+ if enable {
+ atomic.StoreInt32(&syncerNormalInit, 1)
+ } else {
+ atomic.StoreInt32(&syncerNormalInit, 0)
+ }
+}
+
+func IsSyncerNormalInit() bool {
+ return atomic.LoadInt32(&syncerNormalInit) == 1
}
-var syncLearnerRecvStats common.WriteStats
-var syncLearnerDoneStats common.WriteStats
+var syncLearnerRecvStats metric.WriteStats
+var syncLearnerDoneStats metric.WriteStats
type logSyncerSM struct {
clusterInfo common.IClusterInfo
@@ -42,7 +55,7 @@ type logSyncerSM struct {
syncedState SyncedState
lgSender *RemoteLogSender
stopping int32
- sendCh chan *BatchInternalRaftRequest
+ sendCh chan BatchInternalRaftRequest
sendStop chan struct{}
wg sync.WaitGroup
waitSendLogChs chan chan struct{}
@@ -59,7 +72,7 @@ func NewLogSyncerSM(opts *KVOptions, machineConfig MachineConfig, localID uint64
machineConfig: machineConfig,
ID: localID,
clusterInfo: clusterInfo,
- sendCh: make(chan *BatchInternalRaftRequest, logSendBufferLen),
+ sendCh: make(chan BatchInternalRaftRequest, logSendBufferLen),
sendStop: make(chan struct{}),
waitSendLogChs: make(chan chan struct{}, 1),
//dataDir: path.Join(opts.DataDir, "logsyncer"),
@@ -94,18 +107,28 @@ func (sm *logSyncerSM) Errorf(f string, args ...interface{}) {
func (sm *logSyncerSM) Optimize(t string) {
}
+func (sm *logSyncerSM) OptimizeAnyRange(CompactAPIRange) {
+}
+func (sm *logSyncerSM) DisableOptimize(bool) {
+}
+func (sm *logSyncerSM) OptimizeExpire() {
+}
+func (sm *logSyncerSM) ClearTopn() {
+}
+func (sm *logSyncerSM) EnableTopn(on bool) {
+}
func (sm *logSyncerSM) GetDBInternalStats() string {
return ""
}
-func GetLogLatencyStats() (*common.WriteStats, *common.WriteStats) {
+func GetLogLatencyStats() (*metric.WriteStats, *metric.WriteStats) {
return syncLearnerRecvStats.Copy(), syncLearnerDoneStats.Copy()
}
-func (sm *logSyncerSM) GetLogSyncStats() (common.LogSyncStats, common.LogSyncStats) {
- var recvStats common.LogSyncStats
- var syncStats common.LogSyncStats
+func (sm *logSyncerSM) GetLogSyncStats() (metric.LogSyncStats, metric.LogSyncStats) {
+ var recvStats metric.LogSyncStats
+ var syncStats metric.LogSyncStats
syncStats.Name = sm.fullNS
syncStats.Term, syncStats.Index, syncStats.Timestamp = sm.getSyncedState()
recvStats.Term = atomic.LoadUint64(&sm.receivedState.SyncedTerm)
@@ -115,10 +138,10 @@ func (sm *logSyncerSM) GetLogSyncStats() (common.LogSyncStats, common.LogSyncSta
return recvStats, syncStats
}
-func (sm *logSyncerSM) GetStats() common.NamespaceStats {
- var ns common.NamespaceStats
+func (sm *logSyncerSM) GetStats(table string, needTableDetail bool) metric.NamespaceStats {
+ var ns metric.NamespaceStats
stat := make(map[string]interface{})
- stat["role"] = common.LearnerRoleLogSyncer
+ stat["role"] = sm.machineConfig.LearnerRole
stat["synced"] = atomic.LoadInt64(&sm.syncedCnt)
stat["synced_index"] = atomic.LoadUint64(&sm.syncedState.SyncedIndex)
stat["synced_term"] = atomic.LoadUint64(&sm.syncedState.SyncedTerm)
@@ -138,6 +161,10 @@ func (sm *logSyncerSM) CheckExpiredData(buffer common.ExpiredDataBuffer, stop ch
return nil
}
+func (sm *logSyncerSM) GetBatchOperator() IBatchOperator {
+ return nil
+}
+
func (sm *logSyncerSM) Start() error {
sm.wg.Add(1)
go func() {
@@ -200,23 +227,33 @@ func (sm *logSyncerSM) handlerRaftLogs() {
syncedTerm, syncedIndex, syncedTs := sm.getSyncedState()
sm.Infof("raft log syncer send loop exit at synced: %v-%v-%v", syncedTerm, syncedIndex, syncedTs)
}()
- raftLogs := make([]*BatchInternalRaftRequest, 0, logSendBufferLen)
- var last *BatchInternalRaftRequest
+ raftLogs := make([]BatchInternalRaftRequest, 0, logSendBufferLen)
+ var last BatchInternalRaftRequest
state, err := sm.lgSender.getRemoteSyncedRaft(sm.sendStop)
if err != nil {
sm.Errorf("failed to get the synced state from remote: %v", err)
}
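+ // reuse the request list and marshal buffers across send rounds to avoid repeated allocations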
+ in := syncerpb.RaftReqs{}
+ logListBuf := make([]syncerpb.RaftLogData, logSendBufferLen*2)
+ marshalBufs := make([][]byte, logSendBufferLen*2)
+ waitSendNum := 0
+ lastIndex := uint64(0)
for {
handled := false
var err error
sendCh := sm.sendCh
- if len(raftLogs) > logSendBufferLen*2 {
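+ // stop pulling new logs once too many requests are pending, so the current batch is flushed first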
+ if waitSendNum > logSendBufferLen*10 {
sendCh = nil
}
select {
case req := <-sendCh:
+ if lastIndex != 0 && req.OrigIndex != lastIndex+1 {
+ sm.Infof("syncer log commit is not continue: %v, %v, %v", req, lastIndex, last)
+ }
last = req
+ lastIndex = last.OrigIndex
raftLogs = append(raftLogs, req)
+ waitSendNum += len(req.Reqs)
if nodeLog.Level() > common.LOG_DETAIL {
sm.Debugf("batching raft log: %v in batch: %v", req.String(), len(raftLogs))
}
@@ -226,16 +263,28 @@ func (sm *logSyncerSM) handlerRaftLogs() {
case <-sm.sendStop:
return
case req := <-sm.sendCh:
+ if lastIndex != 0 && req.OrigIndex != lastIndex+1 {
+ sm.Infof("syncer log commit is not continue: %v, %v, %v", req, lastIndex, last)
+ }
last = req
+ lastIndex = last.OrigIndex
raftLogs = append(raftLogs, req)
+ waitSendNum += len(req.Reqs)
if nodeLog.Level() >= common.LOG_DETAIL {
sm.Debugf("batching raft log: %v in batch: %v", req.String(), len(raftLogs))
}
case waitCh := <-sm.waitSendLogChs:
select {
case req := <-sm.sendCh:
+
+ if lastIndex != 0 && req.OrigIndex != lastIndex+1 {
+ sm.Infof("syncer log commit is not continue: %v, %v, %v", req, lastIndex, last)
+ }
last = req
+ lastIndex = last.OrigIndex
+
raftLogs = append(raftLogs, req)
+ waitSendNum += len(req.Reqs)
go func() {
select {
// put back to wait next again
@@ -254,7 +303,34 @@ func (sm *logSyncerSM) handlerRaftLogs() {
if state.IsNewer2(last.OrigTerm, last.OrigIndex) {
// remote is already replayed this raft log
} else {
- err = sm.lgSender.sendRaftLog(raftLogs, sm.sendStop)
+ if len(logListBuf) < len(raftLogs) {
+ logListBuf = append(logListBuf, make([]syncerpb.RaftLogData, len(raftLogs)-len(logListBuf))...)
+ }
+ if len(marshalBufs) < len(raftLogs) {
+ marshalBufs = append(marshalBufs, make([][]byte, len(raftLogs)-len(marshalBufs))...)
+ }
+ in.RaftLog = logListBuf[:len(raftLogs)]
+ for i, e := range raftLogs {
+ logs := in.RaftLog
+ logs[i].Type = syncerpb.EntryNormalRaw
+ dbuf := marshalBufs[i]
+ if len(dbuf) < e.Size() {
+ dbuf = make([]byte, e.Size())
+ marshalBufs[i] = dbuf
+ }
+ used, err := e.MarshalTo(dbuf)
+ if err != nil {
+ sm.Errorf("failed to marshal %v: %v", e.String(), err)
+ panic(err)
+ }
+ logs[i].Data = dbuf[:used]
+ logs[i].Term = e.OrigTerm
+ logs[i].Index = e.OrigIndex
+ logs[i].RaftTimestamp = e.Timestamp
+ logs[i].RaftGroupName = sm.lgSender.grpName
+ logs[i].ClusterName = sm.lgSender.localCluster
+ }
+ err = sm.lgSender.sendRaftLog(in, sm.sendStop)
}
}
if err != nil {
@@ -272,10 +348,12 @@ func (sm *logSyncerSM) handlerRaftLogs() {
atomic.AddInt64(&sm.syncedCnt, int64(len(raftLogs)))
sm.setSyncedState(last.OrigTerm, last.OrigIndex, last.Timestamp)
t := time.Now().UnixNano()
- for _, rl := range raftLogs {
+ for i, rl := range raftLogs {
syncLearnerDoneStats.UpdateLatencyStats((t - rl.Timestamp) / time.Microsecond.Nanoseconds())
+ raftLogs[i].Reqs = nil
}
raftLogs = raftLogs[:0]
+ waitSendNum = 0
}
}
}
@@ -313,6 +391,9 @@ func (sm *logSyncerSM) GetSnapshot(term uint64, index uint64) (*KVSnapInfo, erro
return &si, err
}
+func (sm *logSyncerSM) UpdateSnapshotState(term uint64, index uint64) {
+}
+
func (sm *logSyncerSM) waitIgnoreUntilChanged(term uint64, index uint64, stop chan struct{}) (bool, error) {
for {
if atomic.LoadInt32(&sm.ignoreSend) == 1 {
@@ -333,8 +414,10 @@ func (sm *logSyncerSM) waitIgnoreUntilChanged(term uint64, index uint64, stop ch
t := time.NewTimer(time.Second)
select {
case <-stop:
+ t.Stop()
return true, common.ErrStopped
case <-sm.sendStop:
+ t.Stop()
return true, common.ErrStopped
case <-t.C:
t.Stop()
@@ -345,7 +428,33 @@ func (sm *logSyncerSM) waitIgnoreUntilChanged(term uint64, index uint64, stop ch
}
}
-func (sm *logSyncerSM) RestoreFromSnapshot(startup bool, raftSnapshot raftpb.Snapshot, stop chan struct{}) error {
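+// waitAndCheckTransferLimit sleeps a randomized 10~20s while giving up this recover slot and re-acquires it afterwards,
+// so only a limited number of remote snapshot recoveries run at once; it fails with ErrTransferOutofdate once SnapWaitTimeout is exceeded.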
+func (sm *logSyncerSM) waitAndCheckTransferLimit(start time.Time, stop chan struct{}) (newMyRun int64, err error) {
+ r := rand.Int31n(10) + 10
+ t := time.NewTimer(time.Second * time.Duration(r))
+ atomic.AddInt64(&remoteSnapRecoverCnt, -1)
+ defer func() {
+ newMyRun = atomic.AddInt64(&remoteSnapRecoverCnt, 1)
+ }()
+ select {
+ case <-stop:
+ t.Stop()
+ err = common.ErrStopped
+ return newMyRun, err
+ case <-t.C:
+ t.Stop()
+ if time.Since(start) > common.SnapWaitTimeout {
+ err = common.ErrTransferOutofdate
+ return newMyRun, err
+ }
+ }
+ return newMyRun, err
+}
+
+func (sm *logSyncerSM) RestoreFromSnapshot(raftSnapshot raftpb.Snapshot, stop chan struct{}) error {
+ return nil
+}
+
+func (sm *logSyncerSM) PrepareSnapshot(raftSnapshot raftpb.Snapshot, stop chan struct{}) error {
// get (term-index) from the remote cluster, if the remote cluster has
// greater (term-index) than snapshot, we can just ignore the snapshot restore
// since we already synced the data in snapshot.
@@ -367,6 +476,7 @@ func (sm *logSyncerSM) RestoreFromSnapshot(startup bool, raftSnapshot raftpb.Sna
return nil
}
+ sm.setReceivedState(raftSnapshot.Metadata.Term, raftSnapshot.Metadata.Index, 0)
sm.Infof("wait buffered send logs while restore from snapshot")
err = sm.waitBufferedLogs(0)
if err != nil {
@@ -381,50 +491,85 @@ func (sm *logSyncerSM) RestoreFromSnapshot(startup bool, raftSnapshot raftpb.Sna
return nil
}
- if syncerNormalInit {
+ if IsSyncerNormalInit() {
// set term-index to remote cluster with skipped snap so we can
// avoid transfer the snapshot while the two clusters have the exactly same logs
- return sm.lgSender.sendAndWaitApplySkippedSnap(raftSnapshot, stop)
+ err := sm.lgSender.sendAndWaitApplySkippedSnap(raftSnapshot, stop)
+ if err != nil {
+ return err
+ }
+ sm.Infof("init snap done %v-%v", raftSnapshot.Metadata.Term, raftSnapshot.Metadata.Index)
+ sm.setSyncedState(raftSnapshot.Metadata.Term, raftSnapshot.Metadata.Index, 0)
+ return nil
+ }
+
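+ // limit concurrent remote snapshot recoveries: wait until our slot is within the configured max or the wait times out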
+ myRun := atomic.AddInt64(&remoteSnapRecoverCnt, 1)
+ defer atomic.AddInt64(&remoteSnapRecoverCnt, -1)
+ start := time.Now()
+ for myRun > int64(common.GetIntDynamicConf(common.ConfMaxRemoteRecover)) {
+ oldRun := myRun
+ myRun, err = sm.waitAndCheckTransferLimit(start, stop)
+ if err != nil {
+ sm.Infof("waiting restore snapshot failed: %v", raftSnapshot.Metadata.String())
+ return err
+ }
+ sm.Infof("waiting restore snapshot %v, my run: %v, old: %v", raftSnapshot.Metadata.String(), myRun, oldRun)
}
// while startup we can use the local snapshot to restart,
// but while running, we should install the leader's snapshot,
// so we need remove local and sync from leader
retry := 0
+ var restoreErr error
for retry < 3 {
forceRemote := true
if enableTest {
// for test we use local
forceRemote = false
}
+ state, err := sm.lgSender.getRemoteSyncedRaft(stop)
+ if err == nil {
+ if state.IsNewer2(raftSnapshot.Metadata.Term, raftSnapshot.Metadata.Index) {
+ sm.Infof("ignored restore snapshot since remote has newer raft: %v than %v", state, raftSnapshot.Metadata.String())
+ sm.setSyncedState(raftSnapshot.Metadata.Term, raftSnapshot.Metadata.Index, 0)
+ return nil
+ }
+ } else {
+ restoreErr = err
+ }
syncAddr, syncDir := GetValidBackupInfo(sm.machineConfig, sm.clusterInfo, sm.fullNS, sm.ID, stop, raftSnapshot, retry, forceRemote)
// note the local sync path not supported, so we need try another replica if syncAddr is empty
if syncAddr == "" && syncDir == "" {
- err = errors.New("no backup available from others")
+ // the snap may be out of date on other replicas, so we cannot restore from an old snapshot
+ restoreErr = errNobackupAvailable
} else {
- err = sm.lgSender.notifyTransferSnap(raftSnapshot, syncAddr, syncDir)
- if err != nil {
- sm.Infof("notify apply snap %v,%v,%v failed: %v", raftSnapshot.Metadata, syncAddr, syncDir, err)
+ restoreErr = sm.lgSender.waitTransferSnapStatus(raftSnapshot, syncAddr, syncDir, stop)
+ if restoreErr != nil {
+ sm.Infof("wait transfer snap %v,%v,%v failed: %v", raftSnapshot.Metadata, syncAddr, syncDir, restoreErr)
} else {
- err := sm.lgSender.waitApplySnapStatus(raftSnapshot, stop)
- if err != nil {
- sm.Infof("wait apply snap %v,%v,%v failed: %v", raftSnapshot.Metadata, syncAddr, syncDir, err)
- } else {
- break
+ restoreErr = sm.lgSender.waitApplySnapStatus(raftSnapshot, stop)
+ if restoreErr != nil {
+ sm.Infof("wait apply snap %v,%v,%v failed: %v", raftSnapshot.Metadata, syncAddr, syncDir, restoreErr)
}
}
+ // the snapshot may be out of date, so we do not retry here
+ break
}
retry++
select {
case <-stop:
- return err
+ return restoreErr
case <-time.After(time.Second):
}
}
- if err != nil {
- return err
+ if restoreErr != nil {
+ sm.Infof("restore snapshot %v failed: %v", raftSnapshot.Metadata.String(), restoreErr)
+ if restoreErr == errNobackupAvailable {
+ sm.Infof("restore snapshot %v while startup failed due to no snapshot, we can ignore in learner while startup", raftSnapshot.Metadata.String())
+ }
+ return restoreErr
}
- sm.Infof("apply snap done %v", raftSnapshot.Metadata)
+ sm.Infof("apply snap done %v", raftSnapshot.Metadata.String())
sm.setSyncedState(raftSnapshot.Metadata.Term, raftSnapshot.Metadata.Index, 0)
return nil
}
@@ -441,18 +586,18 @@ func (sm *logSyncerSM) ApplyRaftConfRequest(req raftpb.ConfChange, term uint64,
p.RemoteIndex = index
p.Data, _ = req.Marshal()
rreq.Data, _ = json.Marshal(p)
- rreq.Header = &RequestHeader{
+ rreq.Header = RequestHeader{
DataType: int32(CustomReq),
ID: 0,
Timestamp: reqList.Timestamp,
}
- reqList.Reqs = append(reqList.Reqs, &rreq)
+ reqList.Reqs = append(reqList.Reqs, rreq)
reqList.ReqId = rreq.Header.ID
- _, err := sm.ApplyRaftRequest(false, reqList, term, index, stop)
+ _, err := sm.ApplyRaftRequest(false, nil, reqList, term, index, stop)
return err
}
-func (sm *logSyncerSM) ApplyRaftRequest(isReplaying bool, reqList BatchInternalRaftRequest, term uint64, index uint64, stop chan struct{}) (bool, error) {
+func (sm *logSyncerSM) ApplyRaftRequest(isReplaying bool, batch IBatchOperator, reqList BatchInternalRaftRequest, term uint64, index uint64, stop chan struct{}) (bool, error) {
if nodeLog.Level() >= common.LOG_DETAIL {
sm.Debugf("applying in log syncer: %v at (%v, %v)", reqList.String(), term, index)
}
@@ -460,9 +605,16 @@ func (sm *logSyncerSM) ApplyRaftRequest(isReplaying bool, reqList BatchInternalR
sm.Infof("ignore sync from cluster syncer, %v-%v:%v", term, index, reqList.String())
return false, nil
}
+ ts := reqList.Timestamp
+ if ts == 0 {
+ // for some events such as leader transfer the reqList will be empty, so it carries no timestamp
+ sm.Infof("missing timestamp in raft request: %v", reqList.String())
+ reqList.Timestamp = time.Now().UnixNano()
+ } else {
+ latency := time.Now().UnixNano() - ts
+ syncLearnerRecvStats.UpdateLatencyStats(latency / time.Microsecond.Nanoseconds())
+ }
sm.setReceivedState(term, index, reqList.Timestamp)
- latency := time.Now().UnixNano() - reqList.Timestamp
- syncLearnerRecvStats.UpdateLatencyStats(latency / time.Microsecond.Nanoseconds())
forceBackup := false
reqList.OrigTerm = term
@@ -494,10 +646,6 @@ func (sm *logSyncerSM) ApplyRaftRequest(isReplaying bool, reqList BatchInternalR
break
}
}
- if reqList.Timestamp == 0 {
- sm.Errorf("miss timestamp in raft request: %v", reqList)
- }
- // TODO: stats latency raft write begin to begin sync.
for _, req := range reqList.Reqs {
if req.Header.DataType == int32(CustomReq) {
var p customProposeData
@@ -513,7 +661,7 @@ func (sm *logSyncerSM) ApplyRaftRequest(isReplaying bool, reqList BatchInternalR
}
}
select {
- case sm.sendCh <- &reqList:
+ case sm.sendCh <- reqList:
case <-stop:
return false, nil
case <-sm.sendStop:
diff --git a/node/ttl.go b/node/ttl.go
index f39932e1..aaa1b6a3 100644
--- a/node/ttl.go
+++ b/node/ttl.go
@@ -1,16 +1,11 @@
package node
import (
- "bytes"
"errors"
- "fmt"
"strconv"
- "sync"
- "sync/atomic"
- "time"
- "github.com/absolute8511/ZanRedisDB/common"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
)
var (
@@ -33,50 +28,6 @@ func init() {
expireCmds[common.ZSET] = []byte("zmclear")
}
-func (nd *KVNode) setexCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- conn.WriteString("OK")
-}
-
-func (nd *KVNode) expireCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
- }
-}
-
-func (nd *KVNode) listExpireCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
- }
-}
-
-func (nd *KVNode) hashExpireCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
- }
-}
-
-func (nd *KVNode) setExpireCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
- }
-}
-
-func (nd *KVNode) zsetExpireCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
- }
-}
-
func (kvsm *kvStoreSM) localSetexCommand(cmd redcon.Command, ts int64) (interface{}, error) {
if duration, err := strconv.Atoi(string(cmd.Args[2])); err != nil {
return nil, err
@@ -89,7 +40,7 @@ func (kvsm *kvStoreSM) localExpireCommand(cmd redcon.Command, ts int64) (interfa
if duration, err := strconv.Atoi(string(cmd.Args[2])); err != nil {
return int64(0), err
} else {
- return kvsm.store.Expire(cmd.Args[1], int64(duration))
+ return kvsm.store.Expire(ts, cmd.Args[1], int64(duration))
}
}
@@ -97,7 +48,7 @@ func (kvsm *kvStoreSM) localHashExpireCommand(cmd redcon.Command, ts int64) (int
if duration, err := strconv.Atoi(string(cmd.Args[2])); err != nil {
return int64(0), err
} else {
- return kvsm.store.HExpire(cmd.Args[1], int64(duration))
+ return kvsm.store.HExpire(ts, cmd.Args[1], int64(duration))
}
}
@@ -105,7 +56,7 @@ func (kvsm *kvStoreSM) localListExpireCommand(cmd redcon.Command, ts int64) (int
if duration, err := strconv.Atoi(string(cmd.Args[2])); err != nil {
return int64(0), err
} else {
- return kvsm.store.LExpire(cmd.Args[1], int64(duration))
+ return kvsm.store.LExpire(ts, cmd.Args[1], int64(duration))
}
}
@@ -113,7 +64,7 @@ func (kvsm *kvStoreSM) localSetExpireCommand(cmd redcon.Command, ts int64) (inte
if duration, err := strconv.Atoi(string(cmd.Args[2])); err != nil {
return int64(0), err
} else {
- return kvsm.store.SExpire(cmd.Args[1], int64(duration))
+ return kvsm.store.SExpire(ts, cmd.Args[1], int64(duration))
}
}
@@ -121,36 +72,40 @@ func (kvsm *kvStoreSM) localZSetExpireCommand(cmd redcon.Command, ts int64) (int
if duration, err := strconv.Atoi(string(cmd.Args[2])); err != nil {
return int64(0), err
} else {
- return kvsm.store.ZExpire(cmd.Args[1], int64(duration))
+ return kvsm.store.ZExpire(ts, cmd.Args[1], int64(duration))
}
}
-func (nd *KVNode) persistCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- if rsp, ok := v.(int64); ok {
- conn.WriteInt64(rsp)
+func (kvsm *kvStoreSM) localBitExpireCommand(cmd redcon.Command, ts int64) (interface{}, error) {
+ if duration, err := strconv.Atoi(string(cmd.Args[2])); err != nil {
+ return int64(0), err
} else {
- conn.WriteError(errInvalidResponse.Error())
+ return kvsm.store.BitExpire(ts, cmd.Args[1], int64(duration))
}
}
func (kvsm *kvStoreSM) localPersistCommand(cmd redcon.Command, ts int64) (interface{}, error) {
- return kvsm.store.Persist(cmd.Args[1])
+ return kvsm.store.Persist(ts, cmd.Args[1])
}
func (kvsm *kvStoreSM) localHashPersistCommand(cmd redcon.Command, ts int64) (interface{}, error) {
- return kvsm.store.HPersist(cmd.Args[1])
+ return kvsm.store.HPersist(ts, cmd.Args[1])
}
func (kvsm *kvStoreSM) localListPersistCommand(cmd redcon.Command, ts int64) (interface{}, error) {
- return kvsm.store.LPersist(cmd.Args[1])
+ return kvsm.store.LPersist(ts, cmd.Args[1])
}
func (kvsm *kvStoreSM) localSetPersistCommand(cmd redcon.Command, ts int64) (interface{}, error) {
- return kvsm.store.SPersist(cmd.Args[1])
+ return kvsm.store.SPersist(ts, cmd.Args[1])
}
func (kvsm *kvStoreSM) localZSetPersistCommand(cmd redcon.Command, ts int64) (interface{}, error) {
- return kvsm.store.ZPersist(cmd.Args[1])
+ return kvsm.store.ZPersist(ts, cmd.Args[1])
+}
+
+func (kvsm *kvStoreSM) localBitPersistCommand(cmd redcon.Command, ts int64) (interface{}, error) {
+ return kvsm.store.BitPersist(ts, cmd.Args[1])
}
//read commands related to TTL
@@ -194,256 +149,50 @@ func (nd *KVNode) zttlCommand(conn redcon.Conn, cmd redcon.Command) {
}
}
-type ExpireHandler struct {
- node *KVNode
- quitC chan struct{}
- leaderChangedCh chan struct{}
- applyLock sync.Mutex
- batchBuffer *raftExpiredBuffer
-
- running int32
- wg sync.WaitGroup
-}
-
-func NewExpireHandler(node *KVNode) *ExpireHandler {
- return &ExpireHandler{
- node: node,
- leaderChangedCh: make(chan struct{}, 8),
- quitC: make(chan struct{}),
- batchBuffer: newRaftExpiredBuffer(node),
- }
-}
-
-func (exp *ExpireHandler) Start() {
- if !atomic.CompareAndSwapInt32(&exp.running, 0, 1) {
- return
- }
-
- exp.wg.Add(1)
- go func() {
- defer exp.wg.Done()
- exp.watchLeaderChanged()
- }()
-
-}
-
-func (exp *ExpireHandler) Stop() {
- if atomic.CompareAndSwapInt32(&exp.running, 1, 0) {
- close(exp.quitC)
- exp.wg.Wait()
- }
-}
-
-func (exp *ExpireHandler) LeaderChanged() {
- select {
- case exp.leaderChangedCh <- struct{}{}:
- case <-exp.quitC:
- return
- }
-}
-
-func (exp *ExpireHandler) watchLeaderChanged() {
- var stop chan struct{}
- applying := false
- for {
- select {
- case <-exp.leaderChangedCh:
- if exp.node.expirationPolicy != common.ConsistencyDeletion {
- continue
- }
- if exp.node.IsLead() && !applying {
- stop = make(chan struct{})
- exp.wg.Add(1)
- go func(stop chan struct{}) {
- defer exp.wg.Done()
- exp.applyExpiration(stop)
- }(stop)
- applying = true
-
- } else if !exp.node.IsLead() && applying {
- close(stop)
- applying = false
- }
- case <-exp.quitC:
- if applying {
- close(stop)
- applying = false
- }
- return
- }
- }
-}
-
-func buildRawExpireCommand(dt common.DataType, keys [][]byte) []byte {
- cmd := expireCmds[dt]
- buf := make([]byte, 0, 128)
-
- buf = append(buf, '*')
- buf = append(buf, strconv.FormatInt(int64(len(keys)+1), 10)...)
- buf = append(buf, '\r', '\n')
-
- buf = append(buf, '$')
- buf = append(buf, strconv.FormatInt(int64(len(cmd)), 10)...)
- buf = append(buf, '\r', '\n')
- buf = append(buf, cmd...)
- buf = append(buf, '\r', '\n')
-
- for _, key := range keys {
- buf = append(buf, '$')
- buf = append(buf, strconv.FormatInt(int64(len(key)), 10)...)
- buf = append(buf, '\r', '\n')
- buf = append(buf, key...)
- buf = append(buf, '\r', '\n')
- }
- return buf
-}
-
-func (exp *ExpireHandler) applyExpiration(stop chan struct{}) {
- nodeLog.Infof("begin to apply expiration")
- exp.applyLock.Lock()
- checkTicker := time.NewTicker(time.Second)
-
- defer func() {
- checkTicker.Stop()
- exp.batchBuffer.Reset()
- exp.applyLock.Unlock()
- nodeLog.Infof("apply expiration has been stopped")
- }()
-
- var buffFullTimes int
- for {
- select {
- case <-checkTicker.C:
- if err := exp.node.sm.CheckExpiredData(exp.batchBuffer, stop); err == ErrExpiredBatchedBuffFull {
- if buffFullTimes += 1; buffFullTimes >= 3 {
- nodeLog.Warningf("expired data buffer is filled three times in succession, stats:%s", exp.batchBuffer.GetStats())
- buffFullTimes = 0
- }
- } else if err != nil {
- nodeLog.Errorf("check expired data by the underlying storage system failed, err:%s", err.Error())
- }
-
- select {
- case <-stop:
- return
- default:
- exp.batchBuffer.CommitAll()
- }
- case <-stop:
- return
- }
- }
-}
-
-type raftExpiredBuffer struct {
- internalBuf [common.ALL - common.NONE]*raftBatchBuffer
-}
-
-func newRaftExpiredBuffer(nd *KVNode) *raftExpiredBuffer {
- raftBuff := &raftExpiredBuffer{}
-
- types := []common.DataType{common.KV, common.LIST, common.HASH,
- common.SET, common.ZSET}
-
- for _, t := range types {
- raftBuff.internalBuf[t] = newRaftBatchBuffer(nd, t)
- }
-
- return raftBuff
-}
-
-func (raftBuffer *raftExpiredBuffer) Write(dt common.DataType, key []byte) error {
- return raftBuffer.internalBuf[dt].propose(key)
-}
-
-func (raftBuffer *raftExpiredBuffer) CommitAll() {
- for _, buff := range raftBuffer.internalBuf {
- if buff != nil {
- buff.commit()
- }
- }
-}
-
-func (raftBuffer *raftExpiredBuffer) Reset() {
- for _, buff := range raftBuffer.internalBuf {
- if buff != nil {
- buff.clear()
- }
+func (nd *KVNode) bttlCommand(conn redcon.Conn, cmd redcon.Command) {
+ if v, err := nd.store.BitTtl(cmd.Args[1]); err != nil {
+ conn.WriteError(err.Error())
+ } else {
+ conn.WriteInt64(v)
}
}
-func (raftBuff *raftExpiredBuffer) GetStats() string {
- stats := bytes.NewBufferString("the stats of expired data buffer:\r\n")
- for _, buff := range raftBuff.internalBuf {
- if buff != nil {
- stats.WriteString(buff.GetStats())
- }
+func (nd *KVNode) hKeyExistCommand(conn redcon.Conn, cmd redcon.Command) {
+ if v, err := nd.store.HKeyExists(cmd.Args[1]); err != nil {
+ conn.WriteError(err.Error())
+ } else {
+ conn.WriteInt64(v)
}
- return stats.String()
-}
-
-type raftBatchBuffer struct {
- sync.Mutex
- dataType common.DataType
- keys [][]byte
- node *KVNode
}
-func newRaftBatchBuffer(nd *KVNode, dt common.DataType) *raftBatchBuffer {
- return &raftBatchBuffer{
- node: nd,
- keys: make([][]byte, 0, raftBatchBufferSize),
- dataType: dt,
+func (nd *KVNode) lKeyExistCommand(conn redcon.Conn, cmd redcon.Command) {
+ if v, err := nd.store.LKeyExists(cmd.Args[1]); err != nil {
+ conn.WriteError(err.Error())
+ } else {
+ conn.WriteInt64(v)
}
}
-func (rb *raftBatchBuffer) propose(key []byte) error {
- defer rb.Unlock()
- rb.Lock()
-
- if len(rb.keys) >= raftBatchBufferSize {
- return ErrExpiredBatchedBuffFull
+func (nd *KVNode) sKeyExistCommand(conn redcon.Conn, cmd redcon.Command) {
+ if v, err := nd.store.SKeyExists(cmd.Args[1]); err != nil {
+ conn.WriteError(err.Error())
} else {
- rb.keys = append(rb.keys, key)
+ conn.WriteInt64(v)
}
- return nil
}
-func (rb *raftBatchBuffer) commit() {
- rb.Lock()
- if len(rb.keys) > 0 {
- rb.node.Propose(buildRawExpireCommand(rb.dataType, rb.keys))
- rb.keys = rb.keys[:0]
+func (nd *KVNode) zKeyExistCommand(conn redcon.Conn, cmd redcon.Command) {
+ if v, err := nd.store.ZKeyExists(cmd.Args[1]); err != nil {
+ conn.WriteError(err.Error())
+ } else {
+ conn.WriteInt64(v)
}
- rb.Unlock()
-}
-
-func (rb *raftBatchBuffer) clear() {
- rb.Lock()
- rb.keys = rb.keys[:0]
- rb.Unlock()
-
}
-func (rb *raftBatchBuffer) GetStats() string {
- stats := make(map[string]int)
- rb.Lock()
- for _, k := range rb.keys {
- if t, _, err := common.ExtractTable(k); err != nil {
- continue
- } else {
- stats[string(t)] += 1
- }
- }
- rb.Unlock()
-
- statsBuf := bytes.NewBufferString(fmt.Sprintf("tables have more than 300 %s keys expired: ", rb.dataType.String()))
- for table, count := range stats {
- if count >= 300 {
- statsBuf.WriteString(fmt.Sprintf("[%s: %d], ", table, count))
- }
+func (nd *KVNode) bKeyExistCommand(conn redcon.Conn, cmd redcon.Command) {
+ if v, err := nd.store.BitKeyExist(cmd.Args[1]); err != nil {
+ conn.WriteError(err.Error())
+ } else {
+ conn.WriteInt64(v)
}
- statsBuf.WriteString("\r\n")
-
- return statsBuf.String()
}
diff --git a/node/util.go b/node/util.go
index ae00a2a9..f69e9774 100644
--- a/node/util.go
+++ b/node/util.go
@@ -2,15 +2,17 @@ package node
import (
"fmt"
- "strconv"
"sync/atomic"
+ "time"
- "github.com/absolute8511/ZanRedisDB/common"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
)
-var nodeLog = common.NewLevelLogger(common.LOG_INFO, common.NewDefaultLogger("node"))
+var nodeLog = common.NewLevelLogger(common.LOG_INFO, common.NewLogger())
var syncerOnly int32
+var logMaybeConflictDisabled int32
+var syncerOnlyChangedTs int64
func SetLogLevel(level int) {
nodeLog.SetLevel(int32(level))
@@ -26,63 +28,85 @@ func SetSyncerOnly(enable bool) {
atomic.StoreInt32(&syncerOnly, 1)
} else {
atomic.StoreInt32(&syncerOnly, 0)
+ atomic.StoreInt64(&syncerOnlyChangedTs, time.Now().UnixNano())
}
}
+func SwitchDisableMaybeConflictLog(disable bool) {
+ if disable {
+ atomic.StoreInt32(&logMaybeConflictDisabled, 1)
+ } else {
+ atomic.StoreInt32(&logMaybeConflictDisabled, 0)
+ }
+}
+
+func MaybeConflictLogDisabled() bool {
+ return atomic.LoadInt32(&logMaybeConflictDisabled) == 1
+}
+
+func GetSyncedOnlyChangedTs() int64 {
+ return atomic.LoadInt64(&syncerOnlyChangedTs)
+}
+
func IsSyncerOnly() bool {
return atomic.LoadInt32(&syncerOnly) == 1
}
-func buildCommand(args [][]byte) redcon.Command {
- // build a pipeline command
- buf := make([]byte, 0, 512)
- buf = append(buf, '*')
- buf = append(buf, strconv.FormatInt(int64(len(args)), 10)...)
- buf = append(buf, '\r', '\n')
-
- poss := make([]int, 0, len(args)*2)
- for _, arg := range args {
- buf = append(buf, '$')
- buf = append(buf, strconv.FormatInt(int64(len(arg)), 10)...)
- buf = append(buf, '\r', '\n')
- poss = append(poss, len(buf), len(buf)+len(arg))
- buf = append(buf, arg...)
- buf = append(buf, '\r', '\n')
- }
+func checkOKRsp(cmd redcon.Command, v interface{}) (interface{}, error) {
+ return "OK", nil
+}
- // reformat a new command
- var ncmd redcon.Command
- ncmd.Raw = buf
- ncmd.Args = make([][]byte, len(poss)/2)
- for i, j := 0, 0; i < len(poss); i, j = i+2, j+1 {
- ncmd.Args[j] = ncmd.Raw[poss[i]:poss[i+1]]
+func checkAndRewriteIntRsp(cmd redcon.Command, v interface{}) (interface{}, error) {
+ if rsp, ok := v.(int64); ok {
+ return rsp, nil
}
- return ncmd
+ return nil, errInvalidResponse
}
-func rebuildFirstKeyAndPropose(kvn *KVNode, conn redcon.Conn, cmd redcon.Command) (redcon.Command,
- interface{}, bool) {
- _, key, err := common.ExtractNamesapce(cmd.Args[1])
- if err != nil {
- conn.WriteError(err.Error())
- return cmd, nil, false
+func checkAndRewriteBulkRsp(cmd redcon.Command, v interface{}) (interface{}, error) {
+ if v == nil {
+ return nil, nil
}
-
- if common.IsValidTableName(key) {
- conn.WriteError(common.ErrInvalidTableName.Error())
- return cmd, nil, false
+ rsp, ok := v.([]byte)
+ if ok {
+ return rsp, nil
}
+ return nil, errInvalidResponse
+}
+
+func buildCommand(args [][]byte) redcon.Command {
+ return common.BuildCommand(args)
+}
+
+// we can only use redis v2 for single-key write commands; otherwise we would need to cut the namespace for different keys in different commands
+func rebuildFirstKeyAndPropose(kvn *KVNode, cmd redcon.Command, f common.CommandRspFunc) (interface{}, error) {
- cmd.Args[1] = key
- ncmd := buildCommand(cmd.Args)
- copy(cmd.Raw[0:], ncmd.Raw[:])
- cmd.Raw = cmd.Raw[:len(ncmd.Raw)]
- rsp, err := kvn.Propose(cmd.Raw)
+ var rsp *FutureRsp
+ var err error
+ if !UseRedisV2 {
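+ // redis v1 commands carry the namespace in the key, so cut it from the first key and rebuild the raw command before proposing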
+ var key []byte
+ key, err = common.CutNamesapce(cmd.Args[1])
+ if err != nil {
+ return nil, err
+ }
+
+ orig := cmd.Args[1]
+ cmd.Args[1] = key
+ ncmd := buildCommand(cmd.Args)
+ rsp, err = kvn.RedisProposeAsync(ncmd.Raw)
+ cmd.Args[1] = orig
+ } else {
+ rsp, err = kvn.RedisV2ProposeAsync(cmd.Raw)
+ }
if err != nil {
- conn.WriteError(err.Error())
- return cmd, nil, false
+ return nil, err
}
- return cmd, rsp, true
+ if f != nil {
+ rsp.rspHandle = func(r interface{}) (interface{}, error) {
+ return f(cmd, r)
+ }
+ }
+ return rsp, err
}
func wrapReadCommandK(f common.CommandFunc) common.CommandFunc {
@@ -91,7 +115,7 @@ func wrapReadCommandK(f common.CommandFunc) common.CommandFunc {
conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
return
}
- _, key, err := common.ExtractNamesapce(cmd.Args[1])
+ key, err := common.CutNamesapce(cmd.Args[1])
if err != nil {
conn.WriteError(err.Error())
return
@@ -107,7 +131,7 @@ func wrapReadCommandKSubkey(f common.CommandFunc) common.CommandFunc {
conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
return
}
- _, key, err := common.ExtractNamesapce(cmd.Args[1])
+ key, err := common.CutNamesapce(cmd.Args[1])
if err != nil {
conn.WriteError(err.Error())
return
@@ -123,7 +147,7 @@ func wrapReadCommandKSubkeySubkey(f common.CommandFunc) common.CommandFunc {
conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
return
}
- _, key, err := common.ExtractNamesapce(cmd.Args[1])
+ key, err := common.CutNamesapce(cmd.Args[1])
if err != nil {
conn.WriteError(err.Error())
return
@@ -143,7 +167,7 @@ func wrapReadCommandKAnySubkeyN(f common.CommandFunc, minSubLen int) common.Comm
conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
return
}
- _, key, err := common.ExtractNamesapce(cmd.Args[1])
+ key, err := common.CutNamesapce(cmd.Args[1])
if err != nil {
conn.WriteError(err.Error())
return
@@ -159,12 +183,12 @@ func wrapReadCommandKK(f common.CommandFunc) common.CommandFunc {
conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
return
}
- if len(cmd.Args[1:]) >= common.MAX_BATCH_NUM {
+ if len(cmd.Args[1:]) > common.MAX_BATCH_NUM {
conn.WriteError(errTooMuchBatchSize.Error())
return
}
for i := 1; i < len(cmd.Args); i++ {
- _, key, err := common.ExtractNamesapce(cmd.Args[i])
+ key, err := common.CutNamesapce(cmd.Args[i])
if err != nil {
conn.WriteError(err.Error())
return
@@ -175,203 +199,144 @@ func wrapReadCommandKK(f common.CommandFunc) common.CommandFunc {
}
}
-func wrapWriteCommandK(kvn *KVNode, f common.CommandRspFunc) common.CommandFunc {
- return func(conn redcon.Conn, cmd redcon.Command) {
+func wrapWriteCommandK(kvn *KVNode, preCheck func(key []byte) (bool, interface{}, error), f common.CommandRspFunc) common.WriteCommandFunc {
+ return func(cmd redcon.Command) (interface{}, error) {
if len(cmd.Args) != 2 {
- conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
- return
- }
- cmd, rsp, ok := rebuildFirstKeyAndPropose(kvn, conn, cmd)
- if !ok {
- return
- }
- f(conn, cmd, rsp)
- }
-}
-
-func wrapWriteCommandKK(kvn *KVNode, f common.CommandRspFunc) common.CommandFunc {
- return func(conn redcon.Conn, cmd redcon.Command) {
- if len(cmd.Args) < 2 {
- conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
- return
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
- args := cmd.Args[1:]
- if len(args) >= common.MAX_BATCH_NUM {
- conn.WriteError(errTooMuchBatchSize.Error())
- return
+ if err := common.CheckKey(cmd.Args[1]); err != nil {
+ return nil, err
}
- for i, v := range args {
- _, key, err := common.ExtractNamesapce(v)
+ if preCheck != nil {
+ key, err := common.CutNamesapce(cmd.Args[1])
if err != nil {
- conn.WriteError(err.Error())
- return
+ return nil, err
}
- if common.IsValidTableName(key) {
- conn.WriteError(common.ErrInvalidTableName.Error())
- return
+ needContinue, rsp, err := preCheck(key)
+ if err != nil {
+ return nil, err
+ }
+ if !needContinue {
+ return f(cmd, rsp)
}
-
- args[i] = key
- }
- ncmd := buildCommand(cmd.Args)
- copy(cmd.Raw[0:], ncmd.Raw[:])
- cmd.Raw = cmd.Raw[:len(ncmd.Raw)]
-
- rsp, err := kvn.Propose(cmd.Raw)
- if err != nil {
- conn.WriteError(err.Error())
- return
}
-
- f(conn, cmd, rsp)
+ rsp, err := rebuildFirstKeyAndPropose(kvn, cmd, f)
+ return rsp, err
}
}
-func wrapWriteCommandKSubkey(kvn *KVNode, f common.CommandRspFunc) common.CommandFunc {
- return func(conn redcon.Conn, cmd redcon.Command) {
+func wrapWriteCommandKSubkey(kvn *KVNode, f common.CommandRspFunc) common.WriteCommandFunc {
+ return func(cmd redcon.Command) (interface{}, error) {
if len(cmd.Args) != 3 {
- conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
- return
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
- cmd, rsp, ok := rebuildFirstKeyAndPropose(kvn, conn, cmd)
- if !ok {
- return
+ if err := common.CheckKey(cmd.Args[1]); err != nil {
+ return nil, err
}
- f(conn, cmd, rsp)
+ rsp, err := rebuildFirstKeyAndPropose(kvn, cmd, f)
+ return rsp, err
}
}
-func wrapWriteCommandKSubkeySubkey(kvn *KVNode, f common.CommandRspFunc) common.CommandFunc {
- return func(conn redcon.Conn, cmd redcon.Command) {
+func wrapWriteCommandKSubkeySubkey(kvn *KVNode, f common.CommandRspFunc) common.WriteCommandFunc {
+ return func(cmd redcon.Command) (interface{}, error) {
if len(cmd.Args) < 3 {
- conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
- return
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
- cmd, rsp, ok := rebuildFirstKeyAndPropose(kvn, conn, cmd)
- if !ok {
- return
+ if err := common.CheckKey(cmd.Args[1]); err != nil {
+ return nil, err
}
- f(conn, cmd, rsp)
+ rsp, err := rebuildFirstKeyAndPropose(kvn, cmd, f)
+ return rsp, err
}
}
-func wrapWriteCommandKAnySubkey(kvn *KVNode, f common.CommandRspFunc, minSubKeyLen int) common.CommandFunc {
- return func(conn redcon.Conn, cmd redcon.Command) {
+func wrapWriteCommandKAnySubkey(kvn *KVNode, f common.CommandRspFunc, minSubKeyLen int) common.WriteCommandFunc {
+ return func(cmd redcon.Command) (interface{}, error) {
if len(cmd.Args) < 2+minSubKeyLen {
- conn.WriteError("ERR wrong number arguments for '" + string(cmd.Args[0]) + "' command")
- return
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
- cmd, rsp, ok := rebuildFirstKeyAndPropose(kvn, conn, cmd)
- if !ok {
- return
+ if err := common.CheckKey(cmd.Args[1]); err != nil {
+ return nil, err
}
- f(conn, cmd, rsp)
+ rsp, err := rebuildFirstKeyAndPropose(kvn, cmd, f)
+ return rsp, err
}
}
-
-func wrapWriteCommandKV(kvn *KVNode, f common.CommandRspFunc) common.CommandFunc {
- return func(conn redcon.Conn, cmd redcon.Command) {
- if len(cmd.Args) != 3 {
- conn.WriteError("ERR wrong number arguments for '" + string(cmd.Args[0]) + "' command")
- return
- }
- cmd, rsp, ok := rebuildFirstKeyAndPropose(kvn, conn, cmd)
- if !ok {
- return
+func wrapWriteCommandKAnySubkeyAndMax(kvn *KVNode, f common.CommandRspFunc, minSubKeyLen int, maxSubKeyLen int) common.WriteCommandFunc {
+ return func(cmd redcon.Command) (interface{}, error) {
+ if len(cmd.Args) < 2+minSubKeyLen {
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
- f(conn, cmd, rsp)
- }
-}
-
-func wrapWriteCommandKVV(kvn *KVNode, f common.CommandRspFunc) common.CommandFunc {
- return func(conn redcon.Conn, cmd redcon.Command) {
- if len(cmd.Args) < 3 {
- conn.WriteError("ERR wrong number arguments for '" + string(cmd.Args[0]) + "' command")
- return
+ if len(cmd.Args) > 2+maxSubKeyLen {
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
- cmd, rsp, ok := rebuildFirstKeyAndPropose(kvn, conn, cmd)
- if !ok {
- return
+ if err := common.CheckKey(cmd.Args[1]); err != nil {
+ return nil, err
}
- f(conn, cmd, rsp)
+ rsp, err := rebuildFirstKeyAndPropose(kvn, cmd, f)
+ return rsp, err
}
}
-func wrapWriteCommandKVKV(kvn *KVNode, f common.CommandRspFunc) common.CommandFunc {
- return func(conn redcon.Conn, cmd redcon.Command) {
- if len(cmd.Args) < 3 || len(cmd.Args[1:])%2 != 0 {
- conn.WriteError("ERR wrong number arguments for '" + string(cmd.Args[0]) + "' command")
- return
- }
- if len(cmd.Args[1:])/2 >= common.MAX_BATCH_NUM {
- conn.WriteError(errTooMuchBatchSize.Error())
- return
- }
- args := cmd.Args[1:]
- for i, v := range args {
- if i%2 != 0 {
- continue
- }
- _, key, err := common.ExtractNamesapce(v)
- if err != nil {
- conn.WriteError(err.Error())
- return
- }
- if common.IsValidTableName(key) {
- conn.WriteError(common.ErrInvalidTableName.Error())
- return
- }
-
- args[i] = key
+func wrapWriteCommandKV(kvn *KVNode, f common.CommandRspFunc) common.WriteCommandFunc {
+ return func(cmd redcon.Command) (interface{}, error) {
+ if len(cmd.Args) != 3 {
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
- ncmd := buildCommand(cmd.Args)
- copy(cmd.Raw[0:], ncmd.Raw[:])
- cmd.Raw = cmd.Raw[:len(ncmd.Raw)]
-
- rsp, err := kvn.Propose(cmd.Raw)
- if err != nil {
- conn.WriteError(err.Error())
- return
+ if err := common.CheckKey(cmd.Args[1]); err != nil {
+ return nil, err
}
- f(conn, cmd, rsp)
+ rsp, err := rebuildFirstKeyAndPropose(kvn, cmd, f)
+ return rsp, err
}
}
-func wrapWriteCommandKSubkeyV(kvn *KVNode, f common.CommandRspFunc) common.CommandFunc {
- return func(conn redcon.Conn, cmd redcon.Command) {
+func wrapWriteCommandKVV(kvn *KVNode, f common.CommandRspFunc) common.WriteCommandFunc {
+ return func(cmd redcon.Command) (interface{}, error) {
if len(cmd.Args) != 4 {
- conn.WriteError("ERR wrong number arguments for '" + string(cmd.Args[0]) + "' command")
- return
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
- cmd, rsp, ok := rebuildFirstKeyAndPropose(kvn, conn, cmd)
- if !ok {
- return
+ if err := common.CheckKey(cmd.Args[1]); err != nil {
+ return nil, err
}
- f(conn, cmd, rsp)
+ rsp, err := rebuildFirstKeyAndPropose(kvn, cmd, f)
+ return rsp, err
}
}
-func wrapWriteCommandKSubkeyVSubkeyV(kvn *KVNode, f common.CommandRspFunc) common.CommandFunc {
- return func(conn redcon.Conn, cmd redcon.Command) {
+func wrapWriteCommandKSubkeyV(kvn *KVNode, f common.CommandRspFunc) common.WriteCommandFunc {
+ return wrapWriteCommandKVV(kvn, f)
+}
+
+func wrapWriteCommandKSubkeyVSubkeyV(kvn *KVNode, f common.CommandRspFunc) common.WriteCommandFunc {
+ return func(cmd redcon.Command) (interface{}, error) {
if len(cmd.Args) < 4 || len(cmd.Args[2:])%2 != 0 {
- conn.WriteError("ERR wrong number arguments for '" + string(cmd.Args[0]) + "' command")
- return
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
- if len(cmd.Args[2:])/2 >= common.MAX_BATCH_NUM {
- conn.WriteError(errTooMuchBatchSize.Error())
- return
+ if len(cmd.Args[2:])/2 > common.MAX_BATCH_NUM {
+ return nil, errTooMuchBatchSize
}
- cmd, rsp, ok := rebuildFirstKeyAndPropose(kvn, conn, cmd)
- if !ok {
- return
+ if err := common.CheckKey(cmd.Args[1]); err != nil {
+ return nil, err
}
- f(conn, cmd, rsp)
+ rsp, err := rebuildFirstKeyAndPropose(kvn, cmd, f)
+ return rsp, err
}
}
func wrapMergeCommand(f common.MergeCommandFunc) common.MergeCommandFunc {
return func(cmd redcon.Command) (interface{}, error) {
- _, key, err := common.ExtractNamesapce(cmd.Args[1])
+ key, err := common.CutNamesapce(cmd.Args[1])
if err != nil {
return nil, err
}
@@ -386,11 +351,11 @@ func wrapMergeCommandKK(f common.MergeCommandFunc) common.MergeCommandFunc {
if len(cmd.Args) < 2 {
return nil, fmt.Errorf("ERR wrong number of arguments for '%s' command", string(cmd.Args[0]))
}
- if len(cmd.Args[1:]) >= common.MAX_BATCH_NUM {
+ if len(cmd.Args[1:]) > common.MAX_BATCH_NUM {
return nil, errTooMuchBatchSize
}
for i := 1; i < len(cmd.Args); i++ {
- _, key, err := common.ExtractNamesapce(cmd.Args[i])
+ key, err := common.CutNamesapce(cmd.Args[i])
if err != nil {
return nil, err
}
@@ -406,29 +371,25 @@ func wrapWriteMergeCommandKK(kvn *KVNode, f common.MergeWriteCommandFunc) common
return nil, fmt.Errorf("ERR wrong number of arguments for '%s' command", string(cmd.Args[0]))
}
args := cmd.Args[1:]
- if len(args) >= common.MAX_BATCH_NUM {
+ if len(args) > common.MAX_BATCH_NUM {
return nil, errTooMuchBatchSize
}
for i, v := range args {
- _, key, err := common.ExtractNamesapce(v)
+ key, err := common.CutNamesapce(v)
if err != nil {
return nil, err
}
- if common.IsValidTableName(key) {
- return nil, common.ErrInvalidTableName
- }
args[i] = key
}
ncmd := buildCommand(cmd.Args)
- copy(cmd.Raw[0:], ncmd.Raw[:])
- cmd.Raw = cmd.Raw[:len(ncmd.Raw)]
-
- rsp, err := kvn.Propose(cmd.Raw)
+ rsp, err := kvn.RedisPropose(ncmd.Raw)
if err != nil {
return nil, err
}
-
- return f(cmd, rsp)
+ if f != nil {
+ return f(cmd, rsp)
+ }
+ return rsp, nil
}
}
@@ -437,7 +398,7 @@ func wrapWriteMergeCommandKVKV(kvn *KVNode, f common.MergeWriteCommandFunc) comm
if len(cmd.Args) < 3 || len(cmd.Args[1:])%2 != 0 {
return nil, fmt.Errorf("ERR wrong number arguments for '%s' command", string(cmd.Args[0]))
}
- if len(cmd.Args[1:])/2 >= common.MAX_BATCH_NUM {
+ if len(cmd.Args[1:])/2 > common.MAX_BATCH_NUM {
return nil, errTooMuchBatchSize
}
args := cmd.Args[1:]
@@ -445,23 +406,37 @@ func wrapWriteMergeCommandKVKV(kvn *KVNode, f common.MergeWriteCommandFunc) comm
if i%2 != 0 {
continue
}
- _, key, err := common.ExtractNamesapce(v)
+ key, err := common.CutNamesapce(v)
if err != nil {
return nil, err
}
- if common.IsValidTableName(key) {
- return nil, common.ErrInvalidTableName
- }
args[i] = key
}
ncmd := buildCommand(cmd.Args)
- copy(cmd.Raw[0:], ncmd.Raw[:])
- cmd.Raw = cmd.Raw[:len(ncmd.Raw)]
- rsp, err := kvn.Propose(cmd.Raw)
+ rsp, err := kvn.RedisPropose(ncmd.Raw)
if err != nil {
return nil, err
}
- return f(cmd, rsp)
+ if f != nil {
+ return f(cmd, rsp)
+ }
+ return rsp, nil
+ }
+}
+
+type notifier struct {
+ c chan struct{}
+ err error
+}
+
+func newNotifier() *notifier {
+ return ¬ifier{
+ c: make(chan struct{}),
}
}
+
+func (nc *notifier) notify(err error) {
+ nc.err = err
+ close(nc.c)
+}
diff --git a/node/zset.go b/node/zset.go
index 93c59800..bc0eb1ff 100644
--- a/node/zset.go
+++ b/node/zset.go
@@ -3,11 +3,13 @@ package node
import (
"bytes"
"errors"
+ "fmt"
"strconv"
"strings"
- "github.com/absolute8511/ZanRedisDB/common"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/rockredis"
)
var (
@@ -351,147 +353,134 @@ func (nd *KVNode) zrevrankCommand(conn redcon.Conn, cmd redcon.Command) {
}
}
-func (nd *KVNode) zaddCommand(conn redcon.Conn, cmd redcon.Command) {
+func (nd *KVNode) zaddCommand(cmd redcon.Command) (interface{}, error) {
if len(cmd.Args) < 4 || len(cmd.Args)%2 != 0 {
- conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
- return
+ err := fmt.Errorf("ERR wrong number arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
- _, err := getScorePairs(cmd.Args[2:])
+ key := cmd.Args[1]
+ args, err := getScorePairs(cmd.Args[2:])
if err != nil {
- conn.WriteError(err.Error())
- return
+ return nil, err
}
-
- _, v, ok := rebuildFirstKeyAndPropose(nd, conn, cmd)
- if !ok {
- return
+ for i := 0; i < len(args); i++ {
+ err := common.CheckKeySubKey(key, args[i].Member)
+ if err != nil {
+ return nil, err
+ }
}
- rsp, ok := v.(int64)
- if ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
+ // TODO: optimize by checking existence before proposing to raft if we later support nx/xx options
+
+ v, err := rebuildFirstKeyAndPropose(nd, cmd, checkAndRewriteIntRsp)
+ if err != nil {
+ return nil, err
}
+ return v, nil
}
-func (nd *KVNode) zincrbyCommand(conn redcon.Conn, cmd redcon.Command) {
- if len(cmd.Args) != 4 {
- conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
- return
+func (nd *KVNode) zremCommand(cmd redcon.Command) (interface{}, error) {
+ if len(cmd.Args) < 3 {
+ err := fmt.Errorf("ERR wrong number of arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
- _, err := strconv.ParseFloat(string(cmd.Args[2]), 64)
+ // optimization: check existence before proposing to raft
+ key, err := common.CutNamesapce(cmd.Args[1])
if err != nil {
- conn.WriteError(err.Error())
- return
+ return nil, err
}
- _, v, ok := rebuildFirstKeyAndPropose(nd, conn, cmd)
- if !ok {
- return
+ needChange := false
+ for _, m := range cmd.Args[2:] {
+ _, err := nd.store.ZScore(key, m)
+ if !rockredis.IsMemberNotExist(err) {
+ // found a member, so we need to do a raft proposal
+ needChange = true
+ break
+ }
}
- rsp, ok := v.(float64)
- if ok {
- conn.WriteBulkString(strconv.FormatFloat(rsp, 'g', -1, 64))
- } else {
- conn.WriteError(errInvalidResponse.Error())
+ if !needChange {
+ return int64(0), nil
}
+ rsp, err := rebuildFirstKeyAndPropose(nd, cmd, checkAndRewriteIntRsp)
+ return rsp, err
}
-func (nd *KVNode) zremCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- rsp, ok := v.(int64)
- if ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
+func (nd *KVNode) zincrbyCommand(cmd redcon.Command) (interface{}, error) {
+ if len(cmd.Args) != 4 {
+ err := fmt.Errorf("ERR wrong number of arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
+ }
+ _, err := strconv.ParseFloat(string(cmd.Args[2]), 64)
+ if err != nil {
+ return nil, err
}
+
+ v, err := rebuildFirstKeyAndPropose(nd, cmd, func(cmd redcon.Command, r interface{}) (interface{}, error) {
+ rsp, ok := r.(float64)
+ if ok {
+ return []byte(strconv.FormatFloat(rsp, 'g', -1, 64)), nil
+ }
+ return nil, errInvalidResponse
+ })
+ if err != nil {
+ return nil, err
+ }
+ return v, nil
}
-func (nd *KVNode) zremrangebyrankCommand(conn redcon.Conn, cmd redcon.Command) {
+func (nd *KVNode) zremrangebyrankCommand(cmd redcon.Command) (interface{}, error) {
if len(cmd.Args) != 4 {
- conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
- return
+ err := fmt.Errorf("ERR wrong number of arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
_, err := strconv.ParseInt(string(cmd.Args[2]), 10, 64)
if err != nil {
- conn.WriteError("Invalid index: " + err.Error())
- return
+ return nil, err
}
_, err = strconv.ParseInt(string(cmd.Args[3]), 10, 64)
if err != nil {
- conn.WriteError("Invalid index: " + err.Error())
- return
+ return nil, err
}
- _, v, ok := rebuildFirstKeyAndPropose(nd, conn, cmd)
- if !ok {
- return
- }
- rsp, ok := v.(int64)
- if ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
+ v, err := rebuildFirstKeyAndPropose(nd, cmd, checkAndRewriteIntRsp)
+ if err != nil {
+ return nil, err
}
+ return v, nil
}
-func (nd *KVNode) zremrangebyscoreCommand(conn redcon.Conn, cmd redcon.Command) {
+func (nd *KVNode) zremrangebyscoreCommand(cmd redcon.Command) (interface{}, error) {
if len(cmd.Args) != 4 {
- conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
- return
+ err := fmt.Errorf("ERR wrong number of arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
_, _, err := getScoreRange(cmd.Args[2], cmd.Args[3])
if err != nil {
- conn.WriteError(err.Error())
- return
- }
- _, v, ok := rebuildFirstKeyAndPropose(nd, conn, cmd)
- if !ok {
- return
+ return nil, err
}
- rsp, ok := v.(int64)
- if ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
+ v, err := rebuildFirstKeyAndPropose(nd, cmd, checkAndRewriteIntRsp)
+ if err != nil {
+ return nil, err
}
-
+ return v, nil
}
-func (nd *KVNode) zremrangebylexCommand(conn redcon.Conn, cmd redcon.Command) {
+func (nd *KVNode) zremrangebylexCommand(cmd redcon.Command) (interface{}, error) {
if len(cmd.Args) != 4 {
- conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
- return
+ err := fmt.Errorf("ERR wrong number of arguments for '%v' command", string(cmd.Args[0]))
+ return nil, err
}
_, _, _, err := getLexRange(cmd.Args[2], cmd.Args[3])
if err != nil {
- conn.WriteError(err.Error())
- return
- }
- _, v, ok := rebuildFirstKeyAndPropose(nd, conn, cmd)
- if !ok {
- return
- }
- rsp, ok := v.(int64)
- if ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
+ return nil, err
}
-}
-
-func (nd *KVNode) zclearCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- rsp, ok := v.(int64)
- if ok {
- conn.WriteInt64(rsp)
- } else {
- conn.WriteError(errInvalidResponse.Error())
+ v, err := rebuildFirstKeyAndPropose(nd, cmd, checkAndRewriteIntRsp)
+ if err != nil {
+ return nil, err
}
-}
-
-func (nd *KVNode) zfixkeyCommand(conn redcon.Conn, cmd redcon.Command, v interface{}) {
- conn.WriteString("OK")
+ return v, nil
}
func getScorePairs(args [][]byte) ([]common.ScorePair, error) {
@@ -568,13 +557,13 @@ func (kvsm *kvStoreSM) localZclearCommand(cmd redcon.Command, ts int64) (interfa
if len(cmd.Args) != 2 {
return nil, common.ErrInvalidArgs
}
- return kvsm.store.ZClear(cmd.Args[1])
+ return kvsm.store.ZClear(ts, cmd.Args[1])
}
func (kvsm *kvStoreSM) localZMClearCommand(cmd redcon.Command, ts int64) (interface{}, error) {
var count int64
for _, zkey := range cmd.Args[1:] {
- if _, err := kvsm.store.ZClear(zkey); err != nil {
+ if _, err := kvsm.store.ZClear(ts, zkey); err != nil {
return count, err
} else {
count++
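
The reworked zremCommand above only proposes to raft when at least one of the requested members actually exists, so removing absent members stays a cheap local read. Below is a small sketch of that read-before-propose pattern; the `score` and `propose` parameters are assumed stand-ins for store.ZScore and the raft proposal, not the project's real signatures.

```go
package main

import (
	"errors"
	"fmt"
)

var errMemberNotExist = errors.New("member not exist")

// isMemberNotExist stands in for rockredis.IsMemberNotExist in this sketch.
func isMemberNotExist(err error) bool { return errors.Is(err, errMemberNotExist) }

// zremIfNeeded only issues a raft proposal when at least one of the requested
// members exists, so deleting absent members never touches the raft log.
func zremIfNeeded(
	score func(key, member []byte) (float64, error), // assumed store.ZScore shape
	propose func() (int64, error), // assumed raft proposal
	key []byte, members [][]byte,
) (int64, error) {
	needChange := false
	for _, m := range members {
		if _, err := score(key, m); !isMemberNotExist(err) {
			// found a member (or hit an unexpected error), so replicate the delete
			needChange = true
			break
		}
	}
	if !needChange {
		return 0, nil
	}
	return propose()
}

func main() {
	score := func(key, member []byte) (float64, error) { return 0, errMemberNotExist }
	propose := func() (int64, error) { return 1, nil }
	n, err := zremIfNeeded(score, propose, []byte("zk"), [][]byte{[]byte("m1")})
	fmt.Println(n, err) // 0 <nil>: nothing existed, no proposal was issued
}
```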
diff --git a/node/zset_test.go b/node/zset_test.go
index f221b2b5..24e5f6d5 100644
--- a/node/zset_test.go
+++ b/node/zset_test.go
@@ -52,6 +52,10 @@ func TestKVNode_zsetCommand(t *testing.T) {
{"zremrangebyrank", buildCommand([][]byte{[]byte("zremrangebyrank"), testKey, []byte("0"), []byte("1")})},
{"zremrangebyscore", buildCommand([][]byte{[]byte("zremrangebyscore"), testKey, testLrange, testRrange})},
{"zremrangebylex", buildCommand([][]byte{[]byte("zremrangebylex"), testKey, testLexLrange, testLexRrange})},
+ {"zttl", buildCommand([][]byte{[]byte("zttl"), testKey})},
+ {"zkeyexist", buildCommand([][]byte{[]byte("zkeyexist"), testKey})},
+ {"zexpire", buildCommand([][]byte{[]byte("zexpire"), testKey, []byte("10")})},
+ {"zpersist", buildCommand([][]byte{[]byte("zpersist"), testKey})},
{"zclear", buildCommand([][]byte{[]byte("zclear"), testKey})},
}
defer os.RemoveAll(dataDir)
@@ -60,8 +64,18 @@ func TestKVNode_zsetCommand(t *testing.T) {
c := &fakeRedisConn{}
for _, cmd := range tests {
c.Reset()
- handler, _, _ := nd.router.GetCmdHandler(cmd.name)
- handler(c, cmd.args)
- assert.Nil(t, c.GetError(), cmd.name)
+ origCmd := append([]byte{}, cmd.args.Raw...)
+ handler, ok := nd.router.GetCmdHandler(cmd.name)
+ if ok {
+ handler(c, cmd.args)
+ assert.Nil(t, c.GetError(), cmd.name)
+ } else {
+ whandler, _ := nd.router.GetWCmdHandler(cmd.name)
+ rsp, err := whandler(cmd.args)
+ assert.Nil(t, err)
+ _, ok := rsp.(error)
+ assert.True(t, !ok)
+ }
+ assert.Equal(t, origCmd, cmd.args.Raw)
}
}
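
The updated test above now has to handle two handler shapes: read commands keep the connection-based handler, while write commands go through a handler that returns a value and an error and must leave the raw command bytes unmodified. Below is a sketch of that dual-registry lookup, with hypothetical registry types standing in for the node router.

```go
package main

import (
	"errors"
	"fmt"
)

// connHandler and writeHandler are hypothetical stand-ins for the two handler
// shapes used by the router: one writes to the connection directly, the other
// returns a response for the caller to write.
type connHandler func(args [][]byte)
type writeHandler func(args [][]byte) (interface{}, error)

type router struct {
	readCmds  map[string]connHandler
	writeCmds map[string]writeHandler
}

// dispatch tries the read registry first and falls back to the write registry,
// mirroring the lookup order in the updated test.
func (r *router) dispatch(name string, args [][]byte) (interface{}, error) {
	if h, ok := r.readCmds[name]; ok {
		h(args)
		return nil, nil
	}
	if wh, ok := r.writeCmds[name]; ok {
		return wh(args)
	}
	return nil, errors.New("unknown command: " + name)
}

func main() {
	r := &router{
		readCmds: map[string]connHandler{
			"zscore": func(args [][]byte) { fmt.Println("read with", len(args), "args") },
		},
		writeCmds: map[string]writeHandler{
			"zadd": func(args [][]byte) (interface{}, error) { return int64(1), nil },
		},
	}
	fmt.Println(r.dispatch("zadd", [][]byte{[]byte("zadd"), []byte("k"), []byte("1"), []byte("m")}))
	fmt.Println(r.dispatch("zscore", [][]byte{[]byte("zscore"), []byte("k"), []byte("m")}))
}
```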
diff --git a/pdserver/config.go b/pdserver/config.go
index ca121565..011669fe 100644
--- a/pdserver/config.go
+++ b/pdserver/config.go
@@ -3,10 +3,13 @@ package pdserver
import (
"log"
"os"
+
+ "github.com/youzan/ZanRedisDB/cluster/pdnode_coord"
)
type ServerConfig struct {
HTTPAddress string `flag:"http-address"`
+ MetricAddress string `flag:"metric-address"`
BroadcastAddr string `flag:"broadcast-address"`
BroadcastInterface string `flag:"broadcast-interface"`
@@ -18,10 +21,12 @@ type ServerConfig struct {
AutoBalanceAndMigrate bool `flag:"auto-balance-and-migrate"`
BalanceInterval []string `flag:"balance-interval"`
- LogLevel int32 `flag:"log-level" cfg:"log_level"`
- LogDir string `flag:"log-dir" cfg:"log_dir"`
- DataDir string `flag:"data-dir" cfg:"data_dir"`
- LearnerRole string `flag:"learner-role" cfg:"learner_role"`
+ LogLevel int32 `flag:"log-level" cfg:"log_level"`
+ LogDir string `flag:"log-dir" cfg:"log_dir"`
+ DataDir string `flag:"data-dir" cfg:"data_dir"`
+ LearnerRole string `flag:"learner-role" cfg:"learner_role"`
+ FilterNamespaces string `flag:"filter-namespaces" cfg:"filter_namespaces"`
+ BalanceVer string `flag:"balance-ver" cfg:"balance_ver"`
}
func NewServerConfig() *ServerConfig {
@@ -38,6 +43,7 @@ func NewServerConfig() *ServerConfig {
ClusterLeadershipAddresses: "",
ClusterID: "",
+ BalanceVer: pdnode_coord.BalanceV2Str,
LogLevel: 1,
LogDir: "",
diff --git a/pdserver/http.go b/pdserver/http.go
index eacd62d6..7dda616a 100644
--- a/pdserver/http.go
+++ b/pdserver/http.go
@@ -3,19 +3,23 @@ package pdserver
import (
"encoding/json"
"errors"
+ "fmt"
"io/ioutil"
"net/http"
_ "net/http/pprof"
"net/url"
"strconv"
"strings"
+ "time"
- "github.com/absolute8511/ZanRedisDB/cluster"
- "github.com/absolute8511/ZanRedisDB/common"
"github.com/julienschmidt/httprouter"
+ "github.com/youzan/ZanRedisDB/cluster"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/metric"
)
type nodeInfo struct {
+ NodeID string `json:"node_id"`
BroadcastAddress string `json:"broadcast_address"`
Hostname string `json:"hostname"`
RedisPort string `json:"redis_port"`
@@ -73,13 +77,18 @@ func (s *Server) initHttpHandler() {
router.Handle("GET", "/datanodes", common.Decorate(s.getDataNodes, common.V1))
router.Handle("GET", "/listpd", common.Decorate(s.listPDNodes, common.V1))
router.Handle("GET", "/query/:namespace", common.Decorate(s.doQueryNamespace, debugLog, common.V1))
+ router.Handle("GET", "/querytable/stats/:table", common.Decorate(s.doQueryTableStats, debugLog, common.V1))
router.Handle("DELETE", "/namespace/rmlearner", common.Decorate(s.doRemoveNamespaceLearner, log, common.V1))
+ router.Handle("POST", "/learner/stop", common.Decorate(s.doStopLearner, log, common.V1))
+ router.Handle("POST", "/learner/start", common.Decorate(s.doStartLearner, log, common.V1))
+ router.Handle("GET", "/learner/state", common.Decorate(s.getLearnerRunningState, log, common.V1))
// cluster prefix url means only handled by leader of pd
router.Handle("GET", "/cluster/stats", common.Decorate(s.doClusterStats, common.V1))
router.Handle("POST", "/cluster/balance", common.Decorate(s.doClusterSwitchBalance, log, common.V1))
router.Handle("POST", "/cluster/pd/tombstone", common.Decorate(s.doClusterTombstonePD, log, common.V1))
router.Handle("POST", "/cluster/node/remove", common.Decorate(s.doClusterRemoveDataNode, log, common.V1))
+ router.Handle("DELETE", "/cluster/partition/remove_node", common.Decorate(s.doClusterNamespacePartRemoveNode, log, common.V1))
router.Handle("POST", "/cluster/upgrade/begin", common.Decorate(s.doClusterBeginUpgrade, log, common.V1))
router.Handle("POST", "/cluster/upgrade/done", common.Decorate(s.doClusterFinishUpgrade, log, common.V1))
router.Handle("POST", "/cluster/namespace/create", common.Decorate(s.doCreateNamespace, log, common.V1))
@@ -130,6 +139,7 @@ func (s *Server) getDataNodes(w http.ResponseWriter, req *http.Request, ps httpr
dc, _ := n.Tags[cluster.DCInfoTag]
dcInfo, _ := dc.(string)
dn := &nodeInfo{
+ NodeID: n.GetID(),
BroadcastAddress: n.NodeIP,
Hostname: n.Hostname,
Version: n.Version,
@@ -168,6 +178,57 @@ func (s *Server) listPDNodes(w http.ResponseWriter, req *http.Request, ps httpro
}, nil
}
+func (s *Server) doQueryTableStats(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ table := ps.ByName("table")
+ if table == "" {
+ return nil, common.HttpErr{Code: 400, Text: "MISSING_ARG_TABLE"}
+ }
+ reqParams, err := url.ParseQuery(req.URL.RawQuery)
+ if err != nil {
+ return nil, common.HttpErr{Code: 400, Text: "INVALID_REQUEST"}
+ }
+ leaderOnly := reqParams.Get("leader_only")
+
+ dns, _ := s.pdCoord.GetAllDataNodes()
+ nodeTableStats := make(map[string]map[string]metric.TableStats)
+ totalTableStats := make(map[string]metric.TableStats)
+ type TableStatsType struct {
+ TableStats map[string]metric.TableStats `json:"table_stats"`
+ }
+ for _, n := range dns {
+ uri := fmt.Sprintf("http://%s:%v%v?leader_only=%v&table=%v", n.Hostname, n.HttpPort, common.APITableStats, leaderOnly, table)
+ var tableStats TableStatsType
+ rspCode, err := common.APIRequest("GET", uri, nil, time.Second*10, &tableStats)
+ if err != nil {
+ sLog.Infof("get table stats error %v, %v", err, uri)
+ continue
+ }
+ if rspCode != http.StatusOK {
+ sLog.Infof("get table stats not ok %v, %v", rspCode, uri)
+ continue
+ }
+ nodeTableStats[n.GetID()] = tableStats.TableStats
+ for ns, tbs := range tableStats.TableStats {
+ if tbs.Name != table {
+ continue
+ }
+ var tmp metric.TableStats
+ tmp.Name = table
+ if t, ok := totalTableStats[ns]; ok {
+ tmp = t
+ }
+ tmp.KeyNum += tbs.KeyNum
+ tmp.DiskBytesUsage += tbs.DiskBytesUsage
+ tmp.ApproximateKeyNum += tbs.ApproximateKeyNum
+ totalTableStats[ns] = tmp
+ }
+ }
+ return map[string]interface{}{
+ "total": totalTableStats,
+ "nodes": nodeTableStats,
+ }, nil
+}
+
func (s *Server) doQueryNamespace(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
ns := ps.ByName("namespace")
if ns == "" {
@@ -198,9 +259,15 @@ func (s *Server) doQueryNamespace(w http.ResponseWriter, req *http.Request, ps h
partNodes := make(map[int]PartitionNodeInfo)
pnum := 0
+ replicator := 1
+ ex := ""
+ useFsync := false
engType := ""
for _, nsInfo := range nsPartsInfo {
pnum = nsInfo.PartitionNum
+ replicator = nsInfo.Replica
+ ex = nsInfo.ExpirationPolicy
+ useFsync = nsInfo.OptimizedFsync
engType = nsInfo.EngType
var pn PartitionNodeInfo
for _, nid := range nsInfo.RaftNodes {
@@ -220,6 +287,7 @@ func (s *Server) doQueryNamespace(w http.ResponseWriter, req *http.Request, ps h
}
}
dn := nodeInfo{
+ NodeID: nid,
BroadcastAddress: ip,
Hostname: hostname,
Version: version,
@@ -236,10 +304,13 @@ func (s *Server) doQueryNamespace(w http.ResponseWriter, req *http.Request, ps h
partNodes[nsInfo.Partition] = pn
}
return map[string]interface{}{
- "epoch": curEpoch,
- "partition_num": pnum,
- "eng_type": engType,
- "partitions": partNodes,
+ "epoch": curEpoch,
+ "partition_num": pnum,
+ "replicator": replicator,
+ "expire_policy": ex,
+ "fsync_optimized": useFsync,
+ "eng_type": engType,
+ "partitions": partNodes,
}, nil
}
@@ -333,6 +404,22 @@ func (s *Server) doClusterRemoveDataNode(w http.ResponseWriter, req *http.Reques
return nil, nil
}
+func (s *Server) doClusterNamespacePartRemoveNode(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ reqParams, err := url.ParseQuery(req.URL.RawQuery)
+ if err != nil {
+ return nil, common.HttpErr{Code: 400, Text: "INVALID_REQUEST"}
+ }
+ nid := reqParams.Get("node")
+ ns := reqParams.Get("namespace")
+ pid := reqParams.Get("partition")
+
+ err = s.pdCoord.RemoveNamespaceFromNode(ns, pid, nid)
+ if err != nil {
+ return nil, common.HttpErr{Code: 500, Text: err.Error()}
+ }
+ return nil, nil
+}
+
func (s *Server) doClusterBeginUpgrade(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
err := s.pdCoord.SetClusterUpgradeState(true)
if err != nil {
@@ -385,12 +472,28 @@ func (s *Server) doCreateNamespace(w http.ResponseWriter, req *http.Request, ps
return nil, common.HttpErr{Code: 400, Text: "INVALID_ARG_REPLICATOR"}
}
+ dataVersion := reqParams.Get("data_version")
+ if dataVersion == "" {
+ dataVersion = common.ValueHeaderV1Str
+ }
+ dv, err := common.StringToDataVersionType(dataVersion)
+ if err != nil {
+ return nil, common.HttpErr{Code: 400, Text: "INVALID_ARG_DATA_VERSION"}
+ }
expPolicy := reqParams.Get("expiration_policy")
if expPolicy == "" {
expPolicy = common.DefaultExpirationPolicy
+ if dv == common.ValueHeaderV1 {
+ expPolicy = common.WaitCompactExpirationPolicy
+ }
} else if _, err := common.StringToExpirationPolicy(expPolicy); err != nil {
return nil, common.HttpErr{Code: 400, Text: "INVALID_ARG_EXPIRATION_POLICY"}
}
+ if expPolicy == common.WaitCompactExpirationPolicy {
+ if dataVersion != common.ValueHeaderV1Str {
+ return nil, common.HttpErr{Code: 400, Text: "INVALID_ARG_EXPIRATION_POLICY data version must be v1 in compact ttl"}
+ }
+ }
tagStr := reqParams.Get("tags")
var tagList []string
@@ -411,6 +514,7 @@ func (s *Server) doCreateNamespace(w http.ResponseWriter, req *http.Request, ps
meta.Replica = replicator
meta.EngType = engType
meta.ExpirationPolicy = expPolicy
+ meta.DataVersion = dataVersion
meta.Tags = make(map[string]interface{})
for _, tag := range tagList {
if strings.TrimSpace(tag) != "" {
@@ -605,6 +709,30 @@ func (s *Server) doUpdateNamespaceMeta(w http.ResponseWriter, req *http.Request,
}
+func (s *Server) doStopLearner(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ err := s.pdCoord.SwitchStartLearner(false)
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusInternalServerError, Text: err.Error()}
+ }
+ return nil, nil
+}
+
+func (s *Server) getLearnerRunningState(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ v, err := s.pdCoord.GetLearnerRunningState()
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusInternalServerError, Text: err.Error()}
+ }
+ return v, nil
+}
+
+func (s *Server) doStartLearner(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ err := s.pdCoord.SwitchStartLearner(true)
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusInternalServerError, Text: err.Error()}
+ }
+ return nil, nil
+}
+
func (s *Server) doRemoveNamespaceLearner(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
reqParams, err := url.ParseQuery(req.URL.RawQuery)
if err != nil {
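
The new /querytable/stats handler above fans out to every data node and sums the per-namespace stats for the requested table into a "total" section. Below is a minimal sketch of that aggregation step, using a simplified TableStats struct rather than the real metric.TableStats type.

```go
package main

import "fmt"

// TableStats is a simplified stand-in for metric.TableStats.
type TableStats struct {
	Name              string
	KeyNum            int64
	DiskBytesUsage    int64
	ApproximateKeyNum int64
}

// aggregateTableStats sums the stats of one table across all nodes, keyed by
// namespace, the same way the new pd endpoint builds its "total" section.
func aggregateTableStats(table string, perNode map[string]map[string]TableStats) map[string]TableStats {
	total := make(map[string]TableStats)
	for _, nsStats := range perNode {
		for ns, tbs := range nsStats {
			if tbs.Name != table {
				continue
			}
			tmp := total[ns] // zero value if this namespace has not been seen yet
			tmp.Name = table
			tmp.KeyNum += tbs.KeyNum
			tmp.DiskBytesUsage += tbs.DiskBytesUsage
			tmp.ApproximateKeyNum += tbs.ApproximateKeyNum
			total[ns] = tmp
		}
	}
	return total
}

func main() {
	perNode := map[string]map[string]TableStats{
		"node1": {"ns1": {Name: "orders", KeyNum: 10, DiskBytesUsage: 100}},
		"node2": {"ns1": {Name: "orders", KeyNum: 5, DiskBytesUsage: 50}},
	}
	fmt.Printf("%+v\n", aggregateTableStats("orders", perNode))
}
```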
diff --git a/pdserver/http_test.go b/pdserver/http_test.go
index 55035a4b..35205604 100644
--- a/pdserver/http_test.go
+++ b/pdserver/http_test.go
@@ -14,11 +14,11 @@ import (
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/cluster"
- "github.com/absolute8511/ZanRedisDB/common"
- ds "github.com/absolute8511/ZanRedisDB/server"
- "github.com/absolute8511/go-zanredisdb"
"github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/cluster"
+ "github.com/youzan/ZanRedisDB/common"
+ ds "github.com/youzan/ZanRedisDB/server"
+ "github.com/youzan/go-zanredisdb"
)
const (
@@ -27,8 +27,11 @@ const (
pdHttpPort = "18007"
pdRemoteHttpPort = "18008"
pdLearnerHttpPort = "18009"
+ baseRedisPort = 62345
+ testEngineType = "pebble"
)
+var balanceVer = "v2"
var testEtcdServers = "http://127.0.0.1:2379"
var testOnce sync.Once
var gpdServer *Server
@@ -54,16 +57,20 @@ func startTestClusterForLearner(t *testing.T, n int) (*Server, []dataNodeWrapper
opts.ClusterID = "unit-test"
opts.ClusterLeadershipAddresses = testEtcdServers
opts.BalanceInterval = []string{"0", "24"}
+ opts.BalanceVer = balanceVer
opts.LearnerRole = common.LearnerRoleLogSyncer
- pd := NewServer(opts)
+ pd, err := NewServer(opts)
+ assert.Nil(t, err)
pd.Start()
+ // initially start the syncer
+ pd.pdCoord.SwitchStartLearner(true)
for i := 0; i < n; i++ {
tmpDir := path.Join(clusterTmpDir, strconv.Itoa(i))
os.MkdirAll(tmpDir, common.DIR_PERM)
- raftAddr := "http://127.0.0.1:" + strconv.Itoa(15345+i*100)
- redisPort := 25345 + i*100
- httpPort := 35345 + i*100
+ raftAddr := "http://127.0.0.1:" + strconv.Itoa(baseRedisPort-100+i*10)
+ redisPort := baseRedisPort - 101 + i*10
+ httpPort := baseRedisPort - 102 + i*10
kvOpts := ds.ServerConfig{
ClusterID: TestClusterName,
EtcdClusterAddresses: testEtcdServers,
@@ -76,8 +83,11 @@ func startTestClusterForLearner(t *testing.T, n int) (*Server, []dataNodeWrapper
ElectionTick: 5,
LearnerRole: common.LearnerRoleLogSyncer,
RemoteSyncCluster: "http://127.0.0.1:" + pdRemoteHttpPort,
+ UseRocksWAL: true,
}
- kv := ds.NewServer(kvOpts)
+ kvOpts.RocksDBOpts.EnablePartitionedIndexFilter = true
+ kvOpts.RocksDBOpts.EngineType = testEngineType
+ kv, _ := ds.NewServer(kvOpts)
kv.Start()
time.Sleep(time.Second)
kvList = append(kvList, dataNodeWrapper{kv, redisPort, httpPort, tmpDir})
@@ -86,11 +96,51 @@ func startTestClusterForLearner(t *testing.T, n int) (*Server, []dataNodeWrapper
}
func startRemoteSyncTestCluster(t *testing.T, n int) (*Server, []dataNodeWrapper, string) {
- return startTestCluster(t, true, TestRemoteSyncClusterName, pdRemoteHttpPort, n, 16345)
+ return startTestCluster(t, true, TestRemoteSyncClusterName, pdRemoteHttpPort, n, baseRedisPort+1000)
}
func startDefaultTestCluster(t *testing.T, n int) (*Server, []dataNodeWrapper, string) {
- return startTestCluster(t, false, TestClusterName, pdHttpPort, n, 17345)
+ return startTestCluster(t, false, TestClusterName, pdHttpPort, n, baseRedisPort+2000)
+}
+
+func addMoreTestDataNodeToCluster(t *testing.T, n int) ([]dataNodeWrapper, string) {
+ basePort := baseRedisPort + 3000
+ clusterName := TestClusterName
+ syncOnly := false
+ kvList := make([]dataNodeWrapper, 0, n)
+ clusterTmpDir, err := ioutil.TempDir("", fmt.Sprintf("rocksdb-test-%d", time.Now().UnixNano()))
+ assert.Nil(t, err)
+ t.Logf("dir:%v\n", clusterTmpDir)
+
+ for i := 0; i < n; i++ {
+ tmpDir := path.Join(clusterTmpDir, strconv.Itoa(i))
+ os.MkdirAll(tmpDir, common.DIR_PERM)
+ raftAddr := "http://127.0.0.1:" + strconv.Itoa(basePort+100+i*10)
+ redisPort := basePort + 101 + i*10
+ httpPort := basePort + 102 + i*10
+ rpcPort := basePort + 103 + i*10
+ kvOpts := ds.ServerConfig{
+ ClusterID: clusterName,
+ EtcdClusterAddresses: testEtcdServers,
+ DataDir: tmpDir,
+ RedisAPIPort: redisPort,
+ LocalRaftAddr: raftAddr,
+ BroadcastAddr: "127.0.0.1",
+ HttpAPIPort: httpPort,
+ GrpcAPIPort: rpcPort,
+ TickMs: 100,
+ ElectionTick: 5,
+ SyncerWriteOnly: syncOnly,
+ UseRocksWAL: true,
+ }
+ kvOpts.RocksDBOpts.EnablePartitionedIndexFilter = true
+ kvOpts.RocksDBOpts.EngineType = testEngineType
+ kv, _ := ds.NewServer(kvOpts)
+ kv.Start()
+ time.Sleep(time.Second)
+ kvList = append(kvList, dataNodeWrapper{kv, redisPort, httpPort, tmpDir})
+ }
+ return kvList, clusterTmpDir
}
func startTestCluster(t *testing.T, syncOnly bool, clusterName string, pdPort string, n int, basePort int) (*Server, []dataNodeWrapper, string) {
@@ -122,16 +172,18 @@ func startTestCluster(t *testing.T, syncOnly bool, clusterName string, pdPort st
opts.ClusterID = clusterName
opts.ClusterLeadershipAddresses = testEtcdServers
opts.BalanceInterval = []string{"0", "24"}
- pd := NewServer(opts)
+ opts.BalanceVer = balanceVer
+ pd, err := NewServer(opts)
+ assert.Nil(t, err)
pd.Start()
for i := 0; i < n; i++ {
tmpDir := path.Join(clusterTmpDir, strconv.Itoa(i))
os.MkdirAll(tmpDir, common.DIR_PERM)
- raftAddr := "http://127.0.0.1:" + strconv.Itoa(basePort+i*100)
- redisPort := basePort + 10000 + i*100
- httpPort := basePort + 20000 + i*100
- rpcPort := basePort + 22000 + i*100
+ raftAddr := "http://127.0.0.1:" + strconv.Itoa(basePort+100+i*10)
+ redisPort := basePort + 101 + i*10
+ httpPort := basePort + 102 + i*10
+ rpcPort := basePort + 103 + i*10
kvOpts := ds.ServerConfig{
ClusterID: clusterName,
EtcdClusterAddresses: testEtcdServers,
@@ -143,9 +195,14 @@ func startTestCluster(t *testing.T, syncOnly bool, clusterName string, pdPort st
GrpcAPIPort: rpcPort,
TickMs: 100,
ElectionTick: 5,
+ DefaultSnapCount: 100,
+ DefaultSnapCatchup: 50,
SyncerWriteOnly: syncOnly,
+ UseRocksWAL: true,
}
- kv := ds.NewServer(kvOpts)
+ kvOpts.RocksDBOpts.EnablePartitionedIndexFilter = true
+ kvOpts.RocksDBOpts.EngineType = testEngineType
+ kv, _ := ds.NewServer(kvOpts)
kv.Start()
time.Sleep(time.Second)
kvList = append(kvList, dataNodeWrapper{kv, redisPort, httpPort, tmpDir})
@@ -169,6 +226,16 @@ func cleanAllCluster(ret int) {
}
}
+func cleanDataNodes(dns []dataNodeWrapper, tmpDir string) {
+ for _, n := range dns {
+ n.s.Stop()
+ }
+ if strings.Contains(tmpDir, "rocksdb-test") {
+ fmt.Println("removing: ", tmpDir)
+ os.RemoveAll(tmpDir)
+ }
+}
+
func getTestClient(t *testing.T, ns string) *zanredisdb.ZanRedisClient {
conf := &zanredisdb.Conf{
DialTimeout: time.Second * 15,
@@ -178,13 +245,13 @@ func getTestClient(t *testing.T, ns string) *zanredisdb.ZanRedisClient {
Namespace: ns,
}
conf.LookupList = append(conf.LookupList, "127.0.0.1:"+pdHttpPort)
- c := zanredisdb.NewZanRedisClient(conf)
+ c, _ := zanredisdb.NewZanRedisClient(conf)
c.Start()
return c
}
-func startTestClusterAndCheck(t *testing.T) (*Server, []dataNodeWrapper, string) {
- pd, kvList, tmpDir := startDefaultTestCluster(t, 4)
+func startTestClusterAndCheck(t *testing.T, n int) (*Server, []dataNodeWrapper, string) {
+ pd, kvList, tmpDir := startDefaultTestCluster(t, n)
time.Sleep(time.Second)
pduri := "http://127.0.0.1:" + pdHttpPort
uri := fmt.Sprintf("%s/datanodes", pduri)
@@ -241,9 +308,9 @@ func startTestClusterAndCheck(t *testing.T) (*Server, []dataNodeWrapper, string)
return pd, kvList, tmpDir
}
-func ensureClusterReady(t *testing.T) {
+func ensureClusterReady(t *testing.T, n int) {
testOnce.Do(func() {
- gpdServer, gkvList, gtmpDir = startTestClusterAndCheck(t)
+ gpdServer, gkvList, gtmpDir = startTestClusterAndCheck(t, n)
},
)
}
@@ -377,10 +444,10 @@ func ensureDeleteNamespace(t *testing.T, pduri string, ns string) {
}
func TestClusterInitStart(t *testing.T) {
- ensureClusterReady(t)
+ ensureClusterReady(t, 4)
}
func TestClusterSchemaAddIndex(t *testing.T) {
- ensureClusterReady(t)
+ ensureClusterReady(t, 4)
time.Sleep(time.Second)
ns := "test_schema_ns"
diff --git a/pdserver/pdconf.example b/pdserver/pdconf.example.conf
similarity index 84%
rename from pdserver/pdconf.example
rename to pdserver/pdconf.example.conf
index 23e209d3..641fc2cc 100644
--- a/pdserver/pdconf.example
+++ b/pdserver/pdconf.example.conf
@@ -2,7 +2,7 @@
http_address = "0.0.0.0:18001"
## the network interface for broadcast, the ip will be detected automatically.
-broadcast_interface = "eth0"
+broadcast_interface = "lo"
## local reverse proxy port, basically used for collecting the stats
# reverse_proxy_port = "18003"
@@ -11,7 +11,7 @@ profile_port = "6667"
cluster_id = "test-cluster-dev-1"
## the etcd cluster ip list
-cluster_leadership_addresses = "127.0.0.1:2379"
+cluster_leadership_addresses = "http://127.0.0.1:2379"
## data dir for some cluster data
data_dir = ""
@@ -31,3 +31,6 @@ balance_interval = ["4", "5"]
## learner role is used for learner placement, currently only role_log_syncer supported
## learner role will never became master and never balance data node which is not learner.
#learner_role = "role_log_syncer"
+
+## balance strategy version; use v2 to reduce data migration
+balance_ver="v2"
diff --git a/pdserver/server.go b/pdserver/server.go
index 066c5a91..9f09ffff 100644
--- a/pdserver/server.go
+++ b/pdserver/server.go
@@ -1,18 +1,21 @@
package pdserver
import (
+ "errors"
+ "fmt"
"net"
"net/http"
"os"
"strconv"
"sync"
- "github.com/absolute8511/ZanRedisDB/cluster"
- "github.com/absolute8511/ZanRedisDB/cluster/pdnode_coord"
- "github.com/absolute8511/ZanRedisDB/common"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ "github.com/youzan/ZanRedisDB/cluster"
+ "github.com/youzan/ZanRedisDB/cluster/pdnode_coord"
+ "github.com/youzan/ZanRedisDB/common"
)
-var sLog = common.NewLevelLogger(common.LOG_INFO, common.NewDefaultLogger("pdserver"))
+var sLog = common.NewLevelLogger(common.LOG_INFO, common.NewLogger())
func SetLogger(level int32, logger common.Logger) {
sLog.SetLevel(level)
@@ -33,10 +36,11 @@ type Server struct {
tombstonePDNodes map[string]bool
}
-func NewServer(conf *ServerConfig) *Server {
+func NewServer(conf *ServerConfig) (*Server, error) {
hname, err := os.Hostname()
if err != nil {
- sLog.Fatal(err)
+ sLog.Error(err)
+ return nil, err
}
myNode := &cluster.NodeInfo{
@@ -47,7 +51,8 @@ func NewServer(conf *ServerConfig) *Server {
}
if conf.ClusterID == "" {
- sLog.Fatalf("cluster id can not be empty")
+ sLog.Errorf("cluster id can not be empty")
+ return nil, errors.New("empty cluster id")
}
if conf.BroadcastInterface != "" {
myNode.NodeIP = common.GetIPv4ForInterfaceName(conf.BroadcastInterface)
@@ -58,8 +63,9 @@ func NewServer(conf *ServerConfig) *Server {
conf.BroadcastAddr = myNode.NodeIP
}
if myNode.NodeIP == "0.0.0.0" || myNode.NodeIP == "" {
- sLog.Errorf("can not decide the broadcast ip: %v", myNode.NodeIP)
- os.Exit(1)
+ err := fmt.Errorf("can not decide the broadcast ip: %v , %v", myNode.NodeIP, conf.BroadcastInterface)
+ sLog.Errorf(err.Error())
+ return nil, err
}
_, myNode.HttpPort, _ = net.SplitHostPort(conf.HTTPAddress)
if conf.ReverseProxyPort != "" {
@@ -71,16 +77,18 @@ func NewServer(conf *ServerConfig) *Server {
clusterOpts := &cluster.Options{}
clusterOpts.DataDir = conf.DataDir
clusterOpts.AutoBalanceAndMigrate = conf.AutoBalanceAndMigrate
+ clusterOpts.FilterNamespaces = conf.FilterNamespaces
+ clusterOpts.BalanceVer = conf.BalanceVer
if len(conf.BalanceInterval) == 2 {
clusterOpts.BalanceStart, err = strconv.Atoi(conf.BalanceInterval[0])
if err != nil {
sLog.Errorf("invalid balance interval: %v", err)
- os.Exit(1)
+ return nil, err
}
clusterOpts.BalanceEnd, err = strconv.Atoi(conf.BalanceInterval[1])
if err != nil {
sLog.Errorf("invalid balance interval: %v", err)
- os.Exit(1)
+ return nil, err
}
}
s := &Server{
@@ -90,10 +98,23 @@ func NewServer(conf *ServerConfig) *Server {
tombstonePDNodes: make(map[string]bool),
}
- r := cluster.NewPDEtcdRegister(conf.ClusterLeadershipAddresses)
+ r, err := cluster.NewPDEtcdRegister(conf.ClusterLeadershipAddresses)
+ if err != nil {
+ sLog.Errorf("failed to init register: %v", err)
+ return nil, err
+ }
s.pdCoord.SetRegister(r)
- return s
+ metricAddr := conf.MetricAddress
+ if metricAddr == "" {
+ metricAddr = ":8800"
+ }
+ go func() {
+ mux := http.NewServeMux()
+ mux.Handle("/metrics", promhttp.Handler())
+ http.ListenAndServe(metricAddr, mux)
+ }()
+ return s, nil
}
func (s *Server) Stop() {
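
NewServer above now also exposes a Prometheus scrape endpoint on MetricAddress, defaulting to :8800. Below is a standalone sketch of that pattern using promhttp; the error logging around ListenAndServe is an addition for the sketch, not part of the patch.

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	metricAddr := ":8800" // default used when metric-address is not configured

	// Serve the default Prometheus registry under /metrics, as the pd server does.
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())

	go func() {
		if err := http.ListenAndServe(metricAddr, mux); err != nil {
			log.Printf("metric listener stopped: %v", err)
		}
	}()

	select {} // block forever; the real server keeps running its own loops
}
```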
diff --git a/pdserver/server_test.go b/pdserver/server_test.go
index 9bcfeb9e..9938514e 100644
--- a/pdserver/server_test.go
+++ b/pdserver/server_test.go
@@ -1,25 +1,38 @@
package pdserver
import (
+ "flag"
"fmt"
+ "math"
"net/http"
"os"
+ "path"
+ "path/filepath"
+ "sort"
"strconv"
"strings"
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/cluster/datanode_coord"
+ "github.com/siddontang/goredis"
+ "github.com/youzan/ZanRedisDB/cluster"
+ "github.com/youzan/ZanRedisDB/cluster/datanode_coord"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/internal/test"
+ zanredisdb "github.com/youzan/go-zanredisdb"
- "github.com/absolute8511/ZanRedisDB/cluster/pdnode_coord"
- "github.com/absolute8511/ZanRedisDB/node"
- ds "github.com/absolute8511/ZanRedisDB/server"
"github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/cluster/pdnode_coord"
+ "github.com/youzan/ZanRedisDB/node"
)
func TestMain(m *testing.M) {
pdnode_coord.ChangeIntervalForTest()
datanode_coord.ChangeIntervalForTest()
+ flag.Parse()
+ if testing.Verbose() {
+ cluster.SetLogLevel(int(common.LOG_DEBUG))
+ }
ret := m.Run()
@@ -27,6 +40,20 @@ func TestMain(m *testing.M) {
os.Exit(ret)
}
+func enableStaleRead(t *testing.T, addr string, enable bool) {
+ uri := fmt.Sprintf("%s/staleread?allow=true", addr)
+ if !enable {
+ uri = fmt.Sprintf("%s/staleread?allow=false", addr)
+ }
+ rsp, err := http.Post(uri, "", nil)
+ assert.Nil(t, err)
+ if rsp.StatusCode != 200 {
+ assert.FailNow(t, rsp.Status)
+ }
+ assert.Equal(t, 200, rsp.StatusCode)
+ rsp.Body.Close()
+}
+
func enableAutoBalance(t *testing.T, pduri string, enable bool) {
uri := fmt.Sprintf("%s/cluster/balance?enable=true", pduri)
if !enable {
@@ -42,32 +69,36 @@ func enableAutoBalance(t *testing.T, pduri string, enable bool) {
//gpdServer.pdCoord.SetBalanceInterval(0, 24)
}
-func waitForLeader(t *testing.T, ns string, part int) (*ds.Server, *node.NamespaceNode) {
+func waitForLeaderFromNodes(t *testing.T, ns string, part int, nodeList []dataNodeWrapper) (dataNodeWrapper, *node.NamespaceNode) {
start := time.Now()
for {
- if time.Since(start) > time.Minute {
+ if time.Since(start) > time.Minute*3 {
t.Errorf("timeout while waiting leader")
break
}
- for _, kv := range gkvList {
+ for _, kv := range nodeList {
nsNode := kv.s.GetNamespaceFromFullName(ns + "-" + strconv.Itoa(part))
if nsNode == nil {
continue
}
assert.NotNil(t, nsNode)
if nsNode.Node.IsLead() {
- return kv.s, nsNode
+ return kv, nsNode
}
}
time.Sleep(time.Millisecond * 100)
}
- return nil, nil
+ return dataNodeWrapper{}, nil
+}
+
+func waitForLeader(t *testing.T, ns string, part int) (dataNodeWrapper, *node.NamespaceNode) {
+ return waitForLeaderFromNodes(t, ns, part, gkvList)
}
func waitForAllFullReady(t *testing.T, ns string, part int) {
start := time.Now()
for {
- if time.Since(start) > time.Minute {
+ if time.Since(start) > time.Minute*3 {
t.Errorf("timeout while waiting full ready")
break
}
@@ -90,7 +121,7 @@ func waitForAllFullReady(t *testing.T, ns string, part int) {
func waitMarkAsRemoving(t *testing.T, ns string, part int, leaderID string) {
start := time.Now()
for {
- if time.Since(start) > time.Minute {
+ if time.Since(start) > time.Minute*3 {
t.Errorf("timeout while waiting mark removing")
break
}
@@ -109,10 +140,34 @@ func waitMarkAsRemoving(t *testing.T, ns string, part int, leaderID string) {
}
}
+func waitMarkAsRemovingUntilTimeout(t *testing.T, ns string, part int, until time.Duration) []string {
+ start := time.Now()
+ removed := make([]string, 0)
+ for {
+ if time.Since(start) > until {
+ break
+ }
+ allInfo, _, err := gpdServer.pdCoord.GetAllNamespaces()
+ assert.Nil(t, err)
+ nsInfo, ok := allInfo[ns]
+ assert.True(t, ok)
+ nsPartInfo, ok := nsInfo[part]
+ assert.True(t, ok)
+ if len(nsPartInfo.Removings) > 0 {
+ for nid := range nsPartInfo.Removings {
+ removed = append(removed, nid)
+ }
+ break
+ }
+ time.Sleep(time.Millisecond * 100)
+ }
+ return removed
+}
+
func waitRemoveFromRemoving(t *testing.T, ns string, part int) {
start := time.Now()
for {
- if time.Since(start) > time.Minute {
+ if time.Since(start) > time.Minute*3 {
t.Errorf("timeout while waiting remove removing node")
break
}
@@ -133,7 +188,7 @@ func waitRemoveFromRemoving(t *testing.T, ns string, part int) {
func waitEnoughReplica(t *testing.T, ns string, part int) {
start := time.Now()
for {
- if time.Since(start) > time.Minute {
+ if time.Since(start) > time.Minute*3 {
t.Errorf("timeout while waiting enough replicas")
break
}
@@ -151,10 +206,32 @@ func waitEnoughReplica(t *testing.T, ns string, part int) {
}
}
+func getNsInfo(t *testing.T, ns string, part int) cluster.PartitionMetaInfo {
+ allInfo, _, err := gpdServer.pdCoord.GetAllNamespaces()
+ assert.Nil(t, err)
+ nsInfo, ok := allInfo[ns]
+ assert.True(t, ok)
+ nsPartInfo, ok := nsInfo[part]
+ assert.True(t, ok)
+ return nsPartInfo
+}
+
+func getCurrentPartitionNodes(t *testing.T, ns string) [][]string {
+ allInfo, _, err := gpdServer.pdCoord.GetAllNamespaces()
+ assert.Nil(t, err)
+ nsInfo, ok := allInfo[ns]
+ assert.True(t, ok)
+ partNodes := make([][]string, len(nsInfo))
+ for pid, partInfo := range nsInfo {
+ partNodes[pid] = partInfo.GetISR()
+ }
+ return partNodes
+}
+
func waitBalancedLeader(t *testing.T, ns string, part int) {
start := time.Now()
for {
- if time.Since(start) > time.Minute {
+ if time.Since(start) > time.Minute*3 {
t.Errorf("timeout while waiting balanced leader become real leader")
break
}
@@ -175,7 +252,7 @@ func waitBalancedLeader(t *testing.T, ns string, part int) {
func waitBalancedAndExpectedLeader(t *testing.T, ns string, part int, expected string) {
start := time.Now()
for {
- if time.Since(start) > time.Minute {
+ if time.Since(start) > time.Minute*3 {
t.Errorf("timeout while waiting expected leader become real leader ")
break
}
@@ -196,7 +273,7 @@ func waitBalancedAndExpectedLeader(t *testing.T, ns string, part int, expected s
func waitBalancedAndJoined(t *testing.T, ns string, part int, expected string) {
start := time.Now()
for {
- if time.Since(start) > time.Minute*2 {
+ if time.Since(start) > time.Minute {
t.Errorf("timeout while waiting expected node become isr")
break
}
@@ -222,7 +299,7 @@ func waitBalancedAndJoined(t *testing.T, ns string, part int, expected string) {
}
}
-func getFollowerNode(t *testing.T, ns string, part int) (*ds.Server, *node.NamespaceNode) {
+func getFollowerNode(t *testing.T, ns string, part int) (dataNodeWrapper, *node.NamespaceNode) {
for _, kv := range gkvList {
nsNode := kv.s.GetNamespaceFromFullName(ns + "-" + strconv.Itoa(part))
if nsNode == nil {
@@ -232,22 +309,166 @@ func getFollowerNode(t *testing.T, ns string, part int) (*ds.Server, *node.Names
if nsNode.Node.IsLead() {
continue
}
- return kv.s, nsNode
+ return kv, nsNode
}
- return nil, nil
+ return dataNodeWrapper{}, nil
}
-func TestClusterBalanceAcrossMultiDC(t *testing.T) {
- // TODO:
+func getTestRedisConn(t *testing.T, port int) *goredis.PoolConn {
+ c := goredis.NewClient("127.0.0.1:"+strconv.Itoa(port), "")
+ c.SetMaxIdleConns(4)
+ conn, err := c.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ return conn
+}
+
+func checkPartitionNodesBalance(t *testing.T, balanceVer string, partitionNodes [][]string) bool {
+ replicaNodesMap := make(map[string]int)
+ leaderNodesMap := make(map[string]int)
+ for _, nlist := range partitionNodes {
+ l := nlist[0]
+ cnt, ok := leaderNodesMap[l]
+ if !ok {
+ cnt = 0
+ }
+ cnt++
+ leaderNodesMap[l] = cnt
+ nameMap := make(map[string]bool)
+ for _, n := range nlist {
+ nameMap[n] = true
+ cnt, ok = replicaNodesMap[n]
+ if !ok {
+ cnt = 0
+ }
+ cnt++
+ replicaNodesMap[n] = cnt
+ }
+ assert.Equal(t, len(nlist), len(nameMap), nlist)
+ }
+
+ maxL := 0
+ minL := math.MaxInt32
+ for _, cnt := range leaderNodesMap {
+ if cnt > maxL {
+ maxL = cnt
+ }
+ if cnt < minL {
+ minL = cnt
+ }
+ }
+ t.Logf("leader max vs min: %v, %v", maxL, minL)
+ balanced := true
+ if maxL-minL <= 1 {
+ assert.True(t, maxL-minL <= 1, partitionNodes)
+ } else {
+ balanced = false
+ }
+
+ maxL = 0
+ minL = math.MaxInt32
+ for _, cnt := range replicaNodesMap {
+ if cnt > maxL {
+ maxL = cnt
+ }
+ if cnt < minL {
+ minL = cnt
+ }
+ }
+ t.Logf("replica max vs min: %v, %v", maxL, minL)
+ if balanceVer == "" {
+ // the default balance strategy may not keep replicas fully balanced
+ if maxL-minL <= 3 {
+ assert.True(t, maxL-minL <= 3, partitionNodes)
+ } else {
+ balanced = false
+ }
+ return balanced
+ }
+ if maxL-minL <= 1 {
+ assert.True(t, maxL-minL <= 1, partitionNodes)
+ } else {
+ balanced = false
+ }
+ return balanced
}
-func TestClusterRemoveNode(t *testing.T) {
- // TODO: remove a node from api
+func TestRWMultiPartOnDifferentNodes(t *testing.T) {
+ ensureClusterReady(t, 4)
+
+ time.Sleep(time.Second)
+ ns := "test_multi_part_rw"
+ partNum := 4
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 2)
+ defer ensureDeleteNamespace(t, pduri, ns)
+
+ for i := 0; i < partNum; i++ {
+ leader, _ := waitForLeader(t, ns, i)
+ assert.NotNil(t, leader.s)
+ }
+ time.Sleep(time.Second)
+ // the test should write to different partitions
+ table := "test_set_kv_multi"
+ zanClient := getTestClient(t, ns)
+ defer zanClient.Stop()
+ for i := 0; i < 20; i++ {
+ k := []byte(fmt.Sprintf("kv%d", i))
+ err := zanClient.KVSet(table, k, k)
+ assert.Nil(t, err)
+ v, err := zanClient.KVGet(table, k)
+ assert.Nil(t, err)
+ assert.Equal(t, k, v)
+ }
+ for i := 0; i < partNum; i++ {
+ leader, nsNode := waitForLeader(t, ns, i)
+ assert.NotNil(t, leader)
+ stats := nsNode.Node.GetStats("", true)
+ for _, st := range stats.TStats {
+ assert.Equal(t, table, st.Name)
+ t.Log(st)
+ assert.True(t, st.KeyNum > 3)
+ }
+ }
+ // writing to a server that has no partition, or is not the leader of the partition, should return an error
+ for i := 0; i < 20; i++ {
+ k := []byte(fmt.Sprintf("%v:kv%d", table, i))
+ pid := zanredisdb.GetHashedPartitionID(k, partNum)
+ leader, _ := waitForLeader(t, ns, pid)
+ t.Logf("pk %v hash to pid: %v, leader is %v", string(k), pid, leader.redisPort)
+ for _, srv := range gkvList {
+ if srv.redisPort == leader.redisPort {
+ continue
+ }
+ conn := getTestRedisConn(t, srv.redisPort)
+ assert.NotNil(t, conn)
+ _, err := goredis.String(conn.Do("get", ns+":"+string(k)))
+ // a get sent to a non-leader should fail
+ t.Log(err)
+ t.Logf("pk %v send to node: %v", string(k), srv.redisPort)
+ assert.NotNil(t, err)
+ nsNode := srv.s.GetNamespaceFromFullName(ns + "-" + strconv.Itoa(pid))
+ if nsNode == nil {
+ _, err := conn.Do("set", ns+":"+string(k), []byte(k))
+ t.Log(err)
+ assert.NotNil(t, err)
+ } else {
+ // set can be handled by a non-leader
+ _, err := conn.Do("set", ns+":"+string(k), []byte(k))
+ assert.Nil(t, err)
+ }
+ }
+ }
}
func TestLeaderLost(t *testing.T) {
// leader is lost and mark leader as removing
- ensureClusterReady(t)
+ ensureClusterReady(t, 4)
time.Sleep(time.Second)
ns := "test_leader_lost"
@@ -260,7 +481,8 @@ func TestLeaderLost(t *testing.T) {
ensureNamespace(t, pduri, ns, partNum, 3)
defer ensureDeleteNamespace(t, pduri, ns)
- leader, nsNode := waitForLeader(t, ns, 0)
+ nodeWrapper, nsNode := waitForLeader(t, ns, 0)
+ leader := nodeWrapper.s
assert.NotNil(t, leader)
dcoord := leader.GetCoord()
leaderID := dcoord.GetMyID()
@@ -278,7 +500,8 @@ func TestLeaderLost(t *testing.T) {
waitForAllFullReady(t, ns, 0)
// wait balance
waitBalancedLeader(t, ns, 0)
- newLeader, _ := waitForLeader(t, ns, 0)
+ nodeWrapper, _ = waitForLeader(t, ns, 0)
+ newLeader := nodeWrapper.s
assert.NotNil(t, newLeader)
newLeaderID := newLeader.GetCoord().GetMyID()
assert.NotEqual(t, leaderID, newLeaderID)
@@ -292,20 +515,30 @@ func TestLeaderLost(t *testing.T) {
assert.False(t, nsNode.Node.IsLead())
}
- waitBalancedAndExpectedLeader(t, ns, 0, leaderID)
-
- // should keep leader
- nsNode = leader.GetNamespaceFromFullName(ns + "-0")
-
- assert.NotNil(t, nsNode)
- assert.True(t, nsNode.Node.IsLead())
- assert.NotEqual(t, oldRaftReplicaID, nsNode.Node.GetLocalMemberInfo().ID)
+ if balanceVer == "v2" {
+ time.Sleep(time.Second * 10)
+ waitBalancedLeader(t, ns, 0)
+ nodeWrapper, _ := waitForLeader(t, ns, 0)
+ nleader := nodeWrapper.s
+ assert.NotNil(t, nleader)
+ if nleader.GetCoord().GetMyID() == leaderID {
+ nsNode := nleader.GetNamespaceFromFullName(ns + "-0")
+ assert.NotNil(t, nsNode)
+ assert.NotEqual(t, oldRaftReplicaID, nsNode.Node.GetLocalMemberInfo().ID)
+ }
+ } else {
+ waitBalancedAndExpectedLeader(t, ns, 0, leaderID)
+ // should keep leader
+ nsNode = leader.GetNamespaceFromFullName(ns + "-0")
+ assert.NotNil(t, nsNode)
+ assert.True(t, nsNode.Node.IsLead())
+ assert.NotEqual(t, oldRaftReplicaID, nsNode.Node.GetLocalMemberInfo().ID)
+ }
}
func TestFollowerLost(t *testing.T) {
// test follower lost should keep old leader
- ensureClusterReady(t)
-
+ ensureClusterReady(t, 4)
time.Sleep(time.Second)
ns := "test_follower_lost"
partNum := 1
@@ -316,16 +549,18 @@ func TestFollowerLost(t *testing.T) {
enableAutoBalance(t, pduri, true)
ensureNamespace(t, pduri, ns, partNum, 3)
defer ensureDeleteNamespace(t, pduri, ns)
-
- leader, nsNode := waitForLeader(t, ns, 0)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
assert.NotNil(t, leader)
// call this to propose some request to write raft logs
for i := 0; i < 50; i++ {
nsNode.Node.OptimizeDB("")
}
- follower, followerNode := getFollowerNode(t, ns, 0)
+ followerWrap, followerNode := getFollowerNode(t, ns, 0)
+ follower := followerWrap.s
oldFollowerReplicaID := followerNode.GetRaftID()
followerID := follower.GetCoord().GetMyID()
+ t.Logf("stopping %v, old replica id: %v", followerID, oldFollowerReplicaID)
follower.Stop()
// should keep leader
@@ -340,21 +575,97 @@ func TestFollowerLost(t *testing.T) {
waitForAllFullReady(t, ns, 0)
+ // stop for a while and wait for the data to migrate to other nodes,
+ // then start this node again so it joins the cluster and wait for
+ // the data to migrate back to this node
+
// restart old follower and wait balance
// the follower should be balanced to join with different replica id
time.Sleep(time.Second * 5)
+ allInfo, _, err := gpdServer.pdCoord.GetAllNamespaces()
+ assert.Nil(t, err)
+ t.Logf("all namespace: %v", allInfo)
follower.Start()
- waitBalancedAndJoined(t, ns, 0, followerID)
+ time.Sleep(time.Second * 5)
+ waitForAllFullReady(t, ns, 0)
+ if balanceVer == "v2" {
+ // it may not move back to the original node with v2 balance
+ time.Sleep(time.Second * 10)
+ } else {
+ waitBalancedAndJoined(t, ns, 0, followerID)
+ }
// should have different replica id
followerNode = follower.GetNamespaceFromFullName(ns + "-0")
+ if balanceVer == "v2" && followerNode == nil {
+ return
+ }
assert.NotEqual(t, followerNode.GetRaftID(), oldFollowerReplicaID)
}
+func waitRemoteClusterSync(t *testing.T, ns string, leaderNode *node.NamespaceNode, learnerSrvs []dataNodeWrapper, remoteSrvs []dataNodeWrapper) {
+ start := time.Now()
+ for {
+ time.Sleep(time.Second)
+ if time.Since(start) > time.Minute {
+ t.Errorf("timeout waiting add learner")
+ return
+ }
+ commitID := leaderNode.Node.GetAppliedIndex()
+ done := 0
+ for _, srv := range learnerSrvs {
+ nsNode := srv.s.GetNamespaceFromFullName(ns + "-0")
+ if nsNode != nil {
+ lrns := nsNode.GetLearners()
+ t.Logf("current learners: %v", lrns)
+ if len(lrns) == len(learnerSrvs) {
+ found := false
+ for _, l := range lrns {
+ t.Log(*l)
+ if l.NodeID == srv.s.GetCoord().GetMyRegID() {
+ found = true
+ assert.Equal(t, nsNode.GetRaftID(), l.ID)
+ }
+ }
+ assert.True(t, found, "should find myself in learners")
+ checkAllOK := true
+ for _, remote := range remoteSrvs {
+ remoteNode := remote.s.GetNamespaceFromFullName(ns + "-0")
+ assert.NotNil(t, remoteNode)
+ _, remoteIndex, _ := remoteNode.Node.GetRemoteClusterSyncedRaft(TestClusterName)
+ learnerCI := nsNode.Node.GetAppliedIndex()
+ t.Logf("commit %v , current remote :%v, learner: %v", commitID, remoteIndex, learnerCI)
+ if remoteIndex >= commitID && learnerCI == remoteIndex {
+ time.Sleep(time.Second)
+ } else {
+ checkAllOK = false
+ }
+ }
+ if checkAllOK {
+ done++
+ }
+ } else {
+ break
+ }
+ }
+ }
+ if done >= len(learnerSrvs) {
+ break
+ }
+ }
+ commitID := leaderNode.Node.GetAppliedIndex()
+ for _, srv := range learnerSrvs {
+ nsNode := srv.s.GetNamespaceFromFullName(ns + "-0")
+ assert.Equal(t, commitID, nsNode.Node.GetAppliedIndex())
+ stats := nsNode.Node.GetStats("", false)
+ assert.Equal(t, commitID, stats.InternalStats["synced_index"].(uint64))
+ }
+}
+
func TestAddRemoteClusterLogSyncLearner(t *testing.T) {
node.EnableForTest()
- ensureClusterReady(t)
+ ensureClusterReady(t, 4)
time.Sleep(time.Second)
ns := "test_add_learner"
@@ -366,7 +677,8 @@ func TestAddRemoteClusterLogSyncLearner(t *testing.T) {
ensureNamespace(t, pduri, ns, partNum, 3)
defer ensureDeleteNamespace(t, pduri, ns)
- leader, leaderNode := waitForLeader(t, ns, 0)
+ dnw, leaderNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
assert.NotNil(t, leader)
remotePD, remoteSrvs, remoteTmpDir := startRemoteSyncTestCluster(t, 2)
@@ -401,108 +713,2024 @@ func TestAddRemoteClusterLogSyncLearner(t *testing.T) {
os.RemoveAll(tmpDir)
}
}()
- start := time.Now()
- remoteNode := remoteSrvs[0].s.GetNamespaceFromFullName(ns + "-0")
- assert.NotNil(t, remoteNode)
time.Sleep(time.Second * 3)
- for {
- time.Sleep(time.Second)
- if time.Since(start) > time.Minute {
- t.Errorf("timeout waiting add learner")
- break
- }
- commitID := leaderNode.Node.GetCommittedIndex()
- done := 0
- for _, srv := range learnerSrvs {
- nsNode := srv.s.GetNamespaceFromFullName(ns + "-0")
- if nsNode != nil {
- lrns := nsNode.GetLearners()
- t.Log(lrns)
- if len(lrns) == len(learnerSrvs) {
- found := false
- for _, l := range lrns {
- t.Log(*l)
- if l.NodeID == srv.s.GetCoord().GetMyRegID() {
- found = true
- assert.Equal(t, nsNode.GetRaftID(), l.ID)
- }
- }
- assert.True(t, found, "should found myself in learners")
- _, remoteIndex, _ := remoteNode.Node.GetRemoteClusterSyncedRaft(TestClusterName)
- learnerCI := nsNode.Node.GetCommittedIndex()
- t.Logf("commit %v , current remote :%v, learner: %v", commitID, remoteIndex, learnerCI)
- if remoteIndex >= commitID && learnerCI == remoteIndex {
- time.Sleep(time.Second)
- done++
- }
- } else {
- break
- }
- }
- }
- if done >= len(learnerSrvs) {
- break
- }
- }
- commitID := leaderNode.Node.GetCommittedIndex()
- for _, srv := range learnerSrvs {
- nsNode := srv.s.GetNamespaceFromFullName(ns + "-0")
- assert.Equal(t, commitID, nsNode.Node.GetCommittedIndex())
- stats := nsNode.Node.GetStats()
- assert.Equal(t, commitID, stats.InternalStats["synced_index"].(uint64))
- }
-}
-func TestMigrateLeader(t *testing.T) {
- // add new node and mark leader as removing.
- // leader should transfer leader first and then propose remove self
+ waitRemoteClusterSync(t, ns, leaderNode, learnerSrvs, remoteSrvs)
}
-func TestMigrateFollower(t *testing.T) {
- // add new node and mark follower as removing.
- // removing node should propose remove self
-}
+ // test that a restart and a leader change will let the syncer continue
+ // test restart and leader changed will continue syncer
+ node.EnableForTest()
+ ensureClusterReady(t, 3)
-func TestTransferLeaderWhileReplicaNotReady(t *testing.T) {
- // TODO: test transfer leader while replica is restarting and not catchup fully.
- // should only transfer leader when replica has almost the newest raft logs
-}
+ time.Sleep(time.Second)
+ ns := "test_learner_continue_after_restart"
+ partNum := 1
-func TestMarkAsRemovingWhileNotEnoughAlives(t *testing.T) {
- // TODO:
- // should not mark as remove while there is not enough for replica
-}
+ pduri := "http://127.0.0.1:" + pdHttpPort
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
-func TestRestartWithMigrate(t *testing.T) {
- // TODO:
- // stop for a while and wait the data migrate to others
- // and then start this node to join the cluster and wait
- // data migrate back to this node
-}
+ dnw, leaderNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
-func TestRestartCluster(t *testing.T) {
- // TODO:
- // stop all nodes in cluster and start one by one
-}
+ node.SetSyncerOnly(true)
+ defer node.SetSyncerOnly(false)
+ remotePD, remoteSrvs, remoteTmpDir := startRemoteSyncTestCluster(t, 1)
+ defer func() {
+ for _, kv := range remoteSrvs {
+ kv.s.Stop()
+ }
+ if remotePD != nil {
+ remotePD.Stop()
+ }
+ if strings.Contains(remoteTmpDir, "rocksdb-test") {
+ t.Logf("removing: %v", remoteTmpDir)
+ os.RemoveAll(remoteTmpDir)
+ }
+ }()
+ pduri = "http://127.0.0.1:" + pdRemoteHttpPort
+ for _, lrnSrv := range remoteSrvs {
+ lrnSrv.s.GetCoord().UpdateSyncerWriteOnly(true)
+ }
+ ensureDataNodesReady(t, pduri, len(remoteSrvs))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 1)
+ defer ensureDeleteNamespace(t, pduri, ns)
-func TestRestartWithForceAlone(t *testing.T) {
- // TODO: test force restart with alone
-}
+ leaderNode.Node.OptimizeDB("")
-func TestInstallSnapshotFailed(t *testing.T) {
- // TODO: test the follower fall behind too much, and the leader send the snapshot to follower,
- // However, the follower failed to pull the snapshot data from leader. So the raft node should stop
- // and restart later.
+ learnerPD, learnerSrvs, tmpDir := startTestClusterForLearner(t, 1)
+ defer func() {
+ for _, kv := range learnerSrvs {
+ kv.s.Stop()
+ }
+ if learnerPD != nil {
+ learnerPD.Stop()
+ }
+ if strings.Contains(tmpDir, "learner-test") {
+ t.Logf("removing: %v", tmpDir)
+ os.RemoveAll(tmpDir)
+ }
+ }()
+ time.Sleep(time.Second * 3)
- // test case should make sure the snap will be not persisted to the stable storage since the snapshot data is failed to pull.
-}
+ leaderNode.Node.OptimizeDB("")
-func TestClusterBalanceWhileNewNodeAdding(t *testing.T) {
- // TODO: while replica is not enough, we will add new replica node while check namespace,
- // and then it should wait the new replica is raft synced before we can start to balance
-}
+ t.Logf("begin wait first before restart")
+ waitRemoteClusterSync(t, ns, leaderNode, learnerSrvs, remoteSrvs)
-func TestClusterAddReplicaOneByOne(t *testing.T) {
- // TODO: while replica is not enough, we will add new replica node while check namespace.
- // If two replica are removed, we need add new replica one by one to avoid 2 failed node in raft.
+ // restart leader
+ leader.Stop()
+ time.Sleep(time.Second)
+
+ leader.Start()
+ // start will reset the syncer write-only state from remote, so we need to set it again to allow the learner node to run
+ node.SetSyncerOnly(true)
+ waitEnoughReplica(t, ns, 0)
+ waitForAllFullReady(t, ns, 0)
+
+ dnw, leaderNode = waitForLeader(t, ns, 0)
+ leader = dnw.s
+ assert.NotNil(t, leader)
+ leaderNode.Node.OptimizeDB("")
+ time.Sleep(time.Second * 3)
+
+ t.Logf("begin wait after source restart")
+ waitRemoteClusterSync(t, ns, leaderNode, learnerSrvs, remoteSrvs)
+}
+
+func TestRemoteClusterLearnerRestartAndRestoreBackup(t *testing.T) {
+ // restarting, backing up, and restoring from backup should keep the remote cluster term-index
+ node.EnableForTest()
+ ensureClusterReady(t, 3)
+
+ time.Sleep(time.Second)
+ ns := "test_restart_learner"
+ partNum := 1
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+
+ dnw, leaderNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+
+ leaderNode.Node.OptimizeDB("")
+ c := getTestRedisConn(t, gkvList[0].redisPort)
+ defer c.Close()
+ key := fmt.Sprintf("%s:%s", ns, "test_remote_syncer:k1")
+ rsp, err := goredis.String(c.Do("set", key, key))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", rsp)
+ key2 := fmt.Sprintf("%s:%s", ns, "test_remote_syncer:k2")
+ rsp, err = goredis.String(c.Do("set", key2, key2))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", rsp)
+
+ remotePD, remoteSrvs, remoteTmpDir := startRemoteSyncTestCluster(t, 2)
+ defer func() {
+ for _, kv := range remoteSrvs {
+ kv.s.Stop()
+ }
+ if remotePD != nil {
+ remotePD.Stop()
+ }
+ if strings.Contains(remoteTmpDir, "rocksdb-test") {
+ t.Logf("removing: %v", remoteTmpDir)
+ os.RemoveAll(remoteTmpDir)
+ }
+ }()
+ pduri = "http://127.0.0.1:" + pdRemoteHttpPort
+ ensureDataNodesReady(t, pduri, len(remoteSrvs))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 2)
+ defer ensureDeleteNamespace(t, pduri, ns)
+
+ leaderNode.Node.OptimizeDB("")
+
+ learnerPD, learnerSrvs, tmpDir := startTestClusterForLearner(t, 1)
+ defer func() {
+ for _, kv := range learnerSrvs {
+ kv.s.Stop()
+ }
+ if learnerPD != nil {
+ learnerPD.Stop()
+ }
+ if strings.Contains(tmpDir, "learner-test") {
+ t.Logf("removing: %v", tmpDir)
+ os.RemoveAll(tmpDir)
+ }
+ }()
+
+ leaderNode.Node.OptimizeDB("")
+ time.Sleep(time.Second * 3)
+
+ waitRemoteClusterSync(t, ns, leaderNode, learnerSrvs, remoteSrvs)
+
+ // restart the remote cluster nodes one by one
+ for _, kv := range remoteSrvs {
+ kv.s.Stop()
+ time.Sleep(time.Second)
+ kv.s.Start()
+ time.Sleep(time.Second)
+ }
+
+ leaderNode.Node.OptimizeDB("")
+ waitRemoteClusterSync(t, ns, leaderNode, learnerSrvs, remoteSrvs)
+
+ addr := fmt.Sprintf("http://127.0.0.1:%v", remoteSrvs[0].httpPort)
+ enableStaleRead(t, addr, true)
+ defer enableStaleRead(t, addr, false)
+ remoteC := getTestRedisConn(t, remoteSrvs[0].redisPort)
+ defer remoteC.Close()
+ rsp, err = goredis.String(remoteC.Do("get", key))
+ assert.Nil(t, err)
+ assert.Equal(t, key, rsp)
+ rsp, err = goredis.String(remoteC.Do("get", key2))
+ assert.Nil(t, err)
+ assert.Equal(t, key2, rsp)
+}
+
+func TestRemoteClusterLearnerNotIgnoreDeleteRangeAsConfig(t *testing.T) {
+ testRemoteClusterLearnerIgnoreDeleteRangeAsConfig(t, false)
+}
+func TestRemoteClusterLearnerIgnoreDeleteRangeAsConfig(t *testing.T) {
+ testRemoteClusterLearnerIgnoreDeleteRangeAsConfig(t, true)
+}
+func testRemoteClusterLearnerIgnoreDeleteRangeAsConfig(t *testing.T, ignoreDelRange bool) {
+ // a delete range should be replayed to the remote cluster unless it is configured to be ignored
+ node.EnableForTest()
+ ensureClusterReady(t, 3)
+
+ time.Sleep(time.Second)
+ ns := "test_remote_cluster_syncer_delrange"
+ partNum := 1
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+
+ dnw, leaderNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+
+ leaderNode.Node.OptimizeDB("")
+
+ // set the values before the remote syncer starts, since the syncer-only flag is a global variable and would be changed if another node runs as the syncer
+ c := getTestRedisConn(t, gkvList[0].redisPort)
+ defer c.Close()
+ key := fmt.Sprintf("%s:%s", ns, "test_remote_syncer:k1")
+ rsp, err := goredis.String(c.Do("set", key, key))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", rsp)
+ key2 := fmt.Sprintf("%s:%s", ns, "test_remote_syncer:k2")
+ rsp, err = goredis.String(c.Do("set", key2, key2))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", rsp)
+
+ remotePD, remoteSrvs, remoteTmpDir := startRemoteSyncTestCluster(t, 1)
+ defer func() {
+ for _, kv := range remoteSrvs {
+ kv.s.Stop()
+ }
+ if remotePD != nil {
+ remotePD.Stop()
+ }
+ if strings.Contains(remoteTmpDir, "rocksdb-test") {
+ t.Logf("removing: %v", remoteTmpDir)
+ os.RemoveAll(remoteTmpDir)
+ }
+ }()
+ pduri = "http://127.0.0.1:" + pdRemoteHttpPort
+ ensureDataNodesReady(t, pduri, len(remoteSrvs))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 1)
+ defer ensureDeleteNamespace(t, pduri, ns)
+
+ learnerPD, learnerSrvs, tmpDir := startTestClusterForLearner(t, 1)
+ defer func() {
+ for _, kv := range learnerSrvs {
+ kv.s.Stop()
+ }
+ if learnerPD != nil {
+ learnerPD.Stop()
+ }
+ if strings.Contains(tmpDir, "learner-test") {
+ t.Logf("removing: %v", tmpDir)
+ os.RemoveAll(tmpDir)
+ }
+ }()
+
+ leaderNode.Node.OptimizeDB("")
+
+ time.Sleep(time.Second * 3)
+
+ waitRemoteClusterSync(t, ns, leaderNode, learnerSrvs, remoteSrvs)
+
+ addr := fmt.Sprintf("http://127.0.0.1:%v", remoteSrvs[0].httpPort)
+ enableStaleRead(t, addr, true)
+ defer enableStaleRead(t, addr, false)
+ remoteC := getTestRedisConn(t, remoteSrvs[0].redisPort)
+ defer remoteC.Close()
+ rsp, err = goredis.String(remoteC.Do("get", key))
+ assert.Nil(t, err)
+ assert.Equal(t, key, rsp)
+ rsp, err = goredis.String(remoteC.Do("get", key2))
+ assert.Nil(t, err)
+ assert.Equal(t, key2, rsp)
+
+ err = leaderNode.Node.DeleteRange(node.DeleteTableRange{
+ Table: "test_remote_syncer",
+ DeleteAll: true,
+ NoReplayToRemoteCluster: ignoreDelRange,
+ })
+ assert.Nil(t, err)
+ time.Sleep(time.Second)
+ waitRemoteClusterSync(t, ns, leaderNode, learnerSrvs, remoteSrvs)
+
+ rsp, err = goredis.String(c.Do("get", key))
+ assert.Equal(t, goredis.ErrNil, err)
+ assert.Equal(t, "", rsp)
+ rsp, err = goredis.String(c.Do("get", key2))
+ assert.Equal(t, goredis.ErrNil, err)
+ assert.Equal(t, "", rsp)
+
+ rsp, err = goredis.String(remoteC.Do("get", key))
+ if ignoreDelRange {
+ assert.Nil(t, err)
+ assert.Equal(t, key, rsp)
+ } else {
+ assert.Equal(t, goredis.ErrNil, err)
+ assert.Equal(t, "", rsp)
+ }
+ rsp, err = goredis.String(remoteC.Do("get", key2))
+ if ignoreDelRange {
+ assert.Nil(t, err)
+ assert.Equal(t, key2, rsp)
+ } else {
+ assert.Equal(t, goredis.ErrNil, err)
+ assert.Equal(t, "", rsp)
+ }
+}
+
+func TestClusterBalanceAcrossMultiDC(t *testing.T) {
+ // TODO:
+}
+
+func TestClusterRemoveNodeNotLast(t *testing.T) {
+ testClusterRemoveNode(t, 4, "test_cluster_remove_node_by_api")
+}
+
+func testClusterRemoveNode(t *testing.T, leftNodeN int, ns string) {
+ // remove a node via the API and wait for all its data to be balanced to the other nodes
+ ensureClusterReady(t, leftNodeN)
+
+ time.Sleep(time.Second)
+ partNum := 4
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+
+ newDataNodes, dataDir := addMoreTestDataNodeToCluster(t, 1)
+ defer cleanDataNodes(newDataNodes, dataDir)
+ time.Sleep(time.Second)
+
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 10; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ oldNsList := make([]cluster.PartitionMetaInfo, 0)
+ for i := 0; i < partNum; i++ {
+ oldNs := getNsInfo(t, ns, i)
+ t.Logf("part %v isr is %v", i, oldNs.GetISR())
+ oldNsList = append(oldNsList, oldNs)
+ waitBalancedLeader(t, ns, i)
+ }
+
+ nsNum := 0
+ for i := 0; i < partNum; i++ {
+ nsNode := newDataNodes[0].s.GetNamespaceFromFullName(ns + "-" + strconv.Itoa(i))
+ if nsNode != nil {
+ nsNum++
+ }
+ }
+ assert.True(t, nsNum > 0)
+ // remove node from api
+ removedNodeID := newDataNodes[0].s.GetCoord().GetMyID()
+ gpdServer.pdCoord.MarkNodeAsRemoving(removedNodeID)
+ // wait balance
+ start := time.Now()
+ for i := 0; i < partNum; i++ {
+ for {
+ if time.Since(start) > time.Minute*time.Duration(partNum) {
+ t.Errorf("timeout wait removing partition %v on removed node", i)
+ break
+ }
+ time.Sleep(time.Second * 5)
+ nsInfo := getNsInfo(t, ns, i)
+ if len(nsInfo.Removings) > 0 {
+ continue
+ }
+ if len(nsInfo.GetISR()) != 3 {
+ continue
+ }
+ waitRemove := false
+ for _, nid := range nsInfo.GetISR() {
+ if nid == removedNodeID {
+ waitRemove = true
+ t.Logf("still waiting remove node: %v, %v", nsInfo.GetDesp(), nsInfo.GetISR())
+ break
+ }
+ }
+ if waitRemove {
+ continue
+ }
+ break
+ }
+ waitBalancedLeader(t, ns, i)
+ }
+
+ time.Sleep(time.Second * 5)
+ for i := 0; i < partNum; i++ {
+ for {
+ time.Sleep(time.Second)
+ waitRemoveFromRemoving(t, ns, i)
+ waitEnoughReplica(t, ns, i)
+ waitForAllFullReady(t, ns, i)
+ waitBalancedLeader(t, ns, i)
+ newNs := getNsInfo(t, ns, i)
+ newISR := newNs.GetISR()
+ if len(newISR) != 3 || len(newNs.Removings) > 0 {
+ // wait for the unneeded replica to be removed
+ continue
+ }
+ break
+ }
+ nsInfo := getNsInfo(t, ns, i)
+ for _, nid := range nsInfo.GetISR() {
+ assert.NotEqual(t, nid, removedNodeID)
+ }
+ }
+ for i := 0; i < partNum; i++ {
+ nsNode := newDataNodes[0].s.GetNamespaceFromFullName(ns + "-" + strconv.Itoa(i))
+ assert.Nil(t, nsNode)
+ }
+}
+
+func TestClusterRemoveNodeForLast(t *testing.T) {
+ testClusterRemoveNode(t, 3, "test_cluster_remove_lastnode_by_api")
+}
+
+func TestClusterNodeFailedTooLongBalance(t *testing.T) {
+ // a failed node should trigger rebalance on the remaining nodes
+ ensureClusterReady(t, 3)
+
+ time.Sleep(time.Second)
+ ns := "test_cluster_failed_node_balance"
+ partNum := 8
+ replicator := 3
+ pduri := "http://127.0.0.1:" + pdHttpPort
+
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+
+ newDataNodes, dataDir := addMoreTestDataNodeToCluster(t, 1)
+ defer cleanDataNodes(newDataNodes, dataDir)
+ time.Sleep(time.Second)
+
+ ensureNamespace(t, pduri, ns, partNum, replicator)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 10; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ oldNsList := make([]cluster.PartitionMetaInfo, 0)
+ for i := 0; i < partNum; i++ {
+ oldNs := getNsInfo(t, ns, i)
+ t.Logf("part %v isr is %v", i, oldNs.GetISR())
+ oldNsList = append(oldNsList, oldNs)
+ waitBalancedLeader(t, ns, i)
+ }
+
+ nsNum := 0
+ for i := 0; i < partNum; i++ {
+ nsNode := newDataNodes[0].s.GetNamespaceFromFullName(ns + "-" + strconv.Itoa(i))
+ if nsNode != nil {
+ nsNum++
+ }
+ }
+ assert.True(t, nsNum > 0)
+ // stop node to trigger balance
+ removedNodeID := newDataNodes[0].s.GetCoord().GetMyID()
+ newDataNodes[0].s.Stop()
+ time.Sleep(time.Second * 30)
+ // wait balance
+ start := time.Now()
+ for i := 0; i < partNum; i++ {
+ for {
+ if time.Since(start) > time.Minute*time.Duration(partNum/2) {
+ t.Errorf("timeout wait removing partition %v on removed node", i)
+ break
+ }
+ time.Sleep(time.Second * 5)
+ nsInfo := getNsInfo(t, ns, i)
+ if len(nsInfo.Removings) > 0 {
+ continue
+ }
+ if len(nsInfo.GetISR()) != replicator {
+ continue
+ }
+ waitRemove := false
+ for _, nid := range nsInfo.GetISR() {
+ if nid == removedNodeID {
+ waitRemove = true
+ t.Logf("still waiting remove node: %v, %v", nsInfo.GetDesp(), nsInfo.GetISR())
+ break
+ }
+ }
+ if waitRemove {
+ continue
+ }
+ break
+ }
+ waitBalancedLeader(t, ns, i)
+ }
+
+ time.Sleep(time.Second * 5)
+ start = time.Now()
+ for i := 0; i < partNum; i++ {
+ for {
+ if time.Since(start) > time.Minute*time.Duration(partNum/2) {
+ t.Errorf("timeout wait balance partition %v ", i)
+ break
+ }
+ time.Sleep(time.Second)
+ waitRemoveFromRemoving(t, ns, i)
+ waitEnoughReplica(t, ns, i)
+ waitForAllFullReady(t, ns, i)
+ waitBalancedLeader(t, ns, i)
+ newNs := getNsInfo(t, ns, i)
+ newISR := newNs.GetISR()
+ if len(newISR) != replicator || len(newNs.Removings) > 0 {
+ // wait for the unneeded replica to be removed
+ continue
+ }
+ break
+ }
+ nsInfo := getNsInfo(t, ns, i)
+ t.Logf("after stopped, isr: %v", nsInfo.GetISR())
+ for _, nid := range nsInfo.GetISR() {
+ assert.NotEqual(t, nid, removedNodeID)
+ }
+ assert.Equal(t, 3, len(nsInfo.GetISR()))
+ assert.Equal(t, 0, len(nsInfo.Removings))
+ }
+ assert.True(t, checkPartitionNodesBalance(t, "v2", getCurrentPartitionNodes(t, ns)), "should be balanced after the node stopped")
+ // start the failed node to make sure the cluster balances again
+ newDataNodes[0].s.Start()
+ time.Sleep(time.Second * 10)
+ start = time.Now()
+ for {
+ found := 0
+ for i := 0; i < partNum; i++ {
+ time.Sleep(time.Second)
+ waitEnoughReplica(t, ns, i)
+ waitForAllFullReady(t, ns, i)
+ waitBalancedLeader(t, ns, i)
+ newNs := getNsInfo(t, ns, i)
+ newISR := newNs.GetISR()
+ if len(newISR) != replicator || len(newNs.Removings) > 0 {
+ // wait remove unneed replica
+ continue
+ }
+ t.Logf("after restart stopped node, isr: %v", newNs.GetISR())
+ if cluster.FindSlice(newISR, removedNodeID) != -1 {
+ found++
+ t.Logf("found restarted node in part: %v", i)
+ }
+ assert.Equal(t, 3, len(newNs.GetISR()))
+ assert.Equal(t, 0, len(newNs.Removings))
+ }
+ t.Logf("found %v part for restarted node", found)
+ if time.Since(start) > time.Minute {
+ t.Errorf("timeout wait balance")
+ break
+ }
+ if found > partNum/2 {
+ localNsList, err := newDataNodes[0].s.GetNsMgr().GetNamespaceNodes(ns, false)
+ assert.Nil(t, err)
+ t.Logf("found %v part for restarted node, and local loaded: %v", found, localNsList)
+ if len(localNsList) < found {
+ continue
+ }
+ if checkPartitionNodesBalance(t, "v2", getCurrentPartitionNodes(t, ns)) {
+ break
+ }
+ }
+ }
+}
+
+func testMigrateNode(t *testing.T, migrateLeader bool) {
+ ensureClusterReady(t, 4)
+ time.Sleep(time.Second)
+ partNum := 1
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+ ns := "test_migrate_node"
+
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 10; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ oldNsList := make([]cluster.PartitionMetaInfo, 0)
+ for i := 0; i < partNum; i++ {
+ oldNs := getNsInfo(t, ns, i)
+ t.Logf("part %v isr is %v", i, oldNs.GetISR())
+ oldNsList = append(oldNsList, oldNs)
+ waitBalancedLeader(t, ns, i)
+ }
+ // remove node from api
+ removedNodeID := leader.GetCoord().GetMyID()
+ migratedNode := leader
+ if !migrateLeader {
+ followerWrap, _ := getFollowerNode(t, ns, 0)
+ removedNodeID = followerWrap.s.GetCoord().GetMyID()
+ migratedNode = followerWrap.s
+ }
+ gpdServer.pdCoord.MarkNodeAsRemoving(removedNodeID)
+ // wait balance
+ start := time.Now()
+ for i := 0; i < partNum; i++ {
+ for {
+ if time.Since(start) > time.Minute*time.Duration(partNum) {
+ t.Errorf("timeout wait removing partition %v on removed node", i)
+ break
+ }
+ time.Sleep(time.Second * 5)
+ nsInfo := getNsInfo(t, ns, i)
+ if len(nsInfo.Removings) > 0 {
+ continue
+ }
+ if len(nsInfo.GetISR()) != 3 {
+ continue
+ }
+ waitRemove := false
+ for _, nid := range nsInfo.GetISR() {
+ if nid == removedNodeID {
+ waitRemove = true
+ t.Logf("still waiting remove node: %v, %v", nsInfo.GetDesp(), nsInfo.GetISR())
+ break
+ }
+ }
+ if waitRemove {
+ continue
+ }
+ break
+ }
+ waitBalancedLeader(t, ns, i)
+ }
+
+ time.Sleep(time.Second * 5)
+ for i := 0; i < partNum; i++ {
+ for {
+ time.Sleep(time.Second)
+ waitRemoveFromRemoving(t, ns, i)
+ waitEnoughReplica(t, ns, i)
+ waitForAllFullReady(t, ns, i)
+ waitBalancedLeader(t, ns, i)
+ newNs := getNsInfo(t, ns, i)
+ newISR := newNs.GetISR()
+ if len(newISR) != 3 || len(newNs.Removings) > 0 {
+ // wait for the unneeded replica to be removed
+ continue
+ }
+ break
+ }
+ nsInfo := getNsInfo(t, ns, i)
+ for _, nid := range nsInfo.GetISR() {
+ assert.NotEqual(t, nid, removedNodeID)
+ }
+ }
+ for i := 0; i < partNum; i++ {
+ nsNode := migratedNode.GetNamespaceFromFullName(ns + "-" + strconv.Itoa(i))
+ assert.Nil(t, nsNode)
+ }
+}
+
+// It should wait until raft is synced before we can start to balance
+func TestMigrateLeader(t *testing.T) {
+ // mark leader as removing.
+ // leader should transfer leader first and then propose remove self
+ // remove a node from api and wait all data balanced to others
+ testMigrateNode(t, true)
+}
+
+func TestMigrateFollower(t *testing.T) {
+ // mark follower as removing.
+ // removing node should propose remove self
+ testMigrateNode(t, false)
+}
+
+func TestTransferLeaderWhileReplicaNotReady(t *testing.T) {
+ // TODO: test transfer leader while the replica is restarting and not fully caught up.
+ // should only transfer leader when the replica has almost the newest raft logs
+}
+
+func TestTransferLeaderWhileReplicaApplyingSnapshot(t *testing.T) {
+ // apply snapshot and transfer leader should fail
+ defer node.EnableSnapBlockingForTest(false)
+
+ ensureClusterReady(t, 3)
+ time.Sleep(time.Second)
+ ns := "test_cluster_transfer_leader_snap_applying"
+ partNum := 1
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 5; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ oldNs := getNsInfo(t, ns, 0)
+ t.Logf("old isr is: %v", oldNs)
+ assert.Equal(t, 3, len(oldNs.GetISR()))
+
+ foWrap, _ := getFollowerNode(t, ns, 0)
+ foWrap.s.Stop()
+
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ c := getTestRedisConn(t, dnw.redisPort)
+ defer c.Close()
+ key := fmt.Sprintf("%s:%s", ns, "snap_apply:k1")
+ rsp, err := goredis.String(c.Do("set", key, "1234"))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", rsp)
+
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ leaderV, err := goredis.String(c.Do("get", key))
+ assert.True(t, err == nil || err == goredis.ErrNil)
+ assert.Equal(t, "1234", leaderV)
+ time.Sleep(time.Second * 5)
+
+ // make sure snapshot applying is blocked,
+ // and then try to transfer leader to this follower (it should fail)
+ node.EnableSnapBlockingForTest(true)
+ foWrap.s.Start()
+ time.Sleep(time.Second)
+ node.PutSnapBlockingTime(time.Second * 20)
+ fn := foWrap.s.GetNamespaceFromFullName(ns + "-0")
+ assert.True(t, fn.Node.IsApplyingSnapshot())
+ foRaftID := fn.GetRaftID()
+ err = nsNode.Node.TransferLeadership(foRaftID)
+ assert.NotNil(t, err)
+ nsInfo := getNsInfo(t, ns, 0)
+ transferOK := leader.GetCoord().TransferMyNamespaceLeader(&nsInfo, foWrap.s.GetCoord().GetMyID(), false, true)
+ assert.False(t, transferOK, "should not transfer while snapshot applying")
+ transferOK = leader.GetCoord().TransferMyNamespaceLeader(&nsInfo, foWrap.s.GetCoord().GetMyID(), false, false)
+ assert.False(t, transferOK, "should not transfer while snapshot applying")
+ transferOK = leader.GetCoord().TransferMyNamespaceLeader(&nsInfo, foWrap.s.GetCoord().GetMyID(), true, false)
+ assert.False(t, transferOK, "should not transfer while snapshot applying")
+
+ time.Sleep(time.Second * 20)
+ assert.False(t, fn.Node.IsApplyingSnapshot())
+ transferOK = leader.GetCoord().TransferMyNamespaceLeader(&nsInfo, foWrap.s.GetCoord().GetMyID(), false, true)
+ assert.True(t, transferOK, "should transfer ok")
+ _, newLeaderNode := waitForLeader(t, ns, 0)
+ assert.Equal(t, foRaftID, newLeaderNode.GetRaftID())
+
+ waitForAllFullReady(t, ns, 0)
+ followerConn := getTestRedisConn(t, foWrap.redisPort)
+ defer followerConn.Close()
+
+ getV, err := goredis.String(followerConn.Do("get", key))
+ assert.True(t, err == nil || err == goredis.ErrNil)
+ assert.Equal(t, "1234", getV)
+}
+
+func TestTransferLeaderWhileReplicaLagToomuch(t *testing.T) {
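+ // TODO: test transfer leader while the replica raft log lags too far behind the leader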
+}
+func TestClusterRestartNodeCatchup(t *testing.T) {
+ // test restarted node catchup while writing
+ ensureClusterReady(t, 3)
+
+ time.Sleep(time.Second)
+ ns := "test_cluster_restart_catchup"
+ partNum := 1
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ oldNs := getNsInfo(t, ns, 0)
+ gkvList[0].s.Stop()
+ done := make(chan bool)
+ go func() {
+ for {
+ nsNode.Node.OptimizeDB("")
+ time.Sleep(time.Millisecond)
+ select {
+ case <-done:
+ return
+ default:
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 3)
+
+ gkvList[0].s.Start()
+
+ waitEnoughReplica(t, ns, 0)
+ waitForAllFullReady(t, ns, 0)
+ close(done)
+ waitBalancedAndExpectedLeader(t, ns, 0, leader.GetCoord().GetMyID())
+
+ c := getTestRedisConn(t, gkvList[0].redisPort)
+ defer c.Close()
+ key := fmt.Sprintf("%s:%s", ns, "restart_catchup:k1")
+ rsp, err := goredis.String(c.Do("set", key, "1234"))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", rsp)
+ time.Sleep(time.Second)
+
+ for i := 0; i < len(gkvList); i++ {
+ addr := fmt.Sprintf("http://127.0.0.1:%v", gkvList[i].httpPort)
+ enableStaleRead(t, addr, true)
+ followerConn := getTestRedisConn(t, gkvList[i].redisPort)
+ for i := 0; i < 10; i++ {
+ getV, err := goredis.String(followerConn.Do("get", key))
+ assert.True(t, err == nil || err == goredis.ErrNil)
+ t.Logf("read follower : %v", getV)
+ assert.True(t, getV == "1234")
+ }
+ enableStaleRead(t, addr, false)
+ followerConn.Close()
+ }
+
+ newNs := getNsInfo(t, ns, 0)
+ test.Equal(t, oldNs.GetISR(), newNs.GetISR())
+ test.Equal(t, oldNs.GetRealLeader(), newNs.GetRealLeader())
+}
+
+func TestMarkAsRemovingWhileNotEnoughAlives(t *testing.T) {
+ // should not mark a node as removing while there are not enough alive replicas (more than half are dead)
+ ensureClusterReady(t, 4)
+ newNodes, dataDir := addMoreTestDataNodeToCluster(t, 1)
+ defer cleanDataNodes(newNodes, dataDir)
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+ ensureDataNodesReady(t, pduri, len(gkvList)+1)
+
+ time.Sleep(time.Second)
+ ns := "test_mark_removing_no_enough"
+ partNum := 1
+
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+
+ allNodes := []dataNodeWrapper{}
+ allNodes = append(allNodes, gkvList...)
+ allNodes = append(allNodes, newNodes...)
+ nodeWrapper, nsNode := waitForLeaderFromNodes(t, ns, 0, allNodes)
+ followerWrap, _ := getFollowerNode(t, ns, 0)
+ follower := followerWrap.s
+ leader := nodeWrapper.s
+ assert.NotNil(t, leader)
+ dcoord := leader.GetCoord()
+ leaderID := dcoord.GetMyID()
+ assert.NotEqual(t, leaderID, follower.GetCoord().GetMyID())
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 5; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ oldNsInfo := getNsInfo(t, ns, 0)
+ time.Sleep(time.Second)
+ // take half of the replicas down and check whether removing will happen
+ t.Logf("stopping follower node: %v", follower.GetCoord().GetMyID())
+ follower.Stop()
+ time.Sleep(time.Second)
+ t.Logf("stopping leader node: %v", leaderID)
+ leader.Stop()
+ gpdServer.pdCoord.SetClusterStableNodeNum(2)
+
+ removed := waitMarkAsRemovingUntilTimeout(t, ns, 0, time.Minute)
+ assert.Equal(t, 0, len(removed))
+ follower.Start()
+ leader.Start()
+
+ waitEnoughReplica(t, ns, 0)
+ waitForAllFullReady(t, ns, 0)
+
+ waitBalancedLeader(t, ns, 0)
+ newNsInfo := getNsInfo(t, ns, 0)
+ oldISR := oldNsInfo.GetISR()
+ sort.Strings(oldISR)
+ newISR := newNsInfo.GetISR()
+ sort.Strings(newISR)
+ assert.Equal(t, oldISR, newISR)
+ nodeWrapper, _ = waitForLeaderFromNodes(t, ns, 0, allNodes)
+ newLeader := nodeWrapper.s
+ assert.NotNil(t, newLeader)
+ if balanceVer == "v2" {
+ // v2 balance may change the leader after the node restarted
+ } else {
+ newLeaderID := newLeader.GetCoord().GetMyID()
+ assert.Equal(t, leaderID, newLeaderID)
+ }
+}
+
+func TestMarkAsRemovingWhileOthersNotSynced(t *testing.T) {
+ // should not mark any failed node as removed while the other raft replicas are not synced (or have no leader)
+ ensureClusterReady(t, 4)
+ newNodes, dataDir := addMoreTestDataNodeToCluster(t, 1)
+ defer cleanDataNodes(newNodes, dataDir)
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+ ensureDataNodesReady(t, pduri, len(gkvList)+1)
+ // stop 2 nodes in the cluster to make sure the replica will be placed on the newly added node
+ for i := 0; i < 2; i++ {
+ gkvList[i].s.Stop()
+ }
+
+ time.Sleep(time.Second)
+ ns := "test_mark_removing_not_synced"
+ partNum := 1
+
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+
+ leaderWrapper, leaderNode := waitForLeader(t, ns, 0)
+ followerWrap, _ := getFollowerNode(t, ns, 0)
+ follower := followerWrap.s
+ leader := leaderWrapper.s
+ assert.NotNil(t, leader)
+ dcoord := leader.GetCoord()
+ leaderID := dcoord.GetMyID()
+ assert.NotEqual(t, leaderID, follower.GetCoord().GetMyID())
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 5; i++ {
+ leaderNode.Node.OptimizeDB("")
+ }
+ oldNsInfo := getNsInfo(t, ns, 0)
+ origNodes, _ := gpdServer.pdCoord.GetAllDataNodes()
+ time.Sleep(time.Second)
+ newNodeID := newNodes[0].s.GetCoord().GetMyID()
+ // stop the new node and another node
+ stoppedNode := follower
+ if newNodeID != follower.GetCoord().GetMyID() {
+ t.Logf("stopping follower node: %v", follower.GetCoord().GetMyID())
+ follower.Stop()
+ time.Sleep(time.Second)
+ } else if leaderID != newNodeID {
+ t.Logf("stopping leader node: %v", leader.GetCoord().GetMyID())
+ leader.Stop()
+ stoppedNode = leader
+ time.Sleep(time.Second)
+ }
+ allNodes, _ := gpdServer.pdCoord.GetAllDataNodes()
+ assert.Equal(t, len(origNodes)-1, len(allNodes))
+ // here we just stop the raft node and remove the local data but keep the server running;
+ // this can make the raft group unstable
+ t.Logf("stopping raft namespace node: %v", newNodeID)
+ newNodes[0].s.GetNamespaceFromFullName(ns + "-0").Destroy()
+ gpdServer.pdCoord.SetClusterStableNodeNum(2)
+
+ // should not remove any node since the group is not fully synced
+ removed := waitMarkAsRemovingUntilTimeout(t, ns, 0, time.Minute)
+ assert.Equal(t, 0, len(removed))
+ allNodes, _ = gpdServer.pdCoord.GetAllDataNodes()
+ assert.Equal(t, len(origNodes)-1, len(allNodes))
+
+ newNsInfo := getNsInfo(t, ns, 0)
+ assert.Equal(t, oldNsInfo.GetISR(), newNsInfo.GetISR())
+
+ stoppedNode.Start()
+ waitEnoughReplica(t, ns, 0)
+ allNodes, _ = gpdServer.pdCoord.GetAllDataNodes()
+ assert.Equal(t, len(origNodes), len(allNodes))
+
+ newNsInfo = getNsInfo(t, ns, 0)
+ assert.Equal(t, oldNsInfo.GetISR(), newNsInfo.GetISR())
+ nodeWrapper, _ := waitForLeader(t, ns, 0)
+ newLeader := nodeWrapper.s
+ assert.NotNil(t, newLeader)
+
+ newNodes[0].s.Stop()
+ for i := 0; i < 2; i++ {
+ gkvList[i].s.Start()
+ }
+
+ waitMarkAsRemoving(t, ns, 0, newNodeID)
+ waitRemoveFromRemoving(t, ns, 0)
+
+ waitForAllFullReady(t, ns, 0)
+ waitBalancedLeader(t, ns, 0)
+ newNsInfo = getNsInfo(t, ns, 0)
+ nodeWrapper, _ = waitForLeader(t, ns, 0)
+ newLeader = nodeWrapper.s
+ assert.NotNil(t, newLeader)
+ assert.Equal(t, newNsInfo.GetISR()[0], newLeader.GetCoord().GetMyID())
+ assert.NotEqual(t, oldNsInfo.GetISR(), newNsInfo.GetISR())
+ for _, nid := range newNsInfo.GetISR() {
+ assert.NotEqual(t, nid, newNodeID)
+ }
+}
+
+func TestRestartCluster(t *testing.T) {
+ // stop all nodes in the cluster and start them one by one
+ ensureClusterReady(t, 4)
+
+ time.Sleep(time.Second)
+ ns := "test_cluster_restart_all"
+ partNum := 1
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ oldNs := getNsInfo(t, ns, 0)
+ for _, kv := range gkvList {
+ kv.s.Stop()
+ }
+
+ time.Sleep(time.Second * 10)
+
+ for _, kv := range gkvList {
+ kv.s.Start()
+ }
+
+ waitEnoughReplica(t, ns, 0)
+ waitForAllFullReady(t, ns, 0)
+ waitBalancedAndJoined(t, ns, 0, leader.GetCoord().GetMyID())
+
+ newNs := getNsInfo(t, ns, 0)
+ test.Equal(t, oldNs.GetISR(), newNs.GetISR())
+ if balanceVer == "v2" {
+ // it may happen that the leader changes after restarting all nodes
+ assert.True(t, newNs.GetRealLeader() != "", newNs.GetRealLeader())
+ } else {
+ test.Equal(t, oldNs.GetRealLeader(), newNs.GetRealLeader())
+ }
+}
+
+func getDeletedNs(t *testing.T, scanDir string) map[string]int64 {
+ // scan the local data root for deleted ns-part dirs and read their magic codes
+ dirList, err := filepath.Glob(path.Join(scanDir, "*-*"))
+ assert.Nil(t, err)
+ magicList := make(map[string]int64)
+ for _, dir := range dirList {
+ t.Logf("found local dir in data root: %v", dir)
+ grpName := path.Base(dir)
+ if strings.Contains(grpName, "deleted") {
+ code, _ := node.LoadMagicCode(path.Join(dir, "magic_"+grpName))
+ magicList[grpName] = code
+ }
+ }
+ return magicList
+}
+
+func TestClusterBalanceToNewNodeAndBack(t *testing.T) {
+ // It should wait until raft is synced before we can start to balance,
+ // and some replicas should be balanced to the newly added nodes
+ ensureClusterReady(t, 4)
+
+ time.Sleep(time.Second)
+ ns := "test_cluster_balance_add_new_node"
+ partNum := 4
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 10; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ oldNsList := make([]cluster.PartitionMetaInfo, 0)
+ for i := 0; i < partNum; i++ {
+ oldNs := getNsInfo(t, ns, i)
+ t.Logf("part %v isr is %v", i, oldNs.GetISR())
+ oldNsList = append(oldNsList, oldNs)
+ }
+ t.Logf("cluster data dir: %v", gtmpDir)
+ serverDatas := make(map[string]map[string]int64)
+ for _, srv := range gkvList {
+ serverDatas[srv.dataPath] = srv.s.GetNsMgr().CheckLocalNamespaces()
+ deletedNs := getDeletedNs(t, srv.dataPath)
+ t.Logf("%v deleted ns: %v", srv.s.GetCoord().GetMyID(), deletedNs)
+ assert.Equal(t, 0, len(deletedNs))
+ }
+ t.Logf("server datas: %v", serverDatas)
+
+ newDataNodes, dataDir := addMoreTestDataNodeToCluster(t, 2)
+ defer cleanDataNodes(newDataNodes, dataDir)
+
+ // wait balance
+ time.Sleep(time.Second * 10)
+
+ start := time.Now()
+ for {
+ time.Sleep(time.Second)
+ needWait := false
+ for _, nn := range newDataNodes {
+ hasReplica := false
+ for i := 0; i < partNum; i++ {
+ nsNode := nn.s.GetNamespaceFromFullName(ns + "-" + strconv.Itoa(i))
+ if nsNode != nil {
+ hasReplica = true
+ t.Logf("new node %v has replica for namespace: %v", nn.s.GetCoord().GetMyID(), nsNode.FullName())
+ break
+ }
+ }
+ if !hasReplica {
+ needWait = true
+ t.Logf("node %v has no replica for namespace", nn.s.GetCoord().GetMyID())
+ }
+ }
+ if !needWait {
+ break
+ }
+ if time.Since(start) > time.Minute {
+ t.Errorf("timeout wait cluster balance")
+ break
+ }
+ }
+
+ for i := 0; i < partNum; i++ {
+ waitEnoughReplica(t, ns, i)
+ waitForAllFullReady(t, ns, i)
+ waitBalancedLeader(t, ns, i)
+ }
+ newNsList := make([]cluster.PartitionMetaInfo, 0)
+ notChangedPart := 0
+ for i := 0; i < partNum; i++ {
+ for {
+ time.Sleep(time.Second * 3)
+ newNs := getNsInfo(t, ns, i)
+ t.Logf("part %v new isr is %v", i, newNs.GetISR())
+ newNsList = append(newNsList, newNs)
+ oldISR := oldNsList[i].GetISR()
+ sort.Sort(sort.StringSlice(oldISR))
+ newISR := newNs.GetISR()
+ sort.Sort(sort.StringSlice(newISR))
+ if len(newISR) != 3 || len(newNs.Removings) > 0 {
+ // wait for the unneeded replica to be removed
+ continue
+ }
+ // some partitions may remain unmoved
+ eq := assert.ObjectsAreEqual(oldISR, newISR)
+ if eq {
+ // this partition was not moved
+ notChangedPart++
+ t.Logf("unmoved partition: %v, %v", i, newISR)
+ }
+ break
+ }
+ }
+ assert.True(t, notChangedPart <= partNum/2, "at least half of the partitions should be balanced to the new nodes")
+ time.Sleep(time.Second * 5)
+ for i := 0; i < partNum; i++ {
+ for {
+ waitRemoveFromRemoving(t, ns, i)
+ waitEnoughReplica(t, ns, i)
+ waitForAllFullReady(t, ns, i)
+ waitBalancedLeader(t, ns, i)
+ newNs := getNsInfo(t, ns, i)
+ newISR := newNs.GetISR()
+ if len(newISR) != 3 || len(newNs.Removings) > 0 {
+ // wait for the unneeded replica to be removed
+ continue
+ }
+ break
+ }
+ }
+ totalDeleted := 0
+ for _, srv := range gkvList {
+ deletedNs := getDeletedNs(t, srv.dataPath)
+ t.Logf("%v deleted ns: %v", srv.s.GetCoord().GetMyID(), deletedNs)
+ totalDeleted += len(deletedNs)
+ }
+
+ t.Logf("after balanced server datas deleted: %v", totalDeleted)
+ assert.True(t, totalDeleted > 1)
+
+ for _, nn := range newDataNodes {
+ t.Logf("begin stopping new added node: %v", nn.s.GetCoord().GetMyID())
+ nn.s.Stop()
+
+ for i := 0; i < partNum; i++ {
+ start := time.Now()
+ for {
+ if time.Since(start) > time.Minute*2 {
+ t.Errorf("timeout wait cluster balance for stopped node")
+ break
+ }
+ time.Sleep(time.Second * 5)
+ needWait := false
+ nsInfo := getNsInfo(t, ns, i)
+ newISR := nsInfo.GetISR()
+ for _, nid := range nsInfo.GetISR() {
+ if nid == nn.s.GetCoord().GetMyID() {
+ needWait = true
+ t.Logf("stopped new node %v still has replica for namespace: %v, %v", nn.s.GetCoord().GetMyID(), nsInfo.GetISR(), nsInfo.GetDesp())
+ break
+ }
+ }
+ if _, ok := nsInfo.Removings[nn.s.GetCoord().GetMyID()]; ok {
+ needWait = true
+ t.Logf("stopped new node %v still waiting removing for namespace: %v", nn.s.GetCoord().GetMyID(), nsInfo.GetDesp())
+ }
+ if !needWait {
+ if len(newISR) != 3 || len(nsInfo.Removings) > 0 {
+ // wait for the unneeded replica to be removed
+ continue
+ }
+ t.Logf("%v balanced isr: %v", nsInfo.GetDesp(), newISR)
+ break
+ }
+ }
+ waitBalancedLeader(t, ns, i)
+ }
+
+ time.Sleep(time.Second * 5)
+ for i := 0; i < partNum; i++ {
+ start := time.Now()
+ for {
+ if time.Since(start) > time.Minute*2 {
+ t.Errorf("timeout waiting balance for stopped")
+ break
+ }
+ waitRemoveFromRemoving(t, ns, i)
+ waitEnoughReplica(t, ns, i)
+ waitForAllFullReady(t, ns, i)
+ waitBalancedLeader(t, ns, i)
+ newNs := getNsInfo(t, ns, i)
+ newISR := newNs.GetISR()
+ if len(newISR) != 3 || len(newNs.Removings) > 0 {
+ // wait for the unneeded replica to be removed
+ continue
+ }
+ t.Logf("%v balanced isr: %v", newNs.GetDesp(), newISR)
+ break
+ }
+ }
+ }
+
+ time.Sleep(time.Second * 5)
+ for i := 0; i < partNum; i++ {
+ waitEnoughReplica(t, ns, i)
+ waitForAllFullReady(t, ns, i)
+ waitBalancedLeader(t, ns, i)
+ }
+ newNsList = make([]cluster.PartitionMetaInfo, 0)
+ for i := 0; i < partNum; i++ {
+ newNs := getNsInfo(t, ns, i)
+ t.Logf("part %v final new isr is %v", i, newNs.GetISR())
+ newNsList = append(newNsList, newNs)
+ oldISR := oldNsList[i].GetISR()
+ sort.Sort(sort.StringSlice(oldISR))
+ newISR := newNs.GetISR()
+ sort.Sort(sort.StringSlice(newISR))
+ if balanceVer == "v2" {
+ // to reduce data migration in v2, the old isr may not stay the same,
+ // so we only check that the stopped new nodes are no longer in the isr
+ for _, nn := range newDataNodes {
+ nid := nn.s.GetCoord().GetMyID()
+ for _, n := range newISR {
+ assert.NotEqual(t, n, nid)
+ }
+ }
+ } else {
+ assert.Equal(t, oldISR, newISR)
+ }
+ }
+
+ for i := 0; i < partNum; i++ {
+ waitBalancedLeader(t, ns, i)
+ newNs := getNsInfo(t, ns, i)
+ t.Logf("new info for part %v: %v, %v", i, newNs.GetRealLeader(), newNs.GetISR())
+ }
+}
+
+func TestClusterIncrReplicaOneByOne(t *testing.T) {
+ // While increasing replicas, we need to add new replicas one by one to avoid having 2 failed nodes in raft.
+ ensureClusterReady(t, 4)
+
+ time.Sleep(time.Second)
+ ns := "test_cluster_increase_replicas"
+ partNum := 1
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 2)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ oldNs := getNsInfo(t, ns, 0)
+ t.Logf("old isr is: %v", oldNs)
+ assert.Equal(t, 2, len(oldNs.GetISR()))
+
+ err := gpdServer.pdCoord.ChangeNamespaceMetaParam(ns, 4, "", 0)
+ assert.Nil(t, err)
+
+ lastNs := oldNs
+ for {
+ time.Sleep(time.Second)
+ newNs := getNsInfo(t, ns, 0)
+ t.Logf("new isr is: %v", newNs)
+ assert.True(t, len(newNs.GetISR()) <= len(lastNs.GetISR())+1)
+ lastNs = newNs
+ waitForAllFullReady(t, ns, 0)
+ if len(newNs.GetISR()) == 4 {
+ break
+ }
+ }
+ waitEnoughReplica(t, ns, 0)
+ waitForAllFullReady(t, ns, 0)
+ waitBalancedLeader(t, ns, 0)
+
+ newNs := getNsInfo(t, ns, 0)
+ t.Logf("new isr is: %v", newNs)
+ assert.Equal(t, 4, len(newNs.GetISR()))
+ for _, old := range oldNs.GetISR() {
+ found := false
+ for _, nid := range newNs.GetISR() {
+ if old == nid {
+ found = true
+ break
+ }
+ }
+ assert.True(t, found)
+ }
+}
+
+func TestClusterDecrReplicaOneByOne(t *testing.T) {
+ // While decreasing replicas, we need to remove replicas one by one to avoid having 2 failed nodes in raft.
+ ensureClusterReady(t, 4)
+
+ time.Sleep(time.Second)
+ ns := "test_cluster_decrease_replicas"
+ partNum := 1
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 4)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ oldNs := getNsInfo(t, ns, 0)
+ t.Logf("old isr is: %v", oldNs)
+ assert.Equal(t, 4, len(oldNs.GetISR()))
+
+ err := gpdServer.pdCoord.ChangeNamespaceMetaParam(ns, 2, "", 0)
+ assert.Nil(t, err)
+
+ lastNs := oldNs
+ for {
+ time.Sleep(time.Second)
+ newNs := getNsInfo(t, ns, 0)
+ t.Logf("new isr is: %v", newNs)
+ assert.True(t, len(newNs.GetISR()) >= len(lastNs.GetISR())-1)
+ lastNs = newNs
+ if len(newNs.Removings) > 0 {
+ continue
+ }
+ waitForAllFullReady(t, ns, 0)
+ if len(newNs.GetISR()) == 2 {
+ break
+ }
+ }
+ waitEnoughReplica(t, ns, 0)
+ waitForAllFullReady(t, ns, 0)
+ waitBalancedLeader(t, ns, 0)
+
+ newNs := getNsInfo(t, ns, 0)
+ t.Logf("new isr is: %v", newNs)
+ assert.Equal(t, 2, len(newNs.GetISR()))
+ assert.Equal(t, 0, len(newNs.Removings))
+ for _, nid := range newNs.GetISR() {
+ found := false
+ for _, old := range oldNs.GetISR() {
+ if old == nid {
+ found = true
+ break
+ }
+ }
+ assert.True(t, found)
+ }
+}
+
+func TestRestartWithForceAloneWithLearnerAndRemovedNode(t *testing.T) {
+ // test force restart as standalone:
+ // force start as standalone for a normal node and for a learner node,
+ // and also force restart as standalone after some nodes have been removed before
+ node.EnableForTest()
+ ensureClusterReady(t, 3)
+
+ time.Sleep(time.Second)
+ ns := "test_force_restart_alone_after_add_learner_remove_node"
+ partNum := 1
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+
+ dnw, leaderNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ leaderNode.Node.OptimizeDB("")
+
+ node.SetSyncerOnly(true)
+ defer node.SetSyncerOnly(false)
+ remotePD, remoteSrvs, remoteTmpDir := startRemoteSyncTestCluster(t, 1)
+ defer func() {
+ for _, kv := range remoteSrvs {
+ kv.s.Stop()
+ }
+ if remotePD != nil {
+ remotePD.Stop()
+ }
+ if strings.Contains(remoteTmpDir, "rocksdb-test") {
+ t.Logf("removing: %v", remoteTmpDir)
+ os.RemoveAll(remoteTmpDir)
+ }
+ }()
+ pduri = "http://127.0.0.1:" + pdRemoteHttpPort
+ for _, lrnSrv := range remoteSrvs {
+ lrnSrv.s.GetCoord().UpdateSyncerWriteOnly(true)
+ }
+ ensureDataNodesReady(t, pduri, len(remoteSrvs))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 1)
+ defer ensureDeleteNamespace(t, pduri, ns)
+
+ learnerPD, learnerSrvs, tmpDir := startTestClusterForLearner(t, 1)
+ defer func() {
+ for _, kv := range learnerSrvs {
+ kv.s.Stop()
+ }
+ if learnerPD != nil {
+ learnerPD.Stop()
+ }
+ if strings.Contains(tmpDir, "learner-test") {
+ t.Logf("removing: %v", tmpDir)
+ os.RemoveAll(tmpDir)
+ }
+ }()
+ time.Sleep(time.Second * 3)
+
+ t.Logf("begin wait first before restart")
+ waitRemoteClusterSync(t, ns, leaderNode, learnerSrvs, remoteSrvs)
+ oldNs := getNsInfo(t, ns, 0)
+ t.Logf("new isr is: %v", oldNs)
+ assert.Equal(t, 1, len(oldNs.LearnerNodes))
+
+ err := gpdServer.pdCoord.ChangeNamespaceMetaParam(ns, 1, "", 0)
+ assert.Nil(t, err)
+
+ time.Sleep(time.Second * 10)
+ for {
+ time.Sleep(time.Second)
+ newNs := getNsInfo(t, ns, 0)
+ t.Logf("new isr is: %v", newNs)
+ waitForAllFullReady(t, ns, 0)
+ if len(newNs.GetISR()) < 2 {
+ break
+ }
+ }
+
+ waitEnoughReplica(t, ns, 0)
+ waitForAllFullReady(t, ns, 0)
+ waitBalancedLeader(t, ns, 0)
+
+ // restart the leader as standalone
+ leader.RestartAsStandalone(common.GetNsDesp(ns, 0))
+ time.Sleep(time.Second)
+
+ waitEnoughReplica(t, ns, 0)
+ waitForAllFullReady(t, ns, 0)
+
+ dnw, leaderNode = waitForLeader(t, ns, 0)
+ leader = dnw.s
+ assert.NotNil(t, leader)
+ leaderNode.Node.OptimizeDB("")
+ time.Sleep(time.Second * 3)
+
+ err = gpdServer.pdCoord.ChangeNamespaceMetaParam(ns, 2, "", 0)
+ assert.Nil(t, err)
+
+ for {
+ time.Sleep(time.Second)
+ newNs := getNsInfo(t, ns, 0)
+ t.Logf("new isr is: %v", newNs)
+ waitForAllFullReady(t, ns, 0)
+ if len(newNs.GetISR()) == 2 {
+ break
+ }
+ }
+ waitEnoughReplica(t, ns, 0)
+ waitForAllFullReady(t, ns, 0)
+ waitBalancedLeader(t, ns, 0)
+}
+
+func TestInstallSnapshotOnFollower(t *testing.T) {
+ // Test the follower falling behind too much, so the leader sends a snapshot to the follower.
+ ensureClusterReady(t, 4)
+
+ time.Sleep(time.Second)
+ ns := "test_cluster_snap_install"
+ partNum := 1
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 5; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ c := getTestRedisConn(t, dnw.redisPort)
+ defer c.Close()
+ for i := 0; i < 100; i++ {
+ key := fmt.Sprintf("%s:%s:%v", ns, "snap_transfer:k1", i)
+ rsp, err := goredis.String(c.Do("set", key, key))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", rsp)
+ }
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ oldNs := getNsInfo(t, ns, 0)
+ t.Logf("old isr is: %v", oldNs)
+ assert.Equal(t, 3, len(oldNs.GetISR()))
+ foWrap, _ := getFollowerNode(t, ns, 0)
+ foWrap.s.Stop()
+
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ for i := 0; i < 50; i++ {
+ key := fmt.Sprintf("%s:%s:%v", ns, "snap_transfer:k1", i)
+ rsp, err := goredis.String(c.Do("set", key, "updated"+key))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", rsp)
+ }
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ for i := 0; i < 100; i++ {
+ key := fmt.Sprintf("%s:%s:%v", ns, "snap_transfer:k1", i)
+ leaderV, err := goredis.String(c.Do("get", key))
+ assert.True(t, err == nil || err == goredis.ErrNil)
+ if i < 50 {
+ assert.Equal(t, "updated"+key, leaderV)
+ } else {
+ assert.Equal(t, key, leaderV)
+ }
+ }
+
+ foWrap.s.Start()
+ time.Sleep(time.Second * 10)
+ addr := fmt.Sprintf("http://127.0.0.1:%v", foWrap.httpPort)
+ enableStaleRead(t, addr, true)
+ followerConn := getTestRedisConn(t, foWrap.redisPort)
+ defer followerConn.Close()
+
+ waitForAllFullReady(t, ns, 0)
+ time.Sleep(time.Second * 3)
+
+ for i := 0; i < 100; i++ {
+ key := fmt.Sprintf("%s:%s:%v", ns, "snap_transfer:k1", i)
+ getV, err := goredis.String(followerConn.Do("get", key))
+ assert.True(t, err == nil || err == goredis.ErrNil)
+ if i < 50 {
+ assert.Equal(t, "updated"+key, getV)
+ } else {
+ assert.Equal(t, key, getV)
+ }
+ }
+ enableStaleRead(t, addr, false)
+}
+
+func TestInstallSnapshotTransferFailed(t *testing.T) {
+ // Test the follower falling behind too much, so the leader sends a snapshot to the follower.
+ // However, the follower fails to pull the snapshot data from the leader, so the raft node should stop
+ // and retry pulling the snapshot data later.
+
+ // The test case should make sure the snapshot is not persisted to stable storage since the snapshot data failed to pull.
+ // Data written after the snapshot should not be readable until the snapshot failure is recovered.
+ node.EnableSnapForTest(true, false, false, false)
+ defer node.EnableSnapForTest(false, false, false, false)
+
+ ensureClusterReady(t, 4)
+
+ time.Sleep(time.Second)
+ ns := "test_cluster_snap_transfer_failed"
+ partNum := 1
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 5; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ oldNs := getNsInfo(t, ns, 0)
+ t.Logf("old isr is: %v", oldNs)
+ assert.Equal(t, 3, len(oldNs.GetISR()))
+ foWrap, _ := getFollowerNode(t, ns, 0)
+ foWrap.s.Stop()
+
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ c := getTestRedisConn(t, dnw.redisPort)
+ defer c.Close()
+ key := fmt.Sprintf("%s:%s", ns, "snap_transfer:k1")
+ rsp, err := goredis.String(c.Do("set", key, "1234"))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", rsp)
+
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ leaderV, err := goredis.String(c.Do("get", key))
+ assert.True(t, err == nil || err == goredis.ErrNil)
+ assert.Equal(t, "1234", leaderV)
+
+ foWrap.s.Start()
+ time.Sleep(time.Second * 10)
+ addr := fmt.Sprintf("http://127.0.0.1:%v", foWrap.httpPort)
+ enableStaleRead(t, addr, true)
+ // the snapshot should fail
+ followerConn := getTestRedisConn(t, foWrap.redisPort)
+ defer followerConn.Close()
+ for i := 0; i < 10; i++ {
+ getV, err := goredis.String(followerConn.Do("get", key))
+ assert.NotNil(t, err)
+ t.Logf("read follower should failed: %v", err.Error())
+ assert.True(t, getV == "")
+ time.Sleep(time.Second)
+ }
+
+ node.EnableSnapForTest(false, false, false, false)
+ waitForAllFullReady(t, ns, 0)
+ time.Sleep(time.Second * 3)
+
+ getV, err := goredis.String(followerConn.Do("get", key))
+ assert.True(t, err == nil || err == goredis.ErrNil)
+ assert.Equal(t, "1234", getV)
+ enableStaleRead(t, addr, false)
+}
+
+func TestInstallSnapshotSaveRaftFailed(t *testing.T) {
+ // test the snapshot transferring to the follower successfully, but the follower failing to save the snapshot meta to raft storage;
+ // it should restart to re-apply.
+ // The hardstate and snapshot saving are not atomic; enable this test once we can make them so.
+ defer node.EnableSnapForTest(false, false, false, false)
+
+ ensureClusterReady(t, 4)
+
+ time.Sleep(time.Second)
+ ns := "test_cluster_snap_save_failed"
+ partNum := 1
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 5; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ oldNs := getNsInfo(t, ns, 0)
+ t.Logf("old isr is: %v", oldNs)
+ assert.Equal(t, 3, len(oldNs.GetISR()))
+
+ foWrap, _ := getFollowerNode(t, ns, 0)
+ foWrap.s.Stop()
+
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ c := getTestRedisConn(t, dnw.redisPort)
+ defer c.Close()
+ key := fmt.Sprintf("%s:%s", ns, "snap_save:k1")
+ rsp, err := goredis.String(c.Do("set", key, "1234"))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", rsp)
+
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ leaderV, err := goredis.String(c.Do("get", key))
+ assert.True(t, err == nil || err == goredis.ErrNil)
+ assert.Equal(t, "1234", leaderV)
+ time.Sleep(time.Second * 5)
+
+ node.EnableSnapForTest(false, true, false, false)
+ foWrap.s.Start()
+ time.Sleep(time.Second * 10)
+ addr := fmt.Sprintf("http://127.0.0.1:%v", foWrap.httpPort)
+ enableStaleRead(t, addr, true)
+ // the snapshot should fail
+ followerConn := getTestRedisConn(t, foWrap.redisPort)
+ defer followerConn.Close()
+ for i := 0; i < 10; i++ {
+ getV, err := goredis.String(followerConn.Do("get", key))
+ assert.NotNil(t, err)
+ t.Logf("read follower should failed: %v", err.Error())
+ assert.True(t, getV == "")
+ time.Sleep(time.Second)
+ }
+
+ node.EnableSnapForTest(false, false, false, false)
+ waitForAllFullReady(t, ns, 0)
+ time.Sleep(time.Second * 3)
+
+ getV, err := goredis.String(followerConn.Do("get", key))
+ assert.True(t, err == nil || err == goredis.ErrNil)
+ assert.Equal(t, "1234", getV)
+ enableStaleRead(t, addr, false)
+}
+
+func TestInstallSnapshotApplyFailed(t *testing.T) {
+ // test the snapshot transferring to the follower successfully, but the follower failing to apply it;
+ // it should restart to re-apply, and during restart the snapshot will be restored successfully so no apply is needed in the raft loop
+ defer node.EnableSnapForTest(false, false, false, false)
+
+ ensureClusterReady(t, 4)
+
+ time.Sleep(time.Second)
+ ns := "test_cluster_snap_apply_failed"
+ partNum := 1
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 5; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ oldNs := getNsInfo(t, ns, 0)
+ t.Logf("old isr is: %v", oldNs)
+ assert.Equal(t, 3, len(oldNs.GetISR()))
+
+ foWrap, _ := getFollowerNode(t, ns, 0)
+ foWrap.s.Stop()
+
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ c := getTestRedisConn(t, dnw.redisPort)
+ defer c.Close()
+ key := fmt.Sprintf("%s:%s", ns, "snap_apply:k1")
+ rsp, err := goredis.String(c.Do("set", key, "1234"))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", rsp)
+
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ leaderV, err := goredis.String(c.Do("get", key))
+ assert.True(t, err == nil || err == goredis.ErrNil)
+ assert.Equal(t, "1234", leaderV)
+ time.Sleep(time.Second * 5)
+
+ node.EnableSnapForTest(false, false, true, false)
+ foWrap.s.Start()
+ // apply failed but the restart succeeds, so we can be ready after restart
+ waitForAllFullReady(t, ns, 0)
+ time.Sleep(time.Second * 3)
+ addr := fmt.Sprintf("http://127.0.0.1:%v", foWrap.httpPort)
+ enableStaleRead(t, addr, true)
+ followerConn := getTestRedisConn(t, foWrap.redisPort)
+ defer followerConn.Close()
+
+ getV, err := goredis.String(followerConn.Do("get", key))
+ assert.True(t, err == nil || err == goredis.ErrNil)
+ assert.Equal(t, "1234", getV)
+ enableStaleRead(t, addr, false)
+}
+
+func TestInstallSnapshotApplyRestoreFailed(t *testing.T) {
+ // a restore failure makes both the snapshot apply and the restart fail to restore
+ defer node.EnableSnapForTest(false, false, false, false)
+
+ ensureClusterReady(t, 4)
+
+ time.Sleep(time.Second)
+ ns := "test_cluster_snap_apply_restore_failed"
+ partNum := 1
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 5; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ oldNs := getNsInfo(t, ns, 0)
+ t.Logf("old isr is: %v", oldNs)
+ assert.Equal(t, 3, len(oldNs.GetISR()))
+
+ foWrap, _ := getFollowerNode(t, ns, 0)
+ foWrap.s.Stop()
+
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ c := getTestRedisConn(t, dnw.redisPort)
+ defer c.Close()
+ key := fmt.Sprintf("%s:%s", ns, "snap_apply:k1")
+ rsp, err := goredis.String(c.Do("set", key, "1234"))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", rsp)
+
+ for i := 0; i < 50; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ leaderV, err := goredis.String(c.Do("get", key))
+ assert.True(t, err == nil || err == goredis.ErrNil)
+ assert.Equal(t, "1234", leaderV)
+ time.Sleep(time.Second * 5)
+
+ node.EnableSnapForTest(false, false, false, true)
+ foWrap.s.Start()
+ time.Sleep(time.Second * 10)
+
+ addr := fmt.Sprintf("http://127.0.0.1:%v", foWrap.httpPort)
+ enableStaleRead(t, addr, true)
+ followerConn := getTestRedisConn(t, foWrap.redisPort)
+ defer followerConn.Close()
+
+ for i := 0; i < 10; i++ {
+ getV, err := goredis.String(followerConn.Do("get", key))
+ assert.NotNil(t, err)
+ t.Logf("read follower should failed: %v", err.Error())
+ assert.True(t, getV == "")
+ time.Sleep(time.Second)
+ }
+
+ node.EnableSnapForTest(false, false, false, false)
+ // apply failed but the restart succeeds, so we can be ready after restart
+ waitForAllFullReady(t, ns, 0)
+ time.Sleep(time.Second * 5)
+
+ getV, err := goredis.String(followerConn.Do("get", key))
+ assert.True(t, err == nil || err == goredis.ErrNil)
+ assert.Equal(t, "1234", getV)
+ enableStaleRead(t, addr, false)
+}
+
+func TestSyncerWriteOnlyInitTrueLoadFromRegister(t *testing.T) {
+ testSyncerWriteOnlyLoadFromRegister(t, true)
+}
+
+func TestSyncerWriteOnlyInitFalseLoadFromRegister(t *testing.T) {
+ testSyncerWriteOnlyLoadFromRegister(t, false)
+}
+
+func testSyncerWriteOnlyLoadFromRegister(t *testing.T, syncerOnly bool) {
+ clusterName := "unit-test_syncer_write_only"
+ pd, dataNodes, dataDir := startTestCluster(t, syncerOnly, clusterName, pdHttpPort, 1, baseRedisPort+2000)
+ defer os.RemoveAll(dataDir)
+ defer pd.Stop()
+ defer dataNodes[0].s.Stop()
+
+ time.Sleep(time.Second)
+ ns := "test_cluster_start_init_syncer_write_only"
+ partNum := 1
+ pduri := "http://127.0.0.1:" + pdHttpPort
+
+ ensureDataNodesReady(t, pduri, len(dataNodes))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 1)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, _ := waitForLeaderFromNodes(t, ns, 0, dataNodes)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ test.Equal(t, syncerOnly, node.IsSyncerOnly())
+ // test restart (should ignore local config)
+ dataNodes[0].s.Stop()
+ dataNodes[0].s.Start()
+ dnw, _ = waitForLeaderFromNodes(t, ns, 0, dataNodes)
+ leader = dnw.s
+ assert.NotNil(t, leader)
+ test.Equal(t, syncerOnly, node.IsSyncerOnly())
+
+ dataNodes[0].s.Stop()
+ dataNodes[0].s.GetCoord().UpdateSyncerWriteOnly(!syncerOnly)
+ dataNodes[0].s.Start()
+ dnw, _ = waitForLeaderFromNodes(t, ns, 0, dataNodes)
+ leader = dnw.s
+ assert.NotNil(t, leader)
+ test.Equal(t, !syncerOnly, node.IsSyncerOnly())
+ dataNodes[0].s.Stop()
+
+ dataNodes[0].s.Start()
+ dnw, _ = waitForLeaderFromNodes(t, ns, 0, dataNodes)
+ leader = dnw.s
+ assert.NotNil(t, leader)
+ test.Equal(t, !syncerOnly, node.IsSyncerOnly())
+}
+
+func TestNamespaceMagicCodeChangedAfterRecreate(t *testing.T) {
+ ensureClusterReady(t, 3)
+ time.Sleep(time.Second)
+ ns := "test_cluster_ns_recreate"
+ partNum := 1
+
+ pduri := "http://127.0.0.1:" + pdHttpPort
+ ensureDataNodesReady(t, pduri, len(gkvList))
+ enableAutoBalance(t, pduri, true)
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ defer ensureDeleteNamespace(t, pduri, ns)
+ dnw, nsNode := waitForLeader(t, ns, 0)
+ leader := dnw.s
+ assert.NotNil(t, leader)
+ // call this to propose some requests to write raft logs
+ for i := 0; i < 10; i++ {
+ nsNode.Node.OptimizeDB("")
+ }
+ serverDatas := make(map[string]map[string]int64)
+ for _, srv := range gkvList {
+ serverDatas[srv.dataPath] = srv.s.GetNsMgr().CheckLocalNamespaces()
+ }
+ t.Logf("server datas: %v", serverDatas)
+
+ ensureDeleteNamespace(t, pduri, ns)
+ time.Sleep(time.Second * 5)
+
+ serverDatas2 := make(map[string]map[string]int64)
+ for _, srv := range gkvList {
+ serverDatas2[srv.dataPath] = srv.s.GetNsMgr().CheckLocalNamespaces()
+ assert.Equal(t, 0, len(serverDatas2[srv.dataPath]))
+ deletedNs := getDeletedNs(t, srv.dataPath)
+ t.Logf("%v server ns deleted: %v", srv.dataPath, deletedNs)
+ assert.Equal(t, len(serverDatas[srv.dataPath]), len(deletedNs))
+ }
+ t.Logf("server datas after ns deleted: %v", serverDatas2)
+
+ ensureNamespace(t, pduri, ns, partNum, 3)
+ waitForLeader(t, ns, 0)
+
+ for _, srv := range gkvList {
+ recreatedNsList := srv.s.GetNsMgr().CheckLocalNamespaces()
+ oldNs := serverDatas[srv.dataPath]
+ for name, nsMagic := range recreatedNsList {
+ oldMagic := oldNs[name]
+ t.Logf("ns %v magic: %v, %v", name, oldMagic, nsMagic)
+ assert.NotEqual(t, int64(0), oldMagic)
+ assert.NotEqual(t, oldMagic, nsMagic)
+ }
+ assert.True(t, len(recreatedNsList) > 0)
+ }
}
diff --git a/pkg/fileutil/fileutil.go b/pkg/fileutil/fileutil.go
index fce5126c..b6520deb 100644
--- a/pkg/fileutil/fileutil.go
+++ b/pkg/fileutil/fileutil.go
@@ -37,6 +37,10 @@ var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil")
)
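+// send capnslog output to stdout using the default formatter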
+func init() {
+ capnslog.SetFormatter(capnslog.NewDefaultFormatter(os.Stdout))
+}
+
// IsDirWriteable checks if dir is writable by writing and removing a file
// to dir. It returns nil if dir is writable.
func IsDirWriteable(dir string) error {
diff --git a/pkg/fileutil/purge.go b/pkg/fileutil/purge.go
index 92fceab0..4579fda9 100644
--- a/pkg/fileutil/purge.go
+++ b/pkg/fileutil/purge.go
@@ -23,13 +23,22 @@ import (
)
func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
- return purgeFile(dirname, suffix, max, interval, stop, nil)
+ return purgeFile(dirname, suffix, max, interval, stop, nil, nil)
+}
+
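+// PurgeFileWithDoneNotify is like PurgeFile but additionally returns a done
+// channel that is closed once the purge goroutine has exited (e.g. after stop
+// is closed). A sketch of the intended usage (names are illustrative only):
+//
+//	doneC, errC := PurgeFileWithDoneNotify(dir, "wal", 5, time.Minute, stopC)
+//	// ... later, on shutdown:
+//	close(stopC)
+//	<-doneC // purging has fully stopped; errC can be drained or ignored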
+func PurgeFileWithDoneNotify(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) {
+ doneC := make(chan struct{})
+ errC := purgeFile(dirname, suffix, max, interval, stop, nil, doneC)
+ return doneC, errC
}
// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil.
-func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error {
+func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error {
errC := make(chan error, 1)
go func() {
+ if donec != nil {
+ defer close(donec)
+ }
for {
fnames, err := ReadDir(dirname)
if err != nil {
diff --git a/pkg/fileutil/purge_test.go b/pkg/fileutil/purge_test.go
index addd8e82..2f66146a 100644
--- a/pkg/fileutil/purge_test.go
+++ b/pkg/fileutil/purge_test.go
@@ -43,7 +43,7 @@ func TestPurgeFile(t *testing.T) {
stop, purgec := make(chan struct{}), make(chan string, 10)
// keep 3 most recent files
- errch := purgeFile(dir, "test", 3, time.Millisecond, stop, purgec)
+ errch := purgeFile(dir, "test", 3, time.Millisecond, stop, purgec, nil)
select {
case f := <-purgec:
t.Errorf("unexpected purge on %q", f)
@@ -114,7 +114,7 @@ func TestPurgeFileHoldingLockFile(t *testing.T) {
}
stop, purgec := make(chan struct{}), make(chan string, 10)
- errch := purgeFile(dir, "test", 3, time.Millisecond, stop, purgec)
+ errch := purgeFile(dir, "test", 3, time.Millisecond, stop, purgec, nil)
for i := 0; i < 5; i++ {
select {
diff --git a/pkg/ioutil/util.go b/pkg/ioutil/util.go
index af2765f4..51946280 100644
--- a/pkg/ioutil/util.go
+++ b/pkg/ioutil/util.go
@@ -18,7 +18,7 @@ import (
"io"
"os"
- "github.com/absolute8511/ZanRedisDB/pkg/fileutil"
+ "github.com/youzan/ZanRedisDB/pkg/fileutil"
)
// WriteAndSyncFile behaves just like ioutil.WriteFile in the standard library,
diff --git a/pkg/pbutil/pbutil.go b/pkg/pbutil/pbutil.go
index d70f98dd..e4fef2d3 100644
--- a/pkg/pbutil/pbutil.go
+++ b/pkg/pbutil/pbutil.go
@@ -39,15 +39,15 @@ func MustMarshal(m Marshaler) []byte {
func MustUnmarshal(um Unmarshaler, data []byte) {
if err := um.Unmarshal(data); err != nil {
- plog.Panicf("unmarshal should never fail (%v)", err)
+ plog.Panicf("unmarshal should never fail (%v). %v", err, data)
}
}
-func MaybeUnmarshal(um Unmarshaler, data []byte) bool {
+func MaybeUnmarshal(um Unmarshaler, data []byte) error {
if err := um.Unmarshal(data); err != nil {
- return false
+ return err
}
- return true
+ return nil
}
func GetBool(v *bool) (vv bool, set bool) {
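With MaybeUnmarshal now returning the underlying error instead of a bare bool, call sites can report why decoding failed. A hedged sketch of the adjusted call-site pattern; the decode helper and the choice of raftpb.Snapshot as the message type are illustrative:

package main

import (
	"log"

	"github.com/youzan/ZanRedisDB/pkg/pbutil"
	"github.com/youzan/ZanRedisDB/raft/raftpb"
)

// decode shows the adjusted call site; data would normally come from a WAL
// record or snapshot file.
func decode(data []byte) {
	var snap raftpb.Snapshot
	if err := pbutil.MaybeUnmarshal(&snap, data); err != nil {
		// the old bool return could only say "failed"; the error now says why
		log.Printf("skipping undecodable record: %v", err)
	}
}

func main() { decode(nil) }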
diff --git a/pkg/testutil/leak.go b/pkg/testutil/leak.go
index 4740e654..e26658eb 100644
--- a/pkg/testutil/leak.go
+++ b/pkg/testutil/leak.go
@@ -21,7 +21,7 @@ CheckLeakedGoroutine verifies tests do not leave any leaky
goroutines. It returns true when there are goroutines still
running(leaking) after all tests.
- import "github.com/absolute8511/ZanRedisDB/pkg/testutil"
+ import "github.com/youzan/ZanRedisDB/pkg/testutil"
func TestMain(m *testing.M) {
v := m.Run()
@@ -125,8 +125,8 @@ func interestingGoroutines() (gs []string) {
strings.Contains(stack, "created by testing.RunTests") ||
strings.Contains(stack, "testing.Main(") ||
strings.Contains(stack, "runtime.goexit") ||
- strings.Contains(stack, "github.com/absolute8511/ZanRedisDB/pkg/testutil.interestingGoroutines") ||
- strings.Contains(stack, "github.com/absolute8511/ZanRedisDB/pkg/logutil.(*MergeLogger).outputLoop") ||
+ strings.Contains(stack, "github.com/youzan/ZanRedisDB/pkg/testutil.interestingGoroutines") ||
+ strings.Contains(stack, "github.com/youzan/ZanRedisDB/pkg/logutil.(*MergeLogger).outputLoop") ||
strings.Contains(stack, "github.com/golang/glog.(*loggingT).flushDaemon") ||
strings.Contains(stack, "created by runtime.gc") ||
strings.Contains(stack, "runtime.MHeap_Scavenger") {
diff --git a/pkg/transport/listener.go b/pkg/transport/listener.go
index ff814bf3..f1703ddb 100644
--- a/pkg/transport/listener.go
+++ b/pkg/transport/listener.go
@@ -31,7 +31,7 @@ import (
"strings"
"time"
- "github.com/absolute8511/ZanRedisDB/pkg/tlsutil"
+ "github.com/youzan/ZanRedisDB/pkg/tlsutil"
)
func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) {
diff --git a/pkg/types/urls_test.go b/pkg/types/urls_test.go
index 93d6d220..06a781c5 100644
--- a/pkg/types/urls_test.go
+++ b/pkg/types/urls_test.go
@@ -18,7 +18,7 @@ import (
"reflect"
"testing"
- "github.com/absolute8511/ZanRedisDB/pkg/testutil"
+ "github.com/youzan/ZanRedisDB/pkg/testutil"
)
func TestNewURLs(t *testing.T) {
diff --git a/pkg/types/urlsmap_test.go b/pkg/types/urlsmap_test.go
index 14e16986..003ebf9e 100644
--- a/pkg/types/urlsmap_test.go
+++ b/pkg/types/urlsmap_test.go
@@ -15,7 +15,7 @@
package types
import (
- "github.com/absolute8511/ZanRedisDB/pkg/testutil"
+ "github.com/youzan/ZanRedisDB/pkg/testutil"
"reflect"
"testing"
)
diff --git a/pkg/wait/wait.go b/pkg/wait/wait.go
index 9b1df419..c7db503b 100644
--- a/pkg/wait/wait.go
+++ b/pkg/wait/wait.go
@@ -21,69 +21,124 @@ import (
"sync"
)
+type WaitResult interface {
+ GetResult() interface{}
+ WaitC() <-chan struct{}
+}
+
// Wait is an interface that provides the ability to wait and trigger events that
// are associated with IDs.
type Wait interface {
// Register waits returns a chan that waits on the given ID.
// The chan will be triggered when Trigger is called with
// the same ID.
- Register(id uint64) <-chan interface{}
+ Register(id uint64) WaitResult
// Trigger triggers the waiting chans with the given ID.
Trigger(id uint64, x interface{})
IsRegistered(id uint64) bool
+ RegisterWithC(id uint64, done chan struct{}) WaitResult
+}
+
+type multList [32]*list
+
+type resultData struct {
+ value interface{}
+ done chan struct{}
+}
+
+func newResultData(done chan struct{}) *resultData {
+ if done == nil {
+ return &resultData{
+ done: make(chan struct{}, 1),
+ }
+ }
+ return &resultData{
+ done: done,
+ }
+}
+
+func (rd *resultData) GetResult() interface{} {
+ return rd.value
+}
+
+func (rd *resultData) WaitC() <-chan struct{} {
+ return rd.done
}
type list struct {
- l sync.RWMutex
- m map[uint64]chan interface{}
+ l sync.Mutex
+ m map[uint64]*resultData
}
// New creates a Wait.
func New() Wait {
- return &list{m: make(map[uint64]chan interface{})}
+ ml := multList{}
+ for i := range ml {
+ ml[i] = &list{
+ m: make(map[uint64]*resultData),
+ }
+ }
+ return ml
}
-func (w *list) Register(id uint64) <-chan interface{} {
+func (mw multList) RegisterWithC(id uint64, done chan struct{}) WaitResult {
+ w := mw[id%uint64(len(mw))]
+ e := newResultData(done)
w.l.Lock()
defer w.l.Unlock()
- ch := w.m[id]
- if ch == nil {
- ch = make(chan interface{}, 1)
- w.m[id] = ch
+ rd := w.m[id]
+ if rd == nil {
+ rd = e
+ w.m[id] = rd
} else {
log.Panicf("dup id %x", id)
}
- return ch
+ return rd
+}
+
+func (mw multList) Register(id uint64) WaitResult {
+ return mw.RegisterWithC(id, nil)
}
-func (w *list) Trigger(id uint64, x interface{}) {
+func (mw multList) Trigger(id uint64, x interface{}) {
+ w := mw[id%uint64(len(mw))]
w.l.Lock()
- ch := w.m[id]
+ rd := w.m[id]
delete(w.m, id)
w.l.Unlock()
- if ch != nil {
- ch <- x
- close(ch)
+ if rd != nil {
+ rd.value = x
+ //close(rd.done)
+ select {
+ case rd.done <- struct{}{}:
+ default:
+ log.Panicf("done chan is full: %v", id)
+ }
}
}
-func (w *list) IsRegistered(id uint64) bool {
- w.l.RLock()
- defer w.l.RUnlock()
+func (mw multList) IsRegistered(id uint64) bool {
+ w := mw[id%uint64(len(mw))]
+ w.l.Lock()
_, ok := w.m[id]
+ w.l.Unlock()
return ok
}
type waitWithResponse struct {
- ch <-chan interface{}
+ wr *resultData
}
func NewWithResponse(ch <-chan interface{}) Wait {
- return &waitWithResponse{ch: ch}
+ return &waitWithResponse{wr: newResultData(nil)}
+}
+
+func (w *waitWithResponse) RegisterWithC(id uint64, done chan struct{}) WaitResult {
+ return w.wr
}
-func (w *waitWithResponse) Register(id uint64) <-chan interface{} {
- return w.ch
+func (w *waitWithResponse) Register(id uint64) WaitResult {
+ return w.wr
}
func (w *waitWithResponse) Trigger(id uint64, x interface{}) {}
func (w *waitWithResponse) IsRegistered(id uint64) bool {
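The rewrite shards the registry across 32 mutex-protected maps keyed by id modulo 32 to reduce lock contention, and it delivers exactly one result per registration through a buffered done channel instead of closing a per-id channel. A small sketch of the caller-owned-channel path; note that the channel must be drained before it is reused, since Trigger panics when the buffer is still full:

package main

import (
	"fmt"

	"github.com/youzan/ZanRedisDB/pkg/wait"
)

func main() {
	w := wait.New()
	// A caller-owned done channel can be reused across requests to avoid a
	// per-request allocation (see BenchmarkWaitRegisterWithChan below).
	done := make(chan struct{}, 1)
	for id := uint64(1); id <= 3; id++ {
		res := w.RegisterWithC(id, done)
		w.Trigger(id, fmt.Sprintf("result-%d", id))
		<-res.WaitC() // drain before the next RegisterWithC reuses the channel
		fmt.Println(res.GetResult())
	}
}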
diff --git a/pkg/wait/wait_test.go b/pkg/wait/wait_test.go
index 54395cb3..6a5c85bc 100644
--- a/pkg/wait/wait_test.go
+++ b/pkg/wait/wait_test.go
@@ -16,6 +16,7 @@ package wait
import (
"fmt"
+ "sync/atomic"
"testing"
"time"
)
@@ -25,20 +26,17 @@ func TestWait(t *testing.T) {
wt := New()
ch := wt.Register(eid)
wt.Trigger(eid, "foo")
- v := <-ch
+ <-ch.WaitC()
+ v := ch.GetResult()
if g, w := fmt.Sprintf("%v (%T)", v, v), "foo (string)"; g != w {
t.Errorf("<-ch = %v, want %v", g, w)
}
-
- if g := <-ch; g != nil {
- t.Errorf("unexpected non-nil value: %v (%T)", g, g)
- }
}
func TestRegisterDupPanic(t *testing.T) {
const eid = 1
wt := New()
- ch1 := wt.Register(eid)
+ ch1 := wt.Register(eid).WaitC()
panicC := make(chan struct{}, 1)
@@ -64,18 +62,16 @@ func TestRegisterDupPanic(t *testing.T) {
func TestTriggerDupSuppression(t *testing.T) {
const eid = 1
wt := New()
- ch := wt.Register(eid)
+ wr := wt.Register(eid)
wt.Trigger(eid, "foo")
wt.Trigger(eid, "bar")
- v := <-ch
+ <-wr.WaitC()
+ v := wr.GetResult()
if g, w := fmt.Sprintf("%v (%T)", v, v), "foo (string)"; g != w {
t.Errorf("<-ch = %v, want %v", g, w)
}
- if g := <-ch; g != nil {
- t.Errorf("unexpected non-nil value: %v (%T)", g, g)
- }
}
func TestIsRegistered(t *testing.T) {
@@ -100,3 +96,40 @@ func TestIsRegistered(t *testing.T) {
t.Errorf("event ID 0 is already triggered, shouldn't be registered")
}
}
+
+func BenchmarkWaitRegister(b *testing.B) {
+ wt := New()
+
+ id := uint64(0)
+ b.SetParallelism(3)
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ rid := atomic.AddUint64(&id, 1)
+ w := wt.Register(rid)
+ wt.IsRegistered(rid)
+ wt.Trigger(rid, rid)
+ <-w.WaitC()
+ w.GetResult()
+ wt.IsRegistered(rid)
+ }
+ })
+}
+
+func BenchmarkWaitRegisterWithChan(b *testing.B) {
+ wt := New()
+
+ id := uint64(0)
+ b.SetParallelism(3)
+ b.RunParallel(func(pb *testing.PB) {
+ done := make(chan struct{}, 1)
+ for pb.Next() {
+ rid := atomic.AddUint64(&id, 1)
+ w := wt.RegisterWithC(rid, done)
+ wt.IsRegistered(rid)
+ wt.Trigger(rid, rid)
+ <-w.WaitC()
+ w.GetResult()
+ wt.IsRegistered(rid)
+ }
+ })
+}
diff --git a/pre-dist.sh b/pre-dist.sh
index 336c914f..e52c2327 100755
--- a/pre-dist.sh
+++ b/pre-dist.sh
@@ -4,12 +4,5 @@ set -e
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
rm -rf $DIR/dist/docker
-rm -rf $DIR/.godeps/src/github.com
-mkdir -p $DIR/.godeps
mkdir -p $DIR/dist
-export GOPATH=$DIR/.godeps:$(go env GOPATH)
-GOPATH=$DIR/.godeps gpm get
-
-arch=$(go env GOARCH)
-
-#go test -tags=embed -race ./...
+dep ensure
diff --git a/raft/bootstrap.go b/raft/bootstrap.go
new file mode 100644
index 00000000..56d11992
--- /dev/null
+++ b/raft/bootstrap.go
@@ -0,0 +1,97 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "errors"
+
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
+)
+
+// Bootstrap initializes the RawNode for first use by appending configuration
+// changes for the supplied peers. This method returns an error if the Storage
+// is nonempty.
+//
+// It is recommended that instead of calling this method, applications bootstrap
+// their state manually by setting up a Storage that has a first index > 1 and
+// which stores the desired ConfState as its InitialState.
+func (rn *RawNode) Bootstrap(peers []Peer) error {
+ if len(peers) == 0 {
+ return errors.New("must provide at least one peer to Bootstrap")
+ }
+ lastIndex, err := rn.raft.raftLog.storage.LastIndex()
+ if err != nil {
+ return err
+ }
+
+ if lastIndex != 0 {
+ return errors.New("can't bootstrap a nonempty Storage")
+ }
+
+ // We've faked out initial entries above, but nothing has been
+ // persisted. Start with an empty HardState (thus the first Ready will
+ // emit a HardState update for the app to persist).
+ rn.prevHardSt = emptyState
+
+ // TODO(tbg): remove StartNode and give the application the right tools to
+ // bootstrap the initial membership in a cleaner way.
+ rn.raft.becomeFollower(1, None)
+ ents := make([]pb.Entry, len(peers))
+ for i, peer := range peers {
+ cc := pb.ConfChange{Type: pb.ConfChangeAddNode, ReplicaID: peer.ReplicaID,
+ NodeGroup: pb.Group{NodeId: peer.NodeID, Name: rn.raft.group.Name, GroupId: rn.raft.group.GroupId,
+ RaftReplicaId: peer.ReplicaID},
+ Context: peer.Context}
+ data, err := cc.Marshal()
+ if err != nil {
+ return err
+ }
+ // TODO(tbg): this should append the ConfChange for the own node first
+ // and also call applyConfChange below for that node first. Otherwise
+ // we have a Raft group (for a little while) that doesn't have itself
+ // in its config, which is bad.
+ // This whole way of setting things up is rickety. The app should just
+ // populate the initial ConfState appropriately and then all of this
+ // goes away.
+ e := pb.Entry{
+ Type: pb.EntryConfChange,
+ Term: 1,
+ Index: uint64(i + 1),
+ Data: data,
+ }
+ ents[i] = e
+ }
+ rn.raft.raftLog.append(ents...)
+
+ // Now apply them, mainly so that the application can call Campaign
+ // immediately after StartNode in tests. Note that these nodes will
+ // be added to raft twice: here and when the application's Ready
+ // loop calls ApplyConfChange. The calls to addNode must come after
+ // all calls to raftLog.append so progress.next is set after these
+ // bootstrapping entries (it is an error if we try to append these
+ // entries since they have already been committed).
+ // We do not set raftLog.applied so the application will be able
+ // to observe all conf changes via Ready.CommittedEntries.
+ //
+ // TODO(bdarnell): These entries are still unstable; do we need to preserve
+ // the invariant that committed < unstable?
+ rn.raft.raftLog.committed = uint64(len(ents))
+ for _, peer := range peers {
+ grp := pb.Group{NodeId: peer.NodeID, Name: rn.raft.group.Name, GroupId: rn.raft.group.GroupId,
+ RaftReplicaId: peer.ReplicaID}
+ rn.raft.addNode(peer.ReplicaID, grp)
+ }
+ return nil
+}
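As the comment above recommends, applications can skip Bootstrap entirely by seeding their Storage so that its first index is greater than 1 and its InitialState carries the desired membership. A hedged sketch of that path, assuming this fork keeps etcd's SnapshotMetadata.ConfState layout; in this fork the ConfState also carries replica Group information, omitted here for brevity, and the index, term and member IDs are illustrative:

package main

import (
	"github.com/youzan/ZanRedisDB/raft"
	pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)

func main() {
	// Seed the storage with a snapshot whose metadata records the initial
	// membership, so the node can be started with RestartNode instead of
	// going through Bootstrap.
	s := raft.NewMemoryStorage()
	defer s.Close()
	s.ApplySnapshot(pb.Snapshot{
		Metadata: pb.SnapshotMetadata{
			Index:     1,
			Term:      1,
			ConfState: pb.ConfState{Nodes: []uint64{1}},
		},
	})
	// ... build a raft.Config that uses s as its Storage, then call raft.RestartNode(cfg).
}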
diff --git a/raft/example_test.go b/raft/example_test.go
index 75484988..153cc941 100644
--- a/raft/example_test.go
+++ b/raft/example_test.go
@@ -15,7 +15,7 @@
package raft
import (
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
func applyToStore(ents []pb.Entry) {}
@@ -34,7 +34,11 @@ func ExampleNode() {
var prev pb.HardState
for {
// Ready blocks until there is new state ready.
- rd := <-n.Ready()
+ <-n.EventNotifyCh()
+ rd, hasEvent := n.StepNode(true, false)
+ if !hasEvent {
+ continue
+ }
if !isHardStateEqual(prev, rd.HardState) {
saveStateToDisk(rd.HardState)
prev = rd.HardState
diff --git a/raft/log.go b/raft/log.go
index a51652a5..3ad97dc0 100644
--- a/raft/log.go
+++ b/raft/log.go
@@ -18,7 +18,7 @@ import (
"fmt"
"log"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
type raftLog struct {
@@ -38,17 +38,28 @@ type raftLog struct {
applied uint64
logger Logger
+ // maxNextEntsSize is the maximum aggregate byte size of the entries
+ // returned from calls to nextEnts.
+ maxNextEntsSize uint64
}
-// newLog returns log using the given storage. It recovers the log to the state
-// that it just commits and applies the latest snapshot.
+// newLog returns a log using the given storage and default options. It
+// recovers the log to the state where it has just committed and applied
+// the latest snapshot.
func newLog(storage Storage, logger Logger) *raftLog {
+ return newLogWithSize(storage, logger, noLimit)
+}
+
+// newLogWithSize returns a log using the given storage and max
+// message size.
+func newLogWithSize(storage Storage, logger Logger, maxNextEntsSize uint64) *raftLog {
if storage == nil {
log.Panic("storage must not be nil")
}
log := &raftLog{
- storage: storage,
- logger: logger,
+ storage: storage,
+ logger: logger,
+ maxNextEntsSize: maxNextEntsSize,
}
firstIndex, err := storage.FirstIndex()
if err != nil {
@@ -139,7 +150,7 @@ func (l *raftLog) unstableEntries() []pb.Entry {
func (l *raftLog) nextEnts() (ents []pb.Entry) {
off := max(l.applied+1, l.firstIndex())
if l.committed+1 > off {
- ents, err := l.slice(off, l.committed+1, noLimit)
+ ents, err := l.slice(off, l.committed+1, l.maxNextEntsSize)
if err != nil {
l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
}
@@ -155,6 +166,15 @@ func (l *raftLog) hasNextEnts() bool {
return l.committed+1 > off
}
+func (l *raftLog) hasMoreNextEnts(appliedTo uint64) bool {
+ return l.committed > appliedTo
+}
+
+// hasPendingSnapshot returns true if there is a pending snapshot waiting to be applied.
+func (l *raftLog) hasPendingSnapshot() bool {
+ return l.unstable.snapshot != nil && !IsEmptySnap(*l.unstable.snapshot)
+}
+
func (l *raftLog) snapshot() (pb.Snapshot, error) {
if l.unstable.snapshot != nil {
return *l.unstable.snapshot, nil
@@ -320,8 +340,10 @@ func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) {
if hi > l.unstable.offset {
unstable := l.unstable.slice(max(lo, l.unstable.offset), hi)
if len(ents) > 0 {
- ents = append([]pb.Entry{}, ents...)
- ents = append(ents, unstable...)
+ combined := make([]pb.Entry, len(ents)+len(unstable))
+ n := copy(combined, ents)
+ copy(combined[n:], unstable)
+ ents = combined
} else {
ents = unstable
}
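A package-internal sketch of what the new maxNextEntsSize cap does, assuming slice applies the same limitSize trimming as upstream etcd: with the cap set to roughly 1KB, two ~800-byte committed entries come back from nextEnts over two calls instead of one. Sizes and indexes are illustrative.

package raft

import pb "github.com/youzan/ZanRedisDB/raft/raftpb"

// exampleNextEntsLimit shows the cap in action: only the first ~800-byte entry
// fits under the 1024-byte limit, so the second is returned by a later call.
func exampleNextEntsLimit() {
	s := NewMemoryStorage()
	defer s.Close()
	l := newLogWithSize(s, raftLogger, 1024)
	l.append(pb.Entry{Index: 1, Term: 1, Data: make([]byte, 800)})
	l.append(pb.Entry{Index: 2, Term: 1, Data: make([]byte, 800)})
	l.commitTo(2)
	first := l.nextEnts() // entry 1 only; adding entry 2 would exceed the cap
	l.appliedTo(first[len(first)-1].Index)
	_ = l.nextEnts() // entry 2
}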
diff --git a/raft/log_test.go b/raft/log_test.go
index 4b898c59..456b65c9 100644
--- a/raft/log_test.go
+++ b/raft/log_test.go
@@ -18,7 +18,7 @@ import (
"reflect"
"testing"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
func TestFindConflict(t *testing.T) {
@@ -46,6 +46,7 @@ func TestFindConflict(t *testing.T) {
for i, tt := range tests {
raftLog := newLog(NewMemoryStorage(), raftLogger)
+ defer raftLog.storage.(IExtRaftStorage).Close()
raftLog.append(previousEnts...)
gconflict := raftLog.findConflict(tt.ents)
@@ -58,6 +59,7 @@ func TestFindConflict(t *testing.T) {
func TestIsUpToDate(t *testing.T) {
previousEnts := []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}}
raftLog := newLog(NewMemoryStorage(), raftLogger)
+ defer raftLog.storage.(IExtRaftStorage).Close()
raftLog.append(previousEnts...)
tests := []struct {
lastIndex uint64
@@ -124,6 +126,7 @@ func TestAppend(t *testing.T) {
for i, tt := range tests {
storage := NewMemoryStorage()
+ defer storage.Close()
storage.Append(previousEnts)
raftLog := newLog(storage, raftLogger)
@@ -237,6 +240,7 @@ func TestLogMaybeAppend(t *testing.T) {
for i, tt := range tests {
raftLog := newLog(NewMemoryStorage(), raftLogger)
+ defer raftLog.storage.(IExtRaftStorage).Close()
raftLog.append(previousEnts...)
raftLog.committed = commit
func() {
@@ -281,6 +285,7 @@ func TestCompactionSideEffects(t *testing.T) {
unstableIndex := uint64(750)
lastTerm := lastIndex
storage := NewMemoryStorage()
+ defer storage.Close()
for i = 1; i <= unstableIndex; i++ {
storage.Append([]pb.Entry{{Term: uint64(i), Index: uint64(i)}})
}
@@ -357,6 +362,7 @@ func TestHasNextEnts(t *testing.T) {
}
for i, tt := range tests {
storage := NewMemoryStorage()
+ defer storage.Close()
storage.ApplySnapshot(snap)
raftLog := newLog(storage, raftLogger)
raftLog.append(ents...)
@@ -390,6 +396,7 @@ func TestNextEnts(t *testing.T) {
}
for i, tt := range tests {
storage := NewMemoryStorage()
+ defer storage.Close()
storage.ApplySnapshot(snap)
raftLog := newLog(storage, raftLogger)
raftLog.append(ents...)
@@ -418,6 +425,7 @@ func TestUnstableEnts(t *testing.T) {
for i, tt := range tests {
// append stable entries to storage
storage := NewMemoryStorage()
+ defer storage.Close()
storage.Append(previousEnts[:tt.unstable-1])
// append unstable entries to raftlog
@@ -460,6 +468,7 @@ func TestCommitTo(t *testing.T) {
}
}()
raftLog := newLog(NewMemoryStorage(), raftLogger)
+ defer raftLog.storage.(IExtRaftStorage).Close()
raftLog.append(previousEnts...)
raftLog.committed = commit
raftLog.commitTo(tt.commit)
@@ -483,6 +492,7 @@ func TestStableTo(t *testing.T) {
}
for i, tt := range tests {
raftLog := newLog(NewMemoryStorage(), raftLogger)
+ defer raftLog.storage.(IExtRaftStorage).Close()
raftLog.append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}}...)
raftLog.stableTo(tt.stablei, tt.stablet)
if raftLog.unstable.offset != tt.wunstable {
@@ -518,6 +528,7 @@ func TestStableToWithSnap(t *testing.T) {
}
for i, tt := range tests {
s := NewMemoryStorage()
+ defer s.Close()
s.ApplySnapshot(pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: snapi, Term: snapt}})
raftLog := newLog(s, raftLogger)
raftLog.append(tt.newEnts...)
@@ -554,6 +565,7 @@ func TestCompaction(t *testing.T) {
}()
storage := NewMemoryStorage()
+ defer storage.Close()
for i := uint64(1); i <= tt.lastIndex; i++ {
storage.Append([]pb.Entry{{Index: i}})
}
@@ -582,6 +594,7 @@ func TestLogRestore(t *testing.T) {
term := uint64(1000)
snap := pb.SnapshotMetadata{Index: index, Term: term}
storage := NewMemoryStorage()
+ defer storage.Close()
storage.ApplySnapshot(pb.Snapshot{Metadata: snap})
raftLog := newLog(storage, raftLogger)
@@ -606,6 +619,7 @@ func TestIsOutOfBounds(t *testing.T) {
offset := uint64(100)
num := uint64(100)
storage := NewMemoryStorage()
+ defer storage.Close()
storage.ApplySnapshot(pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: offset}})
l := newLog(storage, raftLogger)
for i := uint64(1); i <= num; i++ {
@@ -689,6 +703,7 @@ func TestTerm(t *testing.T) {
num := uint64(100)
storage := NewMemoryStorage()
+ defer storage.Close()
storage.ApplySnapshot(pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: offset, Term: 1}})
l := newLog(storage, raftLogger)
for i = 1; i < num; i++ {
@@ -719,6 +734,7 @@ func TestTermWithUnstableSnapshot(t *testing.T) {
unstablesnapi := storagesnapi + 5
storage := NewMemoryStorage()
+ defer storage.Close()
storage.ApplySnapshot(pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: storagesnapi, Term: 1}})
l := newLog(storage, raftLogger)
l.restore(pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: unstablesnapi, Term: 1}})
@@ -753,6 +769,7 @@ func TestSlice(t *testing.T) {
halfe := pb.Entry{Index: half, Term: half}
storage := NewMemoryStorage()
+ defer storage.Close()
storage.ApplySnapshot(pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: offset}})
for i = 1; i < num/2; i++ {
storage.Append([]pb.Entry{{Index: offset + i, Term: offset + i}})
diff --git a/raft/log_unstable.go b/raft/log_unstable.go
index 392fce1b..b0ddd178 100644
--- a/raft/log_unstable.go
+++ b/raft/log_unstable.go
@@ -14,7 +14,7 @@
package raft
-import pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+import pb "github.com/youzan/ZanRedisDB/raft/raftpb"
// unstable.entries[i] has raft log position i+unstable.offset.
// Note that unstable.offset may be less than the highest log
diff --git a/raft/log_unstable_test.go b/raft/log_unstable_test.go
index c0c19e34..20c13725 100644
--- a/raft/log_unstable_test.go
+++ b/raft/log_unstable_test.go
@@ -18,7 +18,7 @@ import (
"reflect"
"testing"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
func TestUnstableMaybeFirstIndex(t *testing.T) {
diff --git a/raft/logger.go b/raft/logger.go
index ff78efcc..e20eaa51 100644
--- a/raft/logger.go
+++ b/raft/logger.go
@@ -44,7 +44,7 @@ type Logger interface {
func SetLogger(l Logger) { raftLogger = l }
var (
- defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags|log.Lshortfile)}
+ defaultLogger = &DefaultLogger{Logger: log.New(os.Stdout, "raft", log.LstdFlags|log.Lmicroseconds|log.Lshortfile)}
discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
raftLogger = Logger(defaultLogger)
)
@@ -114,7 +114,7 @@ func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
}
func (l *DefaultLogger) Panic(v ...interface{}) {
- l.Logger.Panic(v)
+ l.Logger.Panic(v...)
}
func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
diff --git a/raft/message_queue.go b/raft/message_queue.go
new file mode 100644
index 00000000..b5d03ec6
--- /dev/null
+++ b/raft/message_queue.go
@@ -0,0 +1,151 @@
+// Copyright 2017-2019 Lei Ni (nilei81@gmail.com)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "sync"
+
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+)
+
+// MessageQueue is the queue used to hold Raft messages.
+type MessageQueue struct {
+ size uint64
+ ch chan struct{}
+ left []raftpb.Message
+ right []raftpb.Message
+ snapshot []raftpb.Message
+ leftInWrite bool
+ stopped bool
+ idx uint64
+ oldIdx uint64
+ cycle uint64
+ lazyFreeCycle uint64
+ mu sync.Mutex
+}
+
+// NewMessageQueue creates a new MessageQueue instance.
+func NewMessageQueue(size uint64, ch bool, lazyFreeCycle uint64) *MessageQueue {
+ q := &MessageQueue{
+ size: size,
+ lazyFreeCycle: lazyFreeCycle,
+ left: make([]raftpb.Message, size),
+ right: make([]raftpb.Message, size),
+ snapshot: make([]raftpb.Message, 0),
+ }
+ if ch {
+ q.ch = make(chan struct{}, 1)
+ }
+ return q
+}
+
+// Close closes the queue so no further messages can be added.
+func (q *MessageQueue) Close() {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+ q.stopped = true
+}
+
+// Notify notifies the notification channel listener that a new message is now
+// available in the queue.
+func (q *MessageQueue) Notify() {
+ if q.ch != nil {
+ select {
+ case q.ch <- struct{}{}:
+ default:
+ }
+ }
+}
+
+// Ch returns the notification channel.
+func (q *MessageQueue) Ch() <-chan struct{} {
+ return q.ch
+}
+
+func (q *MessageQueue) targetQueue() []raftpb.Message {
+ var t []raftpb.Message
+ if q.leftInWrite {
+ t = q.left
+ } else {
+ t = q.right
+ }
+ return t
+}
+
+// Add adds the specified message to the queue.
+func (q *MessageQueue) Add(msg raftpb.Message) (bool, bool) {
+ q.mu.Lock()
+ if q.idx >= q.size {
+ q.mu.Unlock()
+ return false, q.stopped
+ }
+ if q.stopped {
+ q.mu.Unlock()
+ return false, true
+ }
+ w := q.targetQueue()
+ w[q.idx] = msg
+ q.idx++
+ q.mu.Unlock()
+ return true, false
+}
+
+// AddSnapshot adds the specified snapshot to the queue.
+func (q *MessageQueue) AddSnapshot(msg raftpb.Message) bool {
+ if msg.Type != raftpb.MsgSnap {
+ panic("not a snapshot message")
+ }
+ q.mu.Lock()
+ defer q.mu.Unlock()
+ if q.stopped {
+ return false
+ }
+ q.snapshot = append(q.snapshot, msg)
+ return true
+}
+
+func (q *MessageQueue) gc() {
+ if q.lazyFreeCycle > 0 {
+ oldq := q.targetQueue()
+ if q.lazyFreeCycle == 1 {
+ for i := uint64(0); i < q.oldIdx; i++ {
+ oldq[i].Entries = nil
+ }
+ } else if q.cycle%q.lazyFreeCycle == 0 {
+ for i := uint64(0); i < q.size; i++ {
+ oldq[i].Entries = nil
+ }
+ }
+ }
+}
+
+// Get returns everything current in the queue.
+func (q *MessageQueue) Get() []raftpb.Message {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+ q.cycle++
+ sz := q.idx
+ q.idx = 0
+ t := q.targetQueue()
+ q.leftInWrite = !q.leftInWrite
+ q.gc()
+ q.oldIdx = sz
+ if len(q.snapshot) == 0 {
+ return t[:sz]
+ }
+ ssm := q.snapshot
+ q.snapshot = make([]raftpb.Message, 0)
+ return append(ssm, t[:sz]...)
+}
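Get swaps the left and right buffers, so producers keep filling one buffer while the consumer drains the other, and lazyFreeCycle controls how often drained slots have their Entries cleared for the garbage collector. A short usage sketch pairing Add with the optional notification channel; the message type is illustrative:

package main

import (
	"fmt"

	"github.com/youzan/ZanRedisDB/raft"
	"github.com/youzan/ZanRedisDB/raft/raftpb"
)

func main() {
	// Capacity 8, with a notification channel, clearing drained Entries on
	// every Get (lazyFreeCycle = 1).
	q := raft.NewMessageQueue(8, true, 1)
	q.Add(raftpb.Message{Type: raftpb.MsgApp})
	q.Notify()

	<-q.Ch()         // consumer wakes up
	batch := q.Get() // drains the current write buffer and swaps buffers
	fmt.Println(len(batch))
	q.Close() // subsequent Add calls report stopped = true
}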
diff --git a/raft/message_test.go b/raft/message_test.go
new file mode 100644
index 00000000..82588894
--- /dev/null
+++ b/raft/message_test.go
@@ -0,0 +1,140 @@
+// Copyright 2017-2019 Lei Ni (nilei81@gmail.com)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "testing"
+
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+)
+
+func TestMessageQueueCanBeCreated(t *testing.T) {
+ q := NewMessageQueue(8, false, 0)
+ if len(q.left) != 8 || len(q.right) != 8 {
+ t.Errorf("unexpected size")
+ }
+}
+
+func TestMessageCanBeAddedAndGet(t *testing.T) {
+ q := NewMessageQueue(8, false, 0)
+ for i := 0; i < 8; i++ {
+ added, stopped := q.Add(raftpb.Message{})
+ if !added || stopped {
+ t.Errorf("failed to add")
+ }
+ }
+ add, stopped := q.Add(raftpb.Message{})
+ add2, stopped2 := q.Add(raftpb.Message{})
+ if add || add2 {
+ t.Errorf("failed to drop message")
+ }
+ if stopped || stopped2 {
+ t.Errorf("unexpectedly stopped")
+ }
+ if q.idx != 8 {
+ t.Errorf("unexpected idx %d", q.idx)
+ }
+ lr := q.leftInWrite
+ q.Get()
+ if q.idx != 0 {
+ t.Errorf("unexpected idx %d", q.idx)
+ }
+ if lr == q.leftInWrite {
+ t.Errorf("lr flag not updated")
+ }
+ add, stopped = q.Add(raftpb.Message{})
+ add2, stopped2 = q.Add(raftpb.Message{})
+ if !add || !add2 {
+ t.Errorf("failed to add message")
+ }
+ if stopped || stopped2 {
+ t.Errorf("unexpectedly stopped")
+ }
+}
+
+func TestNonSnapshotMsgByCallingAddSnapshotWillPanic(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ return
+ }
+ t.Errorf("didn't panic")
+ }()
+ q := NewMessageQueue(8, false, 0)
+ q.AddSnapshot(raftpb.Message{})
+}
+
+func TestSnapshotCanAlwaysBeAdded(t *testing.T) {
+ q := NewMessageQueue(8, false, 0)
+ for i := 0; i < 1024; i++ {
+ if !q.AddSnapshot(raftpb.Message{Type: raftpb.MsgSnap}) {
+ t.Errorf("failed to add snapshot")
+ }
+ }
+}
+
+func TestAddedSnapshotWillBeReturned(t *testing.T) {
+ q := NewMessageQueue(8, false, 0)
+ if !q.AddSnapshot(raftpb.Message{Type: raftpb.MsgSnap}) {
+ t.Errorf("failed to add snapshot")
+ }
+ for i := 0; i < 4; i++ {
+ added, stopped := q.Add(raftpb.Message{})
+ if !added || stopped {
+ t.Errorf("failed to add")
+ }
+ }
+ if !q.AddSnapshot(raftpb.Message{Type: raftpb.MsgSnap}) {
+ t.Errorf("failed to add snapshot")
+ }
+ for i := 0; i < 4; i++ {
+ added, stopped := q.Add(raftpb.Message{})
+ if !added || stopped {
+ t.Errorf("failed to add")
+ }
+ }
+ if !q.AddSnapshot(raftpb.Message{Type: raftpb.MsgSnap}) {
+ t.Errorf("failed to add snapshot")
+ }
+ msgs := q.Get()
+ if len(msgs) != 11 {
+ t.Errorf("failed to return all messages")
+ }
+ count := 0
+ for _, msg := range msgs {
+ if msg.Type == raftpb.MsgSnap {
+ count++
+ }
+ }
+ if count != 3 {
+ t.Errorf("failed to get all snapshot messages")
+ }
+ if len(q.snapshot) != 0 {
+ t.Errorf("snapshot list not empty")
+ }
+}
+
+func TestMessageQueueCanBeStopped(t *testing.T) {
+ q := NewMessageQueue(8, false, 0)
+ q.Close()
+ for i := 0; i < 4; i++ {
+ added, stopped := q.Add(raftpb.Message{})
+ if added || !stopped {
+ t.Errorf("unexpectedly added msg")
+ }
+ }
+ if q.AddSnapshot(raftpb.Message{Type: raftpb.MsgSnap}) {
+ t.Errorf("unexpectedly added snapshot")
+ }
+}
diff --git a/raft/node.go b/raft/node.go
index fe3eda0a..17a242a5 100644
--- a/raft/node.go
+++ b/raft/node.go
@@ -16,25 +16,30 @@ package raft
import (
"errors"
- "runtime"
+ "fmt"
+ "time"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
"golang.org/x/net/context"
)
type SnapshotStatus int
const (
- SnapshotFinish SnapshotStatus = 1
- SnapshotFailure SnapshotStatus = 2
+ SnapshotFinish SnapshotStatus = 1
+ SnapshotFailure SnapshotStatus = 2
+ queueWaitTime = time.Millisecond * 10
+ recvQueueLen = 1024 * 16
+ proposalQueueLen = 1024 * 4
)
var (
emptyState = pb.HardState{}
// ErrStopped is returned by methods on Nodes that have been stopped.
- ErrStopped = errors.New("raft: stopped")
- errMsgDropped = errors.New("raft message dropped")
+ ErrStopped = errors.New("raft: stopped")
+ errMsgDropped = errors.New("raft message dropped")
+ errProposalAddFailed = errors.New("add proposal to queue failed")
)
// SoftState provides state that is useful for logging and debugging.
@@ -79,7 +84,8 @@ type Ready struct {
// store/state-machine. These have previously been committed to stable
// store.
CommittedEntries []pb.Entry
-
+ // Whether there are more committed entries ready to be applied.
+ MoreCommittedEntries bool
// Messages specifies outbound messages to be sent AFTER Entries are
// committed to stable storage.
// If it contains a MsgSnap message, the application MUST report back to raft
@@ -111,6 +117,19 @@ func (rd Ready) containsUpdates() bool {
len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
}
+// appliedCursor extracts from the Ready the highest index the client has
+// applied (once the Ready is confirmed via Advance). If no information is
+// contained in the Ready, returns zero.
+func (rd Ready) appliedCursor() uint64 {
+ if n := len(rd.CommittedEntries); n > 0 {
+ return rd.CommittedEntries[n-1].Index
+ }
+ if index := rd.Snapshot.Metadata.Index; index > 0 {
+ return index
+ }
+ return 0
+}
+
type msgWithDrop struct {
m pb.Message
dropCB context.CancelFunc
@@ -120,13 +139,14 @@ type msgWithDrop struct {
type Node interface {
// Tick increments the internal logical clock for the Node by a single tick. Election
// timeouts and heartbeat timeouts are in units of ticks.
- Tick()
+ Tick() bool
// Campaign causes the Node to transition to candidate state and start campaigning to become leader.
Campaign(ctx context.Context) error
// Propose proposes that data be appended to the log.
Propose(ctx context.Context, data []byte) error
// ProposeWithDrop proposes that data be appended to the log and cancels it if dropped
ProposeWithDrop(ctx context.Context, data []byte, cancel context.CancelFunc) error
+ ProposeEntryWithDrop(ctx context.Context, e pb.Entry, cancel context.CancelFunc) error
// ProposeConfChange proposes config change.
// At most one ConfChange can be in the process of going through consensus.
// Application needs to call ApplyConfChange when applying EntryConfChange type entry.
@@ -134,13 +154,20 @@ type Node interface {
// Step advances the state machine using the given message. ctx.Err() will be returned, if any.
Step(ctx context.Context, msg pb.Message) error
- // Ready returns a channel that returns the current point-in-time state.
- // Users of the Node must call Advance after retrieving the state returned by Ready.
+ // StepNode handles raft events and returns the current point-in-time state.
+ // Users of the Node must call Advance after retrieving the state returned by StepNode.
//
// NOTE: No committed entries from the next Ready may be applied until all committed entries
// and snapshots from the previous one have finished.
- Ready() <-chan Ready
-
+ StepNode(moreApplyEntries bool, busySnap bool) (Ready, bool)
+ // EventNotifyCh returns the channel used to notify or receive notification of raft events
+ EventNotifyCh() chan bool
+ // NotifyEventCh notifies the raft loop to check for new events
+ NotifyEventCh()
+
+ ConfChangedCh() <-chan pb.ConfChange
+ // HandleConfChanged handles a configuration change event
+ HandleConfChanged(cc pb.ConfChange)
// Advance notifies the Node that the application has saved progress up to the last Ready.
// It prepares the node to return the next available Ready.
//
@@ -150,7 +177,7 @@ type Node interface {
// commands. For example. when the last Ready contains a snapshot, the application might take
// a long time to apply the snapshot data. To continue receiving Ready without blocking raft
// progress, it can call Advance before finishing applying the last ready.
- Advance()
+ Advance(rd Ready)
// ApplyConfChange applies config change to the local node.
// Returns an opaque ConfState protobuf which must be recorded
// in snapshots. Will never return nil; it returns a pointer only
@@ -174,6 +201,7 @@ type Node interface {
ReportSnapshot(id uint64, group pb.Group, status SnapshotStatus)
// Stop performs any necessary termination of the Node.
Stop()
+ DebugString() string
}
type Peer struct {
@@ -182,49 +210,40 @@ type Peer struct {
Context []byte
}
+type prevState struct {
+ prevLead uint64
+}
+
+func newPrevState(r *RawNode) *prevState {
+ return &prevState{
+ prevLead: None,
+ }
+}
+
// StartNode returns a new Node given configuration and a list of raft peers.
// It appends a ConfChangeAddNode entry for each given peer to the initial log.
func StartNode(c *Config, peers []Peer, isLearner bool) Node {
- if isLearner {
- c.learners = append(c.learners, c.Group)
- }
- r := newRaft(c)
- // become the follower at term 1 and apply initial configuration
- // entries of term 1
- r.becomeFollower(1, None)
- for _, peer := range peers {
- cc := pb.ConfChange{Type: pb.ConfChangeAddNode, ReplicaID: peer.ReplicaID,
- NodeGroup: pb.Group{NodeId: peer.NodeID, Name: r.group.Name, GroupId: r.group.GroupId,
- RaftReplicaId: peer.ReplicaID},
- Context: peer.Context}
- d, err := cc.Marshal()
- if err != nil {
- panic("unexpected marshal error")
- }
- e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d}
- r.raftLog.append(e)
- }
- // Mark these initial entries as committed.
- // TODO(bdarnell): These entries are still unstable; do we need to preserve
- // the invariant that committed < unstable?
- r.raftLog.committed = r.raftLog.lastIndex()
- // Now apply them, mainly so that the application can call Campaign
- // immediately after StartNode in tests. Note that these nodes will
- // be added to raft twice: here and when the application's Ready
- // loop calls ApplyConfChange. The calls to addNode must come after
- // all calls to raftLog.append so progress.next is set after these
- // bootstrapping entries (it is an error if we try to append these
- // entries since they have already been committed).
- // We do not set raftLog.applied so the application will be able
- // to observe all conf changes via Ready.CommittedEntries.
- for _, peer := range peers {
- r.addNode(peer.ReplicaID, pb.Group{NodeId: peer.NodeID, Name: r.group.Name, GroupId: r.group.GroupId,
- RaftReplicaId: peer.ReplicaID})
+ //if isLearner {
+ // c.learners = append(c.learners, c.Group)
+ //}
+ if len(peers) == 0 {
+ panic("no peers given; use RestartNode instead")
+ }
+ //r := newRaft(c)
+
+ rn, err := NewRawNode(c)
+ if err != nil {
+ panic(err)
}
+ rn.Bootstrap(peers)
n := newNode()
n.logger = c.Logger
- go n.run(r)
+ n.r = rn
+ n.prevS = newPrevState(rn)
+ off := max(rn.raft.raftLog.applied+1, rn.raft.raftLog.firstIndex())
+ n.lastSteppedIndex = off
+ n.NotifyEventCh()
return &n
}
@@ -233,252 +252,364 @@ func StartNode(c *Config, peers []Peer, isLearner bool) Node {
// If the caller has an existing state machine, pass in the last log index that
// has been applied to it; otherwise use zero.
func RestartNode(c *Config) Node {
- r := newRaft(c)
+ rn, err := NewRawNode(c)
+ if err != nil {
+ panic(err)
+ }
n := newNode()
n.logger = c.Logger
- go n.run(r)
+ n.r = rn
+ n.prevS = newPrevState(rn)
+ off := max(rn.raft.raftLog.applied+1, rn.raft.raftLog.firstIndex())
+ n.lastSteppedIndex = off
+ n.NotifyEventCh()
return &n
}
// node is the canonical implementation of the Node interface
type node struct {
- propc chan msgWithDrop
- recvc chan msgWithDrop
- confc chan pb.ConfChange
- confstatec chan pb.ConfState
- readyc chan Ready
- advancec chan struct{}
- tickc chan struct{}
- done chan struct{}
- stop chan struct{}
- status chan chan Status
+ propQ *ProposalQueue
+ msgQ *MessageQueue
+ confc chan pb.ConfChange
+ confstatec chan pb.ConfState
+ tickc chan struct{}
+ done chan struct{}
+ stop chan struct{}
+ status chan chan Status
+ eventNotifyCh chan bool
+ r *RawNode
+ prevS *prevState
+ newReadyFunc func(*raft, *SoftState, pb.HardState, bool) Ready
+ needAdvance bool
+ lastSteppedIndex uint64
logger Logger
}
func newNode() node {
return node{
- propc: make(chan msgWithDrop),
- recvc: make(chan msgWithDrop),
- confc: make(chan pb.ConfChange),
- confstatec: make(chan pb.ConfState),
- readyc: make(chan Ready),
- advancec: make(chan struct{}),
+ propQ: NewProposalQueue(proposalQueueLen, 1),
+ msgQ: NewMessageQueue(recvQueueLen, false, 1),
+ confc: make(chan pb.ConfChange, 1),
+ confstatec: make(chan pb.ConfState, 1),
// make tickc a buffered chan, so raft node can buffer some ticks when the node
// is busy processing raft messages. Raft node will resume processing buffered
// ticks when it becomes idle.
- tickc: make(chan struct{}, 128),
- done: make(chan struct{}),
- stop: make(chan struct{}),
- status: make(chan chan Status),
+ tickc: make(chan struct{}, 128),
+ done: make(chan struct{}),
+ stop: make(chan struct{}),
+ status: make(chan chan Status, 1),
+ eventNotifyCh: make(chan bool, 1),
+ newReadyFunc: newReady,
}
}
+func (n *node) EventNotifyCh() chan bool {
+ return n.eventNotifyCh
+}
+
func (n *node) Stop() {
select {
- case n.stop <- struct{}{}:
- // Not already stopped, so trigger it
case <-n.done:
- // Node has already been stopped - no need to do anything
+ // already closed
return
+ default:
+ close(n.done)
+ }
+}
+
+func (n *node) StepNode(moreEntriesToApply bool, busySnap bool) (Ready, bool) {
+ if n.needAdvance {
+ return Ready{}, false
+ }
+ var hasEvent bool
+ msgs := n.msgQ.Get()
+ for i, m := range msgs {
+ hasEvent = true
+ if busySnap && m.Type == pb.MsgApp {
+ // ignore MsgApp while a snapshot is being applied
+ } else {
+ n.handleReceivedMessage(n.r.raft, m)
+ }
+ msgs[i].Entries = nil
+ }
+ if n.handleTicks(n.r.raft) {
+ hasEvent = true
+ }
+ needHandleProposal := n.handleLeaderUpdate(n.r.raft)
+ var ev bool
+ ev, needHandleProposal = n.handleConfChanged(n.r.raft, needHandleProposal)
+ if ev {
+ hasEvent = ev
}
- // Block until the stop has been acknowledged by run()
- <-n.done
-}
-
-func (n *node) run(r *raft) {
- var propc chan msgWithDrop
- var readyc chan Ready
- var advancec chan struct{}
- var prevLastUnstablei, prevLastUnstablet uint64
- var havePrevLastUnstablei bool
- var prevSnapi uint64
- var rd Ready
-
- lead := None
- prevSoftSt := r.softState()
- prevHardSt := emptyState
- defer func() {
- if e := recover(); e != nil {
- buf := make([]byte, 4096)
- n := runtime.Stack(buf, false)
- buf = buf[0:n]
- r.logger.Infof("handle raft loop panic: %s:%v", buf, e)
+ if needHandleProposal {
+ props := n.propQ.Get()
+ for _, p := range props {
+ hasEvent = true
+ n.handleProposal(n.r.raft, p)
}
+ }
+ n.handleStatus(n.r.raft)
+ _ = hasEvent
+ rd := n.r.readyWithoutAccept(moreEntriesToApply)
+ if rd.containsUpdates() {
+ n.needAdvance = true
+ var stepIndex uint64
+ if !IsEmptySnap(rd.Snapshot) {
+ stepIndex = rd.Snapshot.Metadata.Index
+ }
+ if len(rd.CommittedEntries) > 0 {
+ fi := rd.CommittedEntries[0].Index
+ if n.lastSteppedIndex != 0 && fi > n.lastSteppedIndex+1 {
+ e := fmt.Sprintf("raft.node: %x(%v) index not continued: %v, %v, %v, snap:%v, prev: %v, logs: %v ",
+ n.r.raft.id, n.r.raft.group, fi, n.lastSteppedIndex, stepIndex, rd.Snapshot.Metadata.String(), n.prevS,
+ n.r.raft.raftLog.String())
+ n.logger.Error(e)
+ }
+ stepIndex = rd.CommittedEntries[len(rd.CommittedEntries)-1].Index
+ }
+ n.lastSteppedIndex = stepIndex
+ return rd, true
+ }
+ return Ready{}, false
+}
- close(n.done)
- close(n.readyc)
- }()
+func (n *node) DebugString() string {
+ ents := n.r.raft.raftLog.allEntries()
+ e := fmt.Sprintf("raft.node: %x(%v) index not continued: %v, prev: %v, logs: %v, %v ",
+ n.r.raft.id, n.r.raft.group, n.lastSteppedIndex, n.prevS, len(ents),
+ n.r.raft.raftLog.String())
+ return e
+}
- for {
- if advancec != nil {
- readyc = nil
- } else {
- rd = newReady(r, prevSoftSt, prevHardSt)
- if rd.containsUpdates() {
- readyc = n.readyc
+func (n *node) handleLeaderUpdate(r *raft) bool {
+ lead := n.prevS.prevLead
+ needHandleProposal := lead != None
+ if lead != r.lead {
+ if r.hasLeader() {
+ if lead == None {
+ r.logger.Infof("raft.node: %x(%v) elected leader %x at term %d", r.id, r.group.Name, r.lead, r.Term)
} else {
- readyc = nil
+ r.logger.Infof("raft.node: %x(%v) changed leader from %x to %x at term %d", r.id, r.group.Name, lead, r.lead, r.Term)
}
+ needHandleProposal = true
+ } else {
+ r.logger.Infof("raft.node: %x(%v) lost leader %x at term %d", r.id, r.group.Name, lead, r.Term)
+ needHandleProposal = false
}
+ lead = r.lead
+ n.prevS.prevLead = lead
+ }
+ return needHandleProposal
+}
- if lead != r.lead {
- if r.hasLeader() {
- if lead == None {
- r.logger.Infof("raft.node: %x(%v) elected leader %x at term %d", r.id, r.group.Name, r.lead, r.Term)
- } else {
- r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, r.group.Name, lead, r.lead, r.Term)
- }
- propc = n.propc
- } else {
- r.logger.Infof("raft.node: %x(%v) lost leader %x at term %d", r.id, r.group.Name, lead, r.Term)
- propc = nil
+func (n *node) NotifyEventCh() {
+ select {
+ case n.eventNotifyCh <- true:
+ default:
+ }
+}
+
+func (n *node) addProposalToQueue(ctx context.Context, p msgWithDrop, to time.Duration, stopC chan struct{}) error {
+ if added, stopped, err := n.propQ.AddWait(ctx, p, to, stopC); !added || stopped {
+ if n.logger != nil {
+ n.logger.Warningf("dropped an incoming proposal: %v", p.m.String())
+ }
+ if err != nil {
+ return err
+ }
+ if stopped {
+ return ErrStopped
+ }
+ if !added {
+ return errProposalAddFailed
+ }
+ }
+
+ n.NotifyEventCh()
+ return nil
+}
+
+func (n *node) addReqMessageToQueue(req pb.Message) {
+ if req.Type == pb.MsgSnap {
+ n.msgQ.AddSnapshot(req)
+ } else {
+ if added, stopped := n.msgQ.Add(req); !added || stopped {
+ if n.logger != nil {
+ n.logger.Warningf("dropped an incoming message: %v", req.String())
}
- lead = r.lead
+ return
}
+ }
+
+ n.NotifyEventCh()
+}
+
+func (n *node) Advance(rd Ready) {
+ n.r.acceptReady(rd)
+ n.r.Advance(rd)
+ n.needAdvance = false
+}
+func (n *node) ConfChangedCh() <-chan pb.ConfChange {
+ return n.confc
+}
+
+func (n *node) HandleConfChanged(cc pb.ConfChange) {
+ n.processConfChanged(n.r.raft, cc, true)
+}
+
+func (n *node) handleConfChanged(r *raft, needHandleProposal bool) (bool, bool) {
+ if len(n.confc) == 0 {
+ return false, needHandleProposal
+ }
+ select {
+ case cc := <-n.confc:
+ needHandleProposal = n.processConfChanged(r, cc, needHandleProposal)
+ return true, needHandleProposal
+ default:
+ return false, needHandleProposal
+ }
+}
+
+func (n *node) processConfChanged(r *raft, cc pb.ConfChange, needHandleProposal bool) bool {
+ if cc.ReplicaID == None {
+ r.resetPendingConf()
select {
- // TODO: maybe buffer the config propose if there exists one (the way
- // described in raft dissertation)
- // Currently it is dropped in Step silently.
- case mdrop := <-propc:
- m := mdrop.m
- m.From = r.id
- m.FromGroup = r.group
- err := r.Step(m)
- if err == errMsgDropped && mdrop.dropCB != nil {
- mdrop.dropCB()
- }
- case mdrop := <-n.recvc:
- m := mdrop.m
- // filter out response message from unknown From.
- from := r.getProgress(m.From)
- if from != nil || !IsResponseMsg(m.Type) {
- if m.Type == pb.MsgTransferLeader {
- if m.FromGroup.NodeId == 0 {
- if from == nil {
- if m.From == r.id {
- m.FromGroup = r.group
- } else {
- n.logger.Errorf("no replica found %v while processing : %v",
- m.From, m.String())
- continue
- }
- } else {
- m.FromGroup = from.group
- }
- }
- if m.ToGroup.NodeId == 0 {
- pr := r.getProgress(m.To)
- if pr == nil {
- if m.To == r.id {
- m.ToGroup = r.group
- } else {
- n.logger.Errorf("no replica found %v while processing : %v",
- m.To, m.String())
- continue
- }
- } else {
- m.ToGroup = pr.group
- }
- }
+ case n.confstatec <- pb.ConfState{Nodes: r.nodes(),
+ Groups: r.groups(),
+ Learners: r.learnerNodes(),
+ LearnerGroups: r.learnerGroups()}:
+ case <-n.done:
+ }
+ return needHandleProposal
+ }
+ switch cc.Type {
+ case pb.ConfChangeAddNode:
+ r.addNode(cc.ReplicaID, cc.NodeGroup)
+ case pb.ConfChangeAddLearnerNode:
+ r.addLearner(cc.ReplicaID, cc.NodeGroup)
+ case pb.ConfChangeRemoveNode:
+ // block incoming proposal when local node is
+ // removed
+ if cc.ReplicaID == r.id {
+ needHandleProposal = false
+ }
+ r.removeNode(cc.ReplicaID)
+ case pb.ConfChangeUpdateNode:
+ r.updateNode(cc.ReplicaID, cc.NodeGroup)
+ default:
+ panic("unexpected conf type")
+ }
+ select {
+ case n.confstatec <- pb.ConfState{Nodes: r.nodes(),
+ Groups: r.groups(),
+ Learners: r.learnerNodes(),
+ LearnerGroups: r.learnerGroups()}:
+ case <-n.done:
+ }
+ return needHandleProposal
+}
+
+func (n *node) handleTicks(r *raft) bool {
+ tdone := false
+ hasEvent := false
+ for !tdone {
+ select {
+ case <-n.tickc:
+ hasEvent = true
+ r.tick()
+ default:
+ tdone = true
+ }
+ }
+ return hasEvent
+}
+
+func (n *node) handleStatus(r *raft) {
+ select {
+ case c := <-n.status:
+ c <- getStatus(r)
+ default:
+ }
+}
+
+func (n *node) handleReceivedMessage(r *raft, m pb.Message) {
+ from := r.getProgress(m.From)
+ // filter out response message from unknown From.
+ if from == nil && IsResponseMsg(m.Type) {
+ m.Entries = nil
+ return
+ }
+ if m.Type == pb.MsgTransferLeader {
+ if m.FromGroup.NodeId == 0 {
+ if from == nil {
+ if m.From == r.id {
+ m.FromGroup = r.group
} else {
- // if we missing the peer node group info, try update it from
- // raft message
- if from != nil && from.group.NodeId == 0 && m.FromGroup.NodeId > 0 &&
- m.FromGroup.GroupId == r.group.GroupId {
- from.group = m.FromGroup
+ if n.logger != nil {
+ n.logger.Errorf("no replica found %v while processing : %v",
+ m.From, m.String())
}
+ return
}
- err := r.Step(m)
- if err == errMsgDropped && mdrop.dropCB != nil {
- mdrop.dropCB()
- }
- }
- case cc := <-n.confc:
- if cc.ReplicaID == None {
- r.resetPendingConf()
- select {
- case n.confstatec <- pb.ConfState{Nodes: r.nodes(),
- Groups: r.groups(),
- Learners: r.learnerNodes(),
- LearnerGroups: r.learnerGroups()}:
- case <-n.done:
- }
- break
+ } else {
+ m.FromGroup = from.group
}
- switch cc.Type {
- case pb.ConfChangeAddNode:
- r.addNode(cc.ReplicaID, cc.NodeGroup)
- case pb.ConfChangeAddLearnerNode:
- r.addLearner(cc.ReplicaID, cc.NodeGroup)
- case pb.ConfChangeRemoveNode:
- // block incoming proposal when local node is
- // removed
- if cc.ReplicaID == r.id {
- propc = nil
+ }
+ if m.ToGroup.NodeId == 0 {
+ pr := r.getProgress(m.To)
+ if pr == nil {
+ if m.To == r.id {
+ m.ToGroup = r.group
+ } else {
+ if n.logger != nil {
+ n.logger.Errorf("no replica found %v while processing : %v",
+ m.To, m.String())
+ }
+ return
}
- r.removeNode(cc.ReplicaID)
- case pb.ConfChangeUpdateNode:
- r.updateNode(cc.ReplicaID, cc.NodeGroup)
- default:
- panic("unexpected conf type")
- }
- select {
- case n.confstatec <- pb.ConfState{Nodes: r.nodes(),
- Groups: r.groups(),
- Learners: r.learnerNodes(),
- LearnerGroups: r.learnerGroups()}:
- case <-n.done:
- }
- case <-n.tickc:
- r.tick()
- case readyc <- rd:
- if rd.SoftState != nil {
- prevSoftSt = rd.SoftState
- }
- if len(rd.Entries) > 0 {
- prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index
- prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term
- havePrevLastUnstablei = true
- }
- if !IsEmptyHardState(rd.HardState) {
- prevHardSt = rd.HardState
- }
- if !IsEmptySnap(rd.Snapshot) {
- prevSnapi = rd.Snapshot.Metadata.Index
- }
-
- r.msgs = nil
- r.readStates = nil
- advancec = n.advancec
- case <-advancec:
- if prevHardSt.Commit != 0 {
- r.raftLog.appliedTo(prevHardSt.Commit)
- }
- if havePrevLastUnstablei {
- r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet)
- havePrevLastUnstablei = false
+ } else {
+ m.ToGroup = pr.group
}
- r.raftLog.stableSnapTo(prevSnapi)
- advancec = nil
- case c := <-n.status:
- c <- getStatus(r)
- case <-n.stop:
- return
}
+ } else {
+ // if we are missing the peer's node group info, try to update it from
+ // the raft message
+ if from != nil && from.group.NodeId == 0 && m.FromGroup.NodeId > 0 &&
+ m.FromGroup.GroupId == r.group.GroupId {
+ from.group = m.FromGroup
+ }
+ }
+ r.Step(m)
+ m.Entries = nil
+}
+
+func (n *node) handleProposal(r *raft, mdrop msgWithDrop) {
+ m := mdrop.m
+ m.From = r.id
+ m.FromGroup = r.group
+ err := r.Step(m)
+ if err == errMsgDropped && mdrop.dropCB != nil {
+ mdrop.dropCB()
}
}
// Tick increments the internal logical clock for this Node. Election timeouts
// and heartbeat timeouts are in units of ticks.
-func (n *node) Tick() {
+func (n *node) Tick() bool {
select {
case n.tickc <- struct{}{}:
+ n.NotifyEventCh()
+ return true
case <-n.done:
+ return true
default:
- n.logger.Warningf("A tick missed to fire. Node blocks too long!")
+ if n.logger != nil {
+ n.logger.Warningf("A tick missed to fire. Node blocks too long!")
+ }
+ return false
}
}
@@ -488,6 +619,10 @@ func (n *node) Propose(ctx context.Context, data []byte) error {
return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
}
+func (n *node) ProposeEntryWithDrop(ctx context.Context, e pb.Entry, cancel context.CancelFunc) error {
+ return n.stepWithDrop(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{e}}, cancel)
+}
+
func (n *node) ProposeWithDrop(ctx context.Context, data []byte, cancel context.CancelFunc) error {
return n.stepWithDrop(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}}, cancel)
}
@@ -510,19 +645,13 @@ func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error {
}
func (n *node) stepWithDrop(ctx context.Context, m pb.Message, cancel context.CancelFunc) error {
- ch := n.recvc
- if m.Type == pb.MsgProp {
- ch = n.propc
- }
-
- select {
- case ch <- msgWithDrop{m: m, dropCB: cancel}:
+ if m.Type != pb.MsgProp {
+ n.addReqMessageToQueue(m)
return nil
- case <-ctx.Done():
- return ctx.Err()
- case <-n.done:
- return ErrStopped
}
+
+ err := n.addProposalToQueue(ctx, msgWithDrop{m: m, dropCB: cancel}, queueWaitTime, n.done)
+ return err
}
// Step advances the state machine using msgs. The ctx.Err() will be returned,
@@ -531,21 +660,16 @@ func (n *node) step(ctx context.Context, m pb.Message) error {
return n.stepWithDrop(ctx, m, nil)
}
-func (n *node) Ready() <-chan Ready { return n.readyc }
-
-func (n *node) Advance() {
- select {
- case n.advancec <- struct{}{}:
- case <-n.done:
- }
-}
-
func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
var cs pb.ConfState
select {
case n.confc <- cc:
case <-n.done:
+ return &cs
}
+ // notify event
+ n.NotifyEventCh()
+
select {
case cs = <-n.confstatec:
case <-n.done:
@@ -554,49 +678,56 @@ func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
}
func (n *node) Status() Status {
- c := make(chan Status)
+ c := make(chan Status, 1)
+ to := time.NewTimer(time.Second)
+ defer to.Stop()
select {
case n.status <- c:
- return <-c
case <-n.done:
return Status{}
+ case <-to.C:
+ return Status{}
}
-}
-
-func (n *node) ReportUnreachable(id uint64, group pb.Group) {
+ n.NotifyEventCh()
select {
- case n.recvc <- msgWithDrop{m: pb.Message{Type: pb.MsgUnreachable, From: id, FromGroup: group}, dropCB: nil}:
+ case s := <-c:
+ return s
case <-n.done:
+ return Status{}
+ case <-to.C:
+ return Status{}
}
}
+func (n *node) ReportUnreachable(id uint64, group pb.Group) {
+ n.addReqMessageToQueue(pb.Message{Type: pb.MsgUnreachable, From: id, FromGroup: group})
+}
+
func (n *node) ReportSnapshot(id uint64, gp pb.Group, status SnapshotStatus) {
rej := status == SnapshotFailure
-
- select {
- case n.recvc <- msgWithDrop{m: pb.Message{Type: pb.MsgSnapStatus, From: id, FromGroup: gp, Reject: rej}, dropCB: nil}:
- case <-n.done:
- }
+ n.addReqMessageToQueue(pb.Message{Type: pb.MsgSnapStatus, From: id, FromGroup: gp, Reject: rej})
}
func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
- select {
// manually set 'from' and 'to', so that leader can voluntarily transfers its leadership
- case n.recvc <- msgWithDrop{m: pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}, dropCB: nil}:
- case <-n.done:
- case <-ctx.Done():
- }
+ n.addReqMessageToQueue(pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead})
}
func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
}
-func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
+func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState, moreEntriesToApply bool) Ready {
rd := Ready{
- Entries: r.raftLog.unstableEntries(),
- CommittedEntries: r.raftLog.nextEnts(),
- Messages: r.msgs,
+ Entries: r.raftLog.unstableEntries(),
+ Messages: r.msgs,
+ }
+ if moreEntriesToApply {
+ rd.CommittedEntries = r.raftLog.nextEnts()
+ }
+ if len(rd.CommittedEntries) > 0 {
+ lastIndex := rd.CommittedEntries[len(rd.CommittedEntries)-1].Index
+ rd.MoreCommittedEntries = r.raftLog.hasMoreNextEnts(lastIndex)
}
if softSt := r.softState(); !softSt.equal(prevSoftSt) {
rd.SoftState = softSt
@@ -610,7 +741,8 @@ func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
if len(r.readStates) != 0 {
rd.ReadStates = r.readStates
}
- rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries))
+ // see: https://github.com/etcd-io/etcd/pull/10106
+ rd.MustSync = MustSync(r.hardState(), prevHardSt, len(rd.Entries))
return rd
}
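A hedged sketch of a driver loop against the reworked interface: the caller waits on EventNotifyCh, asks StepNode for a Ready, persists and applies it, and must hand the same Ready back to Advance before the next StepNode call can return one; MoreCommittedEntries signals that another committed batch can be fetched without waiting for a new notification. runNode and applyEntries are placeholders, not part of this repository, and HardState and snapshot handling are elided:

package main

import (
	"github.com/youzan/ZanRedisDB/raft"
	"github.com/youzan/ZanRedisDB/raft/raftpb"
)

// applyEntries stands in for the application's own state-machine apply path.
func applyEntries(ents []raftpb.Entry) {}

func runNode(n raft.Node, s *raft.MemoryStorage) {
	for {
		<-n.EventNotifyCh()
		for {
			rd, hasEvent := n.StepNode(true, false)
			if !hasEvent {
				break
			}
			s.Append(rd.Entries) // persist unstable entries
			applyEntries(rd.CommittedEntries)
			n.Advance(rd) // hand the same Ready back before asking for the next one
			if !rd.MoreCommittedEntries {
				break // nothing more committed yet; wait for the next notification
			}
		}
	}
}

func main() {}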
diff --git a/raft/node_bench_test.go b/raft/node_bench_test.go
index 4e60634a..243ceff4 100644
--- a/raft/node_bench_test.go
+++ b/raft/node_bench_test.go
@@ -27,9 +27,10 @@ func BenchmarkOneNode(b *testing.B) {
n := newNode()
s := NewMemoryStorage()
- r := newTestRaft(1, []uint64{1}, 10, 1, s)
- go n.run(r)
-
+ defer s.Close()
+ r := newTestRawNode(1, []uint64{1}, 10, 1, s)
+ n.r = r
+ n.prevS = newPrevState(r)
defer n.Stop()
n.Campaign(ctx)
@@ -40,11 +41,15 @@ func BenchmarkOneNode(b *testing.B) {
}()
for {
- rd := <-n.Ready()
+ <-n.EventNotifyCh()
+ rd, hasEvent := n.StepNode(true, false)
+ if !hasEvent {
+ continue
+ }
s.Append(rd.Entries)
// a reasonable disk sync latency
time.Sleep(1 * time.Millisecond)
- n.Advance()
+ n.Advance(rd)
if rd.HardState.Commit == uint64(b.N+1) {
return
}
diff --git a/raft/node_test.go b/raft/node_test.go
index 76ab825e..b695dcc3 100644
--- a/raft/node_test.go
+++ b/raft/node_test.go
@@ -16,43 +16,59 @@ package raft
import (
"bytes"
+ "math"
+ "os"
"reflect"
+ "strings"
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/pkg/testutil"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
+ "github.com/youzan/ZanRedisDB/pkg/testutil"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
"golang.org/x/net/context"
)
+func TestMain(m *testing.M) {
+ engine.SetLogger(int32(common.LOG_INFO), nil)
+ ret := m.Run()
+ os.Exit(ret)
+}
+
+type ignoreSizeHintMemStorage struct {
+ *MemoryStorage
+}
+
+func (s *ignoreSizeHintMemStorage) Entries(lo, hi uint64, maxSize uint64) ([]raftpb.Entry, error) {
+ return s.MemoryStorage.Entries(lo, hi, math.MaxUint64)
+}
+
// TestNodeStep ensures that node.Step sends msgProp to propc chan
// and other kinds of messages to recvc chan.
func TestNodeStep(t *testing.T) {
for i, msgn := range raftpb.MessageType_name {
n := &node{
- propc: make(chan msgWithDrop, 1),
- recvc: make(chan msgWithDrop, 1),
+ propQ: NewProposalQueue(1024, 1),
+ msgQ: NewMessageQueue(1024, false, 1),
}
msgt := raftpb.MessageType(i)
n.Step(context.TODO(), raftpb.Message{Type: msgt})
// Proposal goes to proc chan. Others go to recvc chan.
if msgt == raftpb.MsgProp {
- select {
- case <-n.propc:
- default:
+ props := n.propQ.Get()
+ if len(props) == 0 {
t.Errorf("%d: cannot receive %s on propc chan", msgt, msgn)
}
} else {
if IsLocalMsg(msgt) {
- select {
- case <-n.recvc:
+ msgs := n.msgQ.Get()
+ if len(msgs) != 0 {
t.Errorf("%d: step should ignore %s", msgt, msgn)
- default:
}
} else {
- select {
- case <-n.recvc:
- default:
+ msgs := n.msgQ.Get()
+ if len(msgs) == 0 {
t.Errorf("%d: cannot receive %s on recvc chan", msgt, msgn)
}
}
@@ -64,7 +80,7 @@ func TestNodeStep(t *testing.T) {
func TestNodeStepUnblock(t *testing.T) {
// a node without buffer to block step
n := &node{
- propc: make(chan msgWithDrop),
+ propQ: NewProposalQueue(1, 1),
done: make(chan struct{}),
}
@@ -79,6 +95,9 @@ func TestNodeStepUnblock(t *testing.T) {
{cancel, context.Canceled},
}
+ // fill one to make it full
+ n.Step(ctx, raftpb.Message{Type: raftpb.MsgProp})
+
for i, tt := range tests {
errc := make(chan error, 1)
go func() {
@@ -116,21 +135,35 @@ func TestNodePropose(t *testing.T) {
n := newNode()
s := NewMemoryStorage()
- r := newTestRaft(1, []uint64{1}, 10, 1, s)
- go n.run(r)
+ defer s.Close()
+ rn := newTestRawNode(1, []uint64{1}, 10, 1, s)
+ n.r = rn
+ r := rn.raft
+ n.prevS = newPrevState(rn)
+ n.NotifyEventCh()
n.Campaign(context.TODO())
for {
- rd := <-n.Ready()
+ select {
+ case <-n.EventNotifyCh():
+ case <-n.done:
+ return
+ }
+ rd, hasEvent := n.StepNode(true, false)
+ if !hasEvent {
+ continue
+ }
s.Append(rd.Entries)
// change the step function to appendStep until this raft becomes leader
if rd.SoftState.Lead == r.id {
r.step = appendStep
- n.Advance()
+ n.Advance(rd)
break
}
- n.Advance()
+ n.Advance(rd)
}
n.Propose(context.TODO(), []byte("somedata"))
+ n.StepNode(true, false)
+ time.Sleep(time.Millisecond)
n.Stop()
if len(msgs) != 1 {
@@ -156,29 +189,43 @@ func TestNodeReadIndex(t *testing.T) {
n := newNode()
s := NewMemoryStorage()
- r := newTestRaft(1, []uint64{1}, 10, 1, s)
+ defer s.Close()
+ rn := newTestRawNode(1, []uint64{1}, 10, 1, s)
+ r := rn.raft
r.readStates = wrs
+ n.r = rn
+ n.prevS = newPrevState(rn)
- go n.run(r)
n.Campaign(context.TODO())
+ time.Sleep(time.Millisecond)
for {
- rd := <-n.Ready()
+ select {
+ case <-n.EventNotifyCh():
+ case <-n.done:
+ return
+ }
+ rd, hasEvent := n.StepNode(true, false)
+ if !hasEvent {
+ continue
+ }
if !reflect.DeepEqual(rd.ReadStates, wrs) {
t.Errorf("ReadStates = %v, want %v", rd.ReadStates, wrs)
}
s.Append(rd.Entries)
- if rd.SoftState.Lead == r.id {
- n.Advance()
+ if rd.SoftState != nil && rd.SoftState.Lead == r.id {
+ n.Advance(rd)
break
}
- n.Advance()
+ n.Advance(rd)
}
r.step = appendStep
wrequestCtx := []byte("somedata2")
n.ReadIndex(context.TODO(), wrequestCtx)
+ n.StepNode(true, false)
+ time.Sleep(time.Millisecond)
n.Stop()
if len(msgs) != 1 {
@@ -201,6 +248,7 @@ func TestDisableProposalForwarding(t *testing.T) {
cfg3.DisableProposalForwarding = true
r3 := newRaft(cfg3)
nt := newNetwork(r1, r2, r3)
+ defer nt.closeAll()
// elect r1 as leader
nt.send(raftpb.Message{From: 1, To: 1, Type: raftpb.MsgHup})
@@ -232,6 +280,7 @@ func TestNodeReadIndexToOldLeader(t *testing.T) {
r3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
nt := newNetwork(r1, r2, r3)
+ defer nt.closeAll()
// elect r1 as leader
nt.send(raftpb.Message{From: 1, To: 1, Type: raftpb.MsgHup})
@@ -299,19 +348,31 @@ func TestNodeProposeConfig(t *testing.T) {
n := newNode()
s := NewMemoryStorage()
- r := newTestRaft(1, []uint64{1}, 10, 1, s)
- go n.run(r)
+ defer s.Close()
+ rn := newTestRawNode(1, []uint64{1}, 10, 1, s)
+ r := rn.raft
+ n.r = rn
+ n.prevS = newPrevState(rn)
+ n.NotifyEventCh()
n.Campaign(context.TODO())
for {
- rd := <-n.Ready()
+ select {
+ case <-n.EventNotifyCh():
+ case <-n.done:
+ return
+ }
+ rd, hasEvent := n.StepNode(true, false)
+ if !hasEvent {
+ continue
+ }
s.Append(rd.Entries)
// change the step function to appendStep until this raft becomes leader
if rd.SoftState.Lead == r.id {
r.step = appendStep
- n.Advance()
+ n.Advance(rd)
break
}
- n.Advance()
+ n.Advance(rd)
}
cc := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, ReplicaID: 1}
ccdata, err := cc.Marshal()
@@ -319,6 +380,8 @@ func TestNodeProposeConfig(t *testing.T) {
t.Fatal(err)
}
n.ProposeConfChange(context.TODO(), cc)
+ n.StepNode(true, false)
+ time.Sleep(time.Millisecond)
n.Stop()
if len(msgs) != 1 {
@@ -337,8 +400,10 @@ func TestNodeProposeConfig(t *testing.T) {
func TestNodeProposeAddDuplicateNode(t *testing.T) {
n := newNode()
s := NewMemoryStorage()
- r := newTestRaft(1, []uint64{1}, 10, 1, s)
- go n.run(r)
+ defer s.Close()
+ rn := newTestRawNode(1, []uint64{1}, 10, 1, s)
+ n.r = rn
+ n.prevS = newPrevState(rn)
n.Campaign(context.TODO())
rdyEntries := make([]raftpb.Entry, 0)
ticker := time.NewTicker(time.Millisecond * 100)
@@ -355,7 +420,11 @@ func TestNodeProposeAddDuplicateNode(t *testing.T) {
return
case <-ticker.C:
n.Tick()
- case rd := <-n.Ready():
+ case <-n.EventNotifyCh():
+ rd, hasEvent := n.StepNode(true, false)
+ if !hasEvent {
+ continue
+ }
s.Append(rd.Entries)
for _, e := range rd.Entries {
rdyEntries = append(rdyEntries, e)
@@ -364,11 +433,13 @@ func TestNodeProposeAddDuplicateNode(t *testing.T) {
case raftpb.EntryConfChange:
var cc raftpb.ConfChange
cc.Unmarshal(e.Data)
- n.ApplyConfChange(cc)
- applyConfChan <- struct{}{}
+ go func() {
+ n.ApplyConfChange(cc)
+ applyConfChan <- struct{}{}
+ }()
}
}
- n.Advance()
+ n.Advance(rd)
}
}
}()
@@ -408,8 +479,11 @@ func TestNodeProposeAddDuplicateNode(t *testing.T) {
// who is the current leader.
func TestBlockProposal(t *testing.T) {
n := newNode()
- r := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())
- go n.run(r)
+ rn := newTestRawNode(1, []uint64{1}, 10, 1, NewMemoryStorage())
+ n.r = rn
+ r := rn.raft
+ n.prevS = newPrevState(rn)
+ defer closeAndFreeRaft(r)
defer n.Stop()
errc := make(chan error, 1)
@@ -417,6 +491,7 @@ func TestBlockProposal(t *testing.T) {
errc <- n.Propose(context.TODO(), []byte("somedata"))
}()
+ return
testutil.WaitSchedule()
select {
case err := <-errc:
@@ -440,12 +515,16 @@ func TestBlockProposal(t *testing.T) {
func TestNodeTick(t *testing.T) {
n := newNode()
s := NewMemoryStorage()
- r := newTestRaft(1, []uint64{1}, 10, 1, s)
- go n.run(r)
+ defer s.Close()
+ rn := newTestRawNode(1, []uint64{1}, 10, 1, s)
+ n.r = rn
+ r := rn.raft
+ n.prevS = newPrevState(rn)
elapsed := r.electionElapsed
n.Tick()
for len(n.tickc) != 0 {
time.Sleep(100 * time.Millisecond)
+ n.StepNode(true, false)
}
n.Stop()
if r.electionElapsed != elapsed+1 {
@@ -458,15 +537,29 @@ func TestNodeTick(t *testing.T) {
func TestNodeStop(t *testing.T) {
n := newNode()
s := NewMemoryStorage()
- r := newTestRaft(1, []uint64{1}, 10, 1, s)
+ defer s.Close()
+ rn := newTestRawNode(1, []uint64{1}, 10, 1, s)
+ n.r = rn
+ n.prevS = newPrevState(rn)
+ n.NotifyEventCh()
donec := make(chan struct{})
go func() {
- n.run(r)
+ for {
+ select {
+ case <-n.EventNotifyCh():
+ n.StepNode(true, false)
+ case <-n.done:
+ return
+ }
+ }
+ }()
+ go func() {
close(donec)
}()
status := n.Status()
+ n.StepNode(true, false)
n.Stop()
select {
@@ -546,6 +639,7 @@ func TestNodeStart(t *testing.T) {
},
}
storage := NewMemoryStorage()
+ defer storage.Close()
c := &Config{
ID: 1,
ElectionTick: 10,
@@ -557,30 +651,35 @@ func TestNodeStart(t *testing.T) {
}
n := StartNode(c, []Peer{{NodeID: 1, ReplicaID: 1}}, false)
defer n.Stop()
- g := <-n.Ready()
+ g, _ := n.StepNode(true, false)
if !reflect.DeepEqual(g, wants[0]) {
t.Fatalf("#%d: g = %+v,\n w %+v", 1, g, wants[0])
} else {
storage.Append(g.Entries)
- n.Advance()
+ n.Advance(g)
}
- n.Campaign(ctx)
- rd := <-n.Ready()
+ if err := n.Campaign(ctx); err != nil {
+ t.Fatal(err)
+ }
+ rd, _ := n.StepNode(true, false)
storage.Append(rd.Entries)
- n.Advance()
+ n.Advance(rd)
n.Propose(ctx, []byte("foo"))
- if g2 := <-n.Ready(); !reflect.DeepEqual(g2, wants[1]) {
+ if g2, _ := n.StepNode(true, false); !reflect.DeepEqual(g2, wants[1]) {
t.Errorf("#%d: g = %+v,\n w %+v", 2, g2, wants[1])
} else {
storage.Append(g2.Entries)
- n.Advance()
+ n.Advance(g2)
}
select {
- case rd := <-n.Ready():
- t.Errorf("unexpected Ready: %+v", rd)
+ case <-n.EventNotifyCh():
+ rd, hasEvent := n.StepNode(true, false)
+ if hasEvent {
+ t.Errorf("unexpected Ready: %+v", rd)
+ }
case <-time.After(time.Millisecond):
}
}
@@ -593,13 +692,16 @@ func TestNodeRestart(t *testing.T) {
st := raftpb.HardState{Term: 1, Commit: 1}
want := Ready{
- HardState: st,
+ // No HardState is emitted because there was no change.
+ HardState: raftpb.HardState{},
// commit up to index commit index in st
CommittedEntries: entries[:st.Commit],
- MustSync: true,
+ // MustSync is false because no HardState or new entries are provided.
+ MustSync: false,
}
storage := NewMemoryStorage()
+ defer storage.Close()
storage.SetHardState(st)
storage.Append(entries)
grp := raftpb.Group{
@@ -619,14 +721,19 @@ func TestNodeRestart(t *testing.T) {
}
n := RestartNode(c)
defer n.Stop()
- if g := <-n.Ready(); !reflect.DeepEqual(g, want) {
+ <-n.EventNotifyCh()
+ g, _ := n.StepNode(true, false)
+ if !reflect.DeepEqual(g, want) {
t.Errorf("g = %+v,\n w %+v", g, want)
}
- n.Advance()
+ n.Advance(g)
select {
- case rd := <-n.Ready():
- t.Errorf("unexpected Ready: %+v", rd)
+ case <-n.EventNotifyCh():
+ rd, hasEvent := n.StepNode(true, false)
+ if hasEvent {
+ t.Errorf("unexpected Ready: %+v", rd)
+ }
case <-time.After(time.Millisecond):
}
}
@@ -655,13 +762,18 @@ func TestNodeRestartFromSnapshot(t *testing.T) {
st := raftpb.HardState{Term: 1, Commit: 3}
want := Ready{
- HardState: st,
+ // No HardState is emitted because nothing changed relative to what is
+ // already persisted.
+ HardState: raftpb.HardState{},
// commit up to index commit index in st
CommittedEntries: entries,
- MustSync: true,
+ // MustSync is only true when there is a new HardState or new entries;
+ // neither is the case here.
+ MustSync: false,
}
s := NewMemoryStorage()
+ defer s.Close()
s.SetHardState(st)
s.ApplySnapshot(snap)
s.Append(entries)
@@ -682,15 +794,19 @@ func TestNodeRestartFromSnapshot(t *testing.T) {
}
n := RestartNode(c)
defer n.Stop()
- if g := <-n.Ready(); !reflect.DeepEqual(g, want) {
+ g, _ := n.StepNode(true, false)
+ if !reflect.DeepEqual(g, want) {
t.Errorf("g = %+v,\n w %+v", g, want)
} else {
- n.Advance()
+ n.Advance(g)
}
select {
- case rd := <-n.Ready():
- t.Errorf("unexpected Ready: %+v", rd)
+ case <-n.EventNotifyCh():
+ rd, hasEvent := n.StepNode(true, false)
+ if hasEvent {
+ t.Errorf("unexpected Ready: %+v", rd)
+ }
case <-time.After(time.Millisecond):
}
}
@@ -700,6 +816,7 @@ func TestNodeAdvance(t *testing.T) {
defer cancel()
storage := NewMemoryStorage()
+ defer storage.Close()
grp := raftpb.Group{
NodeId: 1,
RaftReplicaId: 1,
@@ -717,25 +834,38 @@ func TestNodeAdvance(t *testing.T) {
}
n := StartNode(c, []Peer{{NodeID: 1, ReplicaID: 1}}, false)
defer n.Stop()
- rd := <-n.Ready()
+ <-n.EventNotifyCh()
+ rd, _ := n.StepNode(true, false)
storage.Append(rd.Entries)
- n.Advance()
+ n.Advance(rd)
n.Campaign(ctx)
- <-n.Ready()
+ <-n.EventNotifyCh()
+ n.StepNode(true, false)
n.Propose(ctx, []byte("foo"))
+ hasEvent := false
select {
- case rd = <-n.Ready():
- t.Fatalf("unexpected Ready before Advance: %+v", rd)
+ case <-n.EventNotifyCh():
+ rd, hasEvent = n.StepNode(true, false)
+ if hasEvent {
+ t.Fatalf("unexpected Ready before Advance: %+v", rd)
+ }
case <-time.After(time.Millisecond):
}
storage.Append(rd.Entries)
- n.Advance()
+ n.Advance(rd)
select {
- case <-n.Ready():
+ case <-n.EventNotifyCh():
+ rd, hasEvent = n.StepNode(true, false)
+ if !hasEvent {
+ t.Errorf("expect Ready after Advance, but there is no Ready available")
+ }
case <-time.After(100 * time.Millisecond):
- t.Errorf("expect Ready after Advance, but there is no Ready available")
+ rd, hasEvent = n.StepNode(true, false)
+ if !hasEvent {
+ t.Errorf("expect Ready after Advance, but there is no Ready available")
+ }
}
}
@@ -784,8 +914,10 @@ func TestNodeProposeAddLearnerNode(t *testing.T) {
defer ticker.Stop()
n := newNode()
s := NewMemoryStorage()
- r := newTestRaft(1, []uint64{1}, 10, 1, s)
- go n.run(r)
+ defer s.Close()
+ r := newTestRawNode(1, []uint64{1}, 10, 1, s)
+ n.r = r
+ n.prevS = newPrevState(r)
n.Campaign(context.TODO())
stop := make(chan struct{})
done := make(chan struct{})
@@ -798,28 +930,34 @@ func TestNodeProposeAddLearnerNode(t *testing.T) {
return
case <-ticker.C:
n.Tick()
- case rd := <-n.Ready():
+ case <-n.EventNotifyCh():
+ rd, hasEvent := n.StepNode(true, false)
+ if !hasEvent {
+ continue
+ }
s.Append(rd.Entries)
t.Logf("raft: %v", rd.Entries)
for _, ent := range rd.Entries {
if ent.Type == raftpb.EntryConfChange {
var cc raftpb.ConfChange
cc.Unmarshal(ent.Data)
- state := n.ApplyConfChange(cc)
- if len(state.Learners) == 0 || state.Learners[0] != cc.ReplicaID {
- t.Fatalf("apply conf change should return new added learner: %v", state.String())
- }
- if len(state.LearnerGroups) == 0 || state.LearnerGroups[0].String() != cc.NodeGroup.String() {
- t.Fatalf("apply conf change should return new added learner group: %v", state.String())
- }
- if len(state.Nodes) != 1 {
- t.Fatalf("add learner should not change the nodes: %v", state.String())
- }
- t.Logf("apply raft conf %v changed to: %v", cc, state.String())
- applyConfChan <- struct{}{}
+ go func() {
+ state := n.ApplyConfChange(cc)
+ if len(state.Learners) == 0 || state.Learners[0] != cc.ReplicaID {
+ t.Fatalf("apply conf change should return new added learner: %v", state.String())
+ }
+ if len(state.LearnerGroups) == 0 || state.LearnerGroups[0].String() != cc.NodeGroup.String() {
+ t.Fatalf("apply conf change should return new added learner group: %v", state.String())
+ }
+ if len(state.Nodes) != 1 {
+ t.Fatalf("add learner should not change the nodes: %v", state.String())
+ }
+ t.Logf("apply raft conf %v changed to: %v", cc, state.String())
+ applyConfChan <- struct{}{}
+ }()
}
}
- n.Advance()
+ n.Advance(rd)
}
}
}()
@@ -829,3 +967,225 @@ func TestNodeProposeAddLearnerNode(t *testing.T) {
close(stop)
<-done
}
+
+func TestAppendPagination(t *testing.T) {
+ const maxSizePerMsg = 2048
+ n := newNetworkWithConfig(func(c *Config) {
+ c.MaxSizePerMsg = maxSizePerMsg
+ }, nil, nil, nil)
+
+ seenFullMessage := false
+ // Inspect all messages to see that we never exceed the limit, but
+ // we do see messages of larger than half the limit.
+ n.msgHook = func(m raftpb.Message) bool {
+ if m.Type == raftpb.MsgApp {
+ size := 0
+ for _, e := range m.Entries {
+ size += len(e.Data)
+ }
+ if size > maxSizePerMsg {
+ t.Errorf("sent MsgApp that is too large: %d bytes", size)
+ }
+ if size > maxSizePerMsg/2 {
+ seenFullMessage = true
+ }
+ }
+ return true
+ }
+
+ n.send(raftpb.Message{From: 1, To: 1, Type: raftpb.MsgHup})
+
+ // Partition the network while we make our proposals. This forces
+ // the entries to be batched into larger messages.
+ n.isolate(1)
+ blob := []byte(strings.Repeat("a", 1000))
+ for i := 0; i < 5; i++ {
+ n.send(raftpb.Message{From: 1, To: 1, Type: raftpb.MsgProp, Entries: []raftpb.Entry{{Data: blob}}})
+ }
+ n.recover()
+
+ // After the partition recovers, tick the clock to wake everything
+ // back up and send the messages.
+ n.send(raftpb.Message{From: 1, To: 1, Type: raftpb.MsgBeat})
+ if !seenFullMessage {
+ t.Error("didn't see any messages more than half the max size; something is wrong with this test")
+ }
+}
+
+func TestCommitPagination(t *testing.T) {
+ s := NewMemoryStorage()
+ cfg := newTestConfig(1, []uint64{1}, 10, 1, s)
+ cfg.MaxSizePerMsg = 2048
+ r, err := NewRawNode(cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+ n := newNode()
+ n.r = r
+ n.prevS = newPrevState(r)
+ n.NotifyEventCh()
+ n.Campaign(context.TODO())
+
+ rd, _ := n.StepNode(true, false)
+ if len(rd.CommittedEntries) != 1 {
+ t.Fatalf("expected 1 (empty) entry, got %d", len(rd.CommittedEntries))
+ }
+ s.Append(rd.Entries)
+ n.Advance(rd)
+
+ blob := []byte(strings.Repeat("a", 1000))
+ for i := 0; i < 3; i++ {
+ if err := n.Propose(context.TODO(), blob); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // The 3 proposals will commit in two batches.
+ rd, _ = n.StepNode(true, false)
+ if len(rd.CommittedEntries) != 2 {
+ t.Fatalf("expected 2 entries in first batch, got %d", len(rd.CommittedEntries))
+ }
+ lastIndex := rd.CommittedEntries[len(rd.CommittedEntries)-1].Index
+ s.Append(rd.Entries)
+ n.Advance(rd)
+ rd, _ = n.StepNode(true, false)
+ if len(rd.CommittedEntries) != 1 {
+ t.Fatalf("expected 1 entry in second batch, got %d", len(rd.CommittedEntries))
+ }
+ s.Append(rd.Entries)
+ n.Advance(rd)
+ if rd.CommittedEntries[0].Index != lastIndex+1 {
+ t.Fatalf("expected 1 entry in second batch will be continued, got %d after %v", rd.CommittedEntries[0].Index, lastIndex)
+ }
+}
+
+// TestNodeCommitPaginationAfterRestart regression tests a scenario in which the
+// Storage's Entries size limitation is slightly more permissive than Raft's
+// internal one. The original bug was the following:
+//
+// - node learns that index 11 (or 100, doesn't matter) is committed
+// - nextEnts returns index 1..10 in CommittedEntries due to size limiting. However,
+// index 10 already exceeds maxBytes, due to a user-provided impl of Entries.
+// - Commit index gets bumped to 10
+// - the node persists the HardState, but crashes before applying the entries
+// - upon restart, the storage returns the same entries, but `slice` takes a different code path
+// (since it is now called with an upper bound of 10) and removes the last entry.
+// - Raft emits a HardState with a regressing commit index.
+//
+// A simpler version of this test would have the storage return far fewer entries than dictated
+// by maxSize (for example, exactly one entry) after the restart, resulting in a larger regression.
+// This wouldn't need to exploit anything about Raft-internal code paths to fail.
+func TestNodeCommitPaginationAfterRestart(t *testing.T) {
+ s := &ignoreSizeHintMemStorage{
+ MemoryStorage: NewRealMemoryStorage(),
+ }
+ persistedHardState := raftpb.HardState{
+ Term: 1,
+ Vote: 1,
+ Commit: 10,
+ }
+
+ s.hardState = persistedHardState
+ s.ents = make([]raftpb.Entry, 10)
+ var size uint64
+ for i := range s.ents {
+ ent := raftpb.Entry{
+ Term: 1,
+ Index: uint64(i + 1),
+ Type: raftpb.EntryNormal,
+ Data: []byte("a"),
+ }
+
+ s.ents[i] = ent
+ size += uint64(ent.Size())
+ }
+
+ cfg := newTestConfig(1, []uint64{1}, 10, 1, s)
+ // Set a MaxSizePerMsg that would suggest to Raft that the last committed entry should
+ // not be included in the initial rd.CommittedEntries. However, our storage will ignore
+ // this and *will* return it (which is how the Commit index ended up being 10 initially).
+ cfg.MaxSizePerMsg = size - uint64(s.ents[len(s.ents)-1].Size()) - 1
+
+ r, err := NewRawNode(cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+ n := newNode()
+ defer s.Close()
+ n.r = r
+ n.prevS = newPrevState(r)
+ n.NotifyEventCh()
+ n.Campaign(context.TODO())
+
+ defer n.Stop()
+
+ rd, _ := n.StepNode(true, false)
+ if !IsEmptyHardState(rd.HardState) && rd.HardState.Commit < persistedHardState.Commit {
+ t.Errorf("HardState regressed: Commit %d -> %d\nCommitting:\n",
+ persistedHardState.Commit, rd.HardState.Commit,
+ )
+ }
+}
+
+// TestNodeCommitEntriesWhileNoMoreApply checks that the commit index keeps advancing even
+// if the apply commit channel is full
+func TestNodeCommitEntriesWhileNoMoreApply(t *testing.T) {
+ s := NewMemoryStorage()
+ cfg := newTestConfig(1, []uint64{1}, 10, 1, s)
+ cfg.MaxSizePerMsg = 2048
+ r, err := NewRawNode(cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+ n := newNode()
+ n.r = r
+ n.prevS = newPrevState(r)
+ n.NotifyEventCh()
+ n.Campaign(context.TODO())
+
+ rd, _ := n.StepNode(true, false)
+ if len(rd.CommittedEntries) != 1 {
+ t.Fatalf("expected 1 (empty) entry, got %d", len(rd.CommittedEntries))
+ }
+ s.Append(rd.Entries)
+ n.Advance(rd)
+ lastIndex := rd.CommittedEntries[len(rd.CommittedEntries)-1].Index
+
+ blob := []byte(strings.Repeat("a", 1000))
+ for i := 0; i < 3; i++ {
+ if err := n.Propose(context.TODO(), blob); err != nil {
+ t.Fatal(err)
+ }
+ }
+ // step the node while the applier cannot accept more committed entries
+ rd, _ = n.StepNode(false, false)
+ if len(rd.CommittedEntries) != 0 {
+ t.Fatalf("expected 0 entries if no more need, got %d", len(rd.CommittedEntries))
+ }
+ if rd.HardState.Commit <= lastIndex {
+ t.Fatalf("hard commit should inc even no more commit entries: %v, %v", rd.HardState, lastIndex)
+ }
+ s.Append(rd.Entries)
+ n.Advance(rd)
+
+ // The 3 proposals will commit in two batches.
+ rd, _ = n.StepNode(true, false)
+ if len(rd.CommittedEntries) != 2 {
+ t.Fatalf("expected 2 entries in first batch, got %d", len(rd.CommittedEntries))
+ }
+ if rd.CommittedEntries[0].Index != lastIndex+1 {
+ t.Fatalf("expected 1 entry in second batch will be continued, got %d after %v", rd.CommittedEntries[0].Index, lastIndex)
+ }
+ lastIndex = rd.CommittedEntries[len(rd.CommittedEntries)-1].Index
+ s.Append(rd.Entries)
+ n.Advance(rd)
+ rd, _ = n.StepNode(true, false)
+ if len(rd.CommittedEntries) != 1 {
+ t.Fatalf("expected 1 entry in second batch, got %d", len(rd.CommittedEntries))
+ }
+ s.Append(rd.Entries)
+ n.Advance(rd)
+ if rd.CommittedEntries[0].Index != lastIndex+1 {
+ t.Fatalf("expected 1 entry in second batch will be continued, got %d after %v", rd.CommittedEntries[0].Index, lastIndex)
+ }
+}
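// Illustrative sketch only (not part of this patch): the event-driven driver
// pattern the tests above share in place of the old `rd := <-n.Ready()` loop.
// processReady stands in for caller-specific handling and is an assumption,
// not an API of this package.
func driveNodeSketch(n *node, s IExtRaftStorage, ticker *time.Ticker, stop chan struct{}, processReady func(Ready)) {
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			n.Tick()
		case <-n.EventNotifyCh():
			// A notification only means "something may have changed";
			// StepNode reports whether a Ready actually needs handling.
			rd, hasEvent := n.StepNode(true, false)
			if !hasEvent {
				continue
			}
			s.Append(rd.Entries)
			processReady(rd)
			// Advance now acknowledges the specific Ready it was given.
			n.Advance(rd)
		}
	}
}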
diff --git a/raft/progress.go b/raft/progress.go
index acd09717..0db37a26 100644
--- a/raft/progress.go
+++ b/raft/progress.go
@@ -17,7 +17,7 @@ package raft
import (
"fmt"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
const (
@@ -190,8 +190,8 @@ func (pr *Progress) needSnapshotAbort() bool {
}
func (pr *Progress) String() string {
- return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d, group = %s",
- pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot, pr.group.String())
+ return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d, group = %s, recentActive = %v, isLearner = %v",
+ pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot, pr.group.String(), pr.RecentActive, pr.IsLearner)
}
type inflights struct {
diff --git a/raft/proposal_queue.go b/raft/proposal_queue.go
new file mode 100644
index 00000000..13dbfe5e
--- /dev/null
+++ b/raft/proposal_queue.go
@@ -0,0 +1,177 @@
+// Copyright 2017-2019 Lei Ni (nilei81@gmail.com)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "errors"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+var ErrProposalQueueTimeout = errors.New("queue proposal timeout")
+var ErrProposalQueueTooFull = errors.New("queue proposal too full")
+
+// ProposalQueue is the queue used to hold proposed Raft messages.
+type ProposalQueue struct {
+ size uint64
+ left []msgWithDrop
+ right []msgWithDrop
+ leftInWrite bool
+ stopped bool
+ idx uint64
+ oldIdx uint64
+ cycle uint64
+ lazyFreeCycle uint64
+ mu sync.Mutex
+ waitC chan struct{}
+ waitCnt int64
+}
+
+// NewProposalQueue creates a new ProposalQueue instance.
+func NewProposalQueue(size uint64, lazyFreeCycle uint64) *ProposalQueue {
+ q := &ProposalQueue{
+ size: size,
+ lazyFreeCycle: lazyFreeCycle,
+ left: make([]msgWithDrop, size),
+ right: make([]msgWithDrop, size),
+ waitC: make(chan struct{}, 1),
+ }
+ return q
+}
+
+// Close closes the queue so no further messages can be added.
+func (q *ProposalQueue) Close() {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+ q.stopped = true
+ if q.waitC != nil {
+ close(q.waitC)
+ }
+}
+
+func (q *ProposalQueue) targetQueue() []msgWithDrop {
+ var t []msgWithDrop
+ if q.leftInWrite {
+ t = q.left
+ } else {
+ t = q.right
+ }
+ return t
+}
+
+// AddWait adds the specified message to the queue, waiting if the queue is full.
+func (q *ProposalQueue) AddWait(ctx context.Context, msg msgWithDrop,
+ to time.Duration, stopC chan struct{}) (bool, bool, error) {
+ if to <= 0 {
+ added, stopped, _ := q.Add(msg)
+ return added, stopped, nil
+ }
+ var t *time.Timer
+ for {
+ added, stopped, w := q.Add(msg)
+ if added || stopped {
+ if t != nil {
+ t.Stop()
+ }
+ return added, stopped, nil
+ }
+ // too full
+ if atomic.LoadInt64(&q.waitCnt) > int64(q.size)*5 {
+ if t != nil {
+ t.Stop()
+ }
+ return false, stopped, ErrProposalQueueTooFull
+ }
+ if t == nil {
+ t = time.NewTimer(to)
+ }
+ atomic.AddInt64(&q.waitCnt, 1)
+ select {
+ case <-stopC:
+ atomic.AddInt64(&q.waitCnt, -1)
+ t.Stop()
+ return false, false, ErrStopped
+ case <-ctx.Done():
+ atomic.AddInt64(&q.waitCnt, -1)
+ t.Stop()
+ return false, false, ctx.Err()
+ case <-t.C:
+ atomic.AddInt64(&q.waitCnt, -1)
+ t.Stop()
+ return false, false, ErrProposalQueueTimeout
+ case <-w:
+ }
+ atomic.AddInt64(&q.waitCnt, -1)
+ }
+}
+
+// Add adds the specified message to the queue.
+func (q *ProposalQueue) Add(msg msgWithDrop) (bool, bool, chan struct{}) {
+ q.mu.Lock()
+ wc := q.waitC
+ if q.idx >= q.size {
+ q.mu.Unlock()
+ return false, q.stopped, wc
+ }
+ if q.stopped {
+ q.mu.Unlock()
+ return false, true, wc
+ }
+ w := q.targetQueue()
+ w[q.idx] = msg
+ q.idx++
+ q.mu.Unlock()
+ return true, false, wc
+}
+
+func (q *ProposalQueue) gc() {
+ if q.lazyFreeCycle > 0 {
+ oldq := q.targetQueue()
+ if q.lazyFreeCycle == 1 {
+ for i := uint64(0); i < q.oldIdx; i++ {
+ oldq[i].m.Entries = nil
+ }
+ } else if q.cycle%q.lazyFreeCycle == 0 {
+ for i := uint64(0); i < q.size; i++ {
+ oldq[i].m.Entries = nil
+ }
+ }
+ }
+}
+
+// Get returns everything currently in the queue.
+func (q *ProposalQueue) Get() []msgWithDrop {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+ needNotify := false
+ if q.idx >= q.size {
+ needNotify = true
+ }
+ q.cycle++
+ sz := q.idx
+ q.idx = 0
+ t := q.targetQueue()
+ q.leftInWrite = !q.leftInWrite
+ q.gc()
+ q.oldIdx = sz
+ if needNotify {
+ close(q.waitC)
+ q.waitC = make(chan struct{}, 1)
+ }
+ return t[:sz]
+}
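// Illustrative sketch only (not part of this patch): how a proposer and the
// step loop might use the queue. Get swaps the double buffer, so Add never
// blocks the consumer; AddWait gives proposers back-pressure. queueMsg is an
// assumed placeholder value.
func proposalQueueSketch(ctx context.Context, stopC chan struct{}, queueMsg msgWithDrop) {
	q := NewProposalQueue(1024, 1)
	defer q.Close()

	// Proposer side: wait up to one second when the queue is full.
	if _, stopped, err := q.AddWait(ctx, queueMsg, time.Second, stopC); stopped || err != nil {
		return
	}

	// Consumer side (normally the node's step loop): drain the current buffer.
	// Get also wakes any AddWait callers blocked on a full queue.
	for _, m := range q.Get() {
		_ = m.m // m.m is the pb.Message to feed into the raft state machine
	}
}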
diff --git a/raft/raft.go b/raft/raft.go
index 6abba905..ad9e58b4 100644
--- a/raft/raft.go
+++ b/raft/raft.go
@@ -25,7 +25,7 @@ import (
"sync"
"time"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
// None is a placeholder node ID used when there is no leader.
@@ -149,6 +149,9 @@ type Config struct {
// replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per
// message.
MaxSizePerMsg uint64
+ // MaxCommittedSizePerReady limits the size of the committed entries which
+ // can be applied.
+ MaxCommittedSizePerReady uint64
// MaxInflightMsgs limits the max number of in-flight append messages during
// optimistic replication phase. The application transportation layer usually
// has its own sending buffer over TCP/UDP. Setting MaxInflightMsgs to avoid
@@ -213,6 +216,11 @@ func (c *Config) validate() error {
if c.Storage == nil {
return errors.New("storage cannot be nil")
}
+ // default MaxCommittedSizePerReady to MaxSizePerMsg because they were
+ // previously the same parameter.
+ if c.MaxCommittedSizePerReady == 0 {
+ c.MaxCommittedSizePerReady = c.MaxSizePerMsg
+ }
if c.MaxInflightMsgs <= 0 {
return errors.New("max inflight messages must be greater than 0")
@@ -253,6 +261,7 @@ type raft struct {
maxMsgSize uint64
prs map[uint64]*Progress
learnerPrs map[uint64]*Progress
+ matchBuf uint64Slice
state StateType
//isLearner is true if the local raft node is a learner.
@@ -303,7 +312,7 @@ func newRaft(c *Config) *raft {
if err := c.validate(); err != nil {
panic(err.Error())
}
- raftlog := newLog(c.Storage, c.Logger)
+ raftlog := newLogWithSize(c.Storage, c.Logger, c.MaxCommittedSizePerReady)
hs, cs, err := c.Storage.InitialState()
if err != nil {
panic(err) // TODO(bdarnell)
@@ -499,22 +508,35 @@ func (r *raft) forEachProgress(f func(id uint64, pr *Progress)) {
}
}
-// sendAppend sends RPC, with entries to the given peer.
+// sendAppend sends an append RPC with new entries (if any) and the
+// current commit index to the given peer.
func (r *raft) sendAppend(to uint64) {
+ r.maybeSendAppend(to, true)
+}
+
+// maybeSendAppend sends an append RPC with new entries to the given peer,
+// if necessary. Returns true if a message was sent. The sendIfEmpty
+// argument controls whether messages with no entries will be sent
+// ("empty" messages are useful to convey updated Commit indexes, but
+// are undesirable when we're sending multiple messages in a batch).
+func (r *raft) maybeSendAppend(to uint64, sendIfEmpty bool) bool {
pr := r.getProgress(to)
if pr.IsPaused() {
- return
+ return false
}
m := pb.Message{FromGroup: r.group, ToGroup: pr.group}
m.To = to
term, errt := r.raftLog.term(pr.Next - 1)
ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize)
+ if len(ents) == 0 && !sendIfEmpty {
+ return false
+ }
if errt != nil || erre != nil { // send snapshot if we failed to get term or entries
if !pr.RecentActive {
r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to)
- return
+ return false
}
m.Type = pb.MsgSnap
@@ -522,7 +544,7 @@ func (r *raft) sendAppend(to uint64) {
if err != nil {
if err == ErrSnapshotTemporarilyUnavailable {
r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to)
- return
+ return false
}
panic(err) // TODO(bdarnell)
}
@@ -556,6 +578,7 @@ func (r *raft) sendAppend(to uint64) {
}
}
r.send(m)
+ return true
}
// sendHeartbeat sends an empty MsgApp
@@ -609,17 +632,40 @@ func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
})
}
+func (r *raft) advance(rd Ready) {
+ // If entries were applied (or a snapshot), update our cursor for
+ // the next Ready. Note that if the current HardState contains a
+ // new Commit index, this does not mean that we're also applying
+ // all of the new entries due to commit pagination by size.
+ if index := rd.appliedCursor(); index > 0 {
+ r.raftLog.appliedTo(index)
+ }
+ if len(rd.Entries) > 0 {
+ e := rd.Entries[len(rd.Entries)-1]
+ r.raftLog.stableTo(e.Index, e.Term)
+ }
+ if !IsEmptySnap(rd.Snapshot) {
+ r.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
+ }
+}
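// For reference (hedged): appliedCursor is defined on Ready elsewhere in this
// package. In upstream etcd it returns the highest index the caller is
// expected to have applied from this Ready, roughly:
//
//	func (rd Ready) appliedCursor() uint64 {
//		if n := len(rd.CommittedEntries); n > 0 {
//			return rd.CommittedEntries[n-1].Index
//		}
//		if index := rd.Snapshot.Metadata.Index; index > 0 {
//			return index
//		}
//		return 0
//	}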
+
// maybeCommit attempts to advance the commit index. Returns true if
// the commit index changed (in which case the caller should call
// r.bcastAppend).
func (r *raft) maybeCommit() bool {
- // TODO(bmizerany): optimize.. Currently naive
- mis := make(uint64Slice, 0, len(r.prs))
+ // Preserving matchBuf across calls is an optimization
+ // used to avoid allocating a new slice on each call.
+ if cap(r.matchBuf) < len(r.prs) {
+ r.matchBuf = make(uint64Slice, len(r.prs))
+ }
+ r.matchBuf = r.matchBuf[:len(r.prs)]
+ idx := 0
for _, p := range r.prs {
- mis = append(mis, p.Match)
+ r.matchBuf[idx] = p.Match
+ idx++
}
- sort.Sort(sort.Reverse(mis))
- mci := mis[r.quorum()-1]
+ sort.Sort(&r.matchBuf)
+ mci := r.matchBuf[len(r.matchBuf)-r.quorum()]
return r.raftLog.maybeCommit(mci, r.Term)
}
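// Worked example (illustrative only): with Match values {1: 7, 2: 5, 3: 3}
// and quorum() == 2, matchBuf sorts ascending to [3, 5, 7] and
// mci = matchBuf[3-2] = 5, the highest index replicated on a quorum;
// maybeCommit then asks raftLog to commit up to 5 if the term check passes.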
@@ -731,6 +777,8 @@ func (r *raft) becomePreCandidate() {
r.step = stepCandidate
r.votes = make(map[uint64]bool)
r.tick = r.tickElection
+ // see: https://github.com/etcd-io/etcd/pull/8334
+ r.lead = None
r.state = StatePreCandidate
r.logger.Infof("%x(%v) became pre-candidate at term %d", r.id, r.group.Name, r.Term)
}
@@ -745,6 +793,13 @@ func (r *raft) becomeLeader() {
r.tick = r.tickHeartbeat
r.lead = r.id
r.state = StateLeader
+ // see: https://github.com/etcd-io/etcd/pull/10279
+ // Followers enter replicate mode when they've been successfully probed
+ // (perhaps after having received a snapshot as a result). The leader is
+ // trivially in this state. Note that r.reset() has initialized this
+ // progress with the last index already.
+ r.prs[r.id].becomeReplicate()
+
ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit)
if err != nil {
r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err)
@@ -762,7 +817,41 @@ func (r *raft) becomeLeader() {
r.logger.Infof("%x(%v) became leader at term %d", r.id, r.group.Name, r.Term)
}
+func (r *raft) hup(t CampaignType) {
+ if r.state == StateLeader {
+ r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
+ return
+ }
+
+ if !r.promotable() {
+ r.logger.Warningf("%x is unpromotable and can not campaign; ignoring MsgHup", r.id)
+ return
+ }
+ ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit)
+ if err != nil {
+ if err == ErrCompacted {
+ r.logger.Errorf("%x cannot campaign at term %d since log is compacted: %d", r.id, r.Term, r.raftLog.applied)
+ return
+ }
+ r.logger.Panicf("unexpected error getting unapplied entries (%v)", err)
+ }
+ if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied {
+ r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n)
+ return
+ }
+
+ r.logger.Infof("%x(%v) is starting a new election at term %d", r.id, r.group.Name, r.Term)
+ r.campaign(t)
+}
+
+// campaign transitions the raft instance to candidate state. This must only be
+// called after verifying that this is a legitimate transition.
func (r *raft) campaign(t CampaignType) {
+ if !r.promotable() {
+ // This path should not be hit (callers are supposed to check), but
+ // better safe than sorry.
+ r.logger.Warningf("%x is unpromotable; campaign() should have been called", r.id)
+ }
var term uint64
var voteMsg pb.MessageType
if t == campaignPreElection {
@@ -858,7 +947,7 @@ func (r *raft) Step(m pb.Message) error {
}
case m.Term < r.Term:
- if r.checkQuorum && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
+ if (r.checkQuorum || r.preVote) && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
// We have received messages from a leader at a lower term. It is possible
// that these messages were simply delayed in the network, but this could
// also mean that this node has advanced its term number during a network
@@ -873,6 +962,13 @@ func (r *raft) Step(m pb.Message) error {
// but it will not receive MsgApp or MsgHeartbeat, so it will not create
// disruptive term increases
r.send(pb.Message{To: m.From, ToGroup: m.FromGroup, Type: pb.MsgAppResp})
+ } else if m.Type == pb.MsgPreVote {
+ // Before Pre-Vote was enabled, there may have been a candidate with a higher
+ // term but a shorter log. After updating to Pre-Vote, the cluster may deadlock
+ // if we drop messages with a lower term.
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
+ r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+ r.send(pb.Message{To: m.From, Term: r.Term, Type: pb.MsgPreVoteResp, Reject: true})
} else {
// ignore other cases
r.logger.Infof("%x(%v) [term: %d] ignored a %s message with lower term from %x [term: %d]",
@@ -883,30 +979,11 @@ func (r *raft) Step(m pb.Message) error {
switch m.Type {
case pb.MsgHup:
- if r.state != StateLeader {
- ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit)
- if err != nil {
- if err == ErrCompacted {
- r.logger.Errorf("%x cannot campaign at term %d since log is compacted: %d", r.id, r.Term, r.raftLog.applied)
- return nil
- }
- r.logger.Panicf("unexpected error getting unapplied entries (%v)", err)
- }
- if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied {
- r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n)
- return nil
- }
-
- r.logger.Infof("%x(%v) is starting a new election at term %d", r.id, r.group.Name, r.Term)
- if r.preVote {
- r.campaign(campaignPreElection)
- } else {
- r.campaign(campaignElection)
- }
+ if r.preVote {
+ r.hup(campaignPreElection)
} else {
- r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
+ r.hup(campaignElection)
}
-
case pb.MsgVote, pb.MsgPreVote:
if r.isLearner {
// TODO: learner may need to vote, in case of node down when confchange.
@@ -986,10 +1063,12 @@ func stepLeader(r *raft, m pb.Message) bool {
return true
}
- for i, e := range m.Entries {
+ for i := range m.Entries {
+ e := &m.Entries[i]
if e.Type == pb.EntryConfChange {
if r.pendingConf {
- r.logger.Infof("propose conf %s ignored since pending unapplied configuration", e.String())
+ r.logger.Infof("propose conf %s ignored since pending unapplied configuration [applied: %d]",
+ e, r.raftLog.applied)
m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
} else {
r.logger.Infof("propose conf change %s ", e.String())
@@ -1024,7 +1103,13 @@ func stepLeader(r *raft, m pb.Message) bool {
}
}
} else {
- r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
+ // there is only one voting member (the leader) in the cluster
+ if m.From == None || m.From == r.id { // from leader itself
+ r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
+ } else { // from learner member
+ // see https://github.com/etcd-io/etcd/pull/10590
+ r.send(pb.Message{To: m.From, ToGroup: m.FromGroup, Type: pb.MsgReadIndexResp, Index: r.raftLog.committed, Entries: m.Entries})
+ }
}
return false
@@ -1058,7 +1143,13 @@ func stepLeader(r *raft, m pb.Message) bool {
pr.becomeReplicate()
case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort():
r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+ // Transition back to replicating state via probing state
+ // (which takes the snapshot into account). If we didn't
+ // move to replicating state, that would only happen with
+ // the next round of appends (but there may not be a next
+ // round for a while, exposing an inconsistent RaftStatus).
pr.becomeProbe()
+ pr.becomeReplicate()
case pr.State == ProgressStateReplicate:
pr.ins.freeTo(m.Index)
}
@@ -1066,10 +1157,18 @@ func stepLeader(r *raft, m pb.Message) bool {
if r.maybeCommit() {
r.bcastAppend()
} else if oldPaused {
- // update() reset the wait state on this node. If we had delayed sending
- // an update before, send it now.
+ // If we were paused before, this node may be missing the
+ // latest commit index, so send it.
r.sendAppend(m.From)
}
+ // We've updated flow control information above, which may
+ // allow us to send multiple (size-limited) in-flight messages
+ // at once (such as when transitioning from probe to
+ // replicate, or when freeTo() covers multiple messages). If
+ // we have more entries to send, send as many messages as we
+ // can (without sending empty messages for the commit index)
+ for r.maybeSendAppend(m.From, false) {
+ }
// Transfer leadership is in progress.
if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
r.logger.Infof("%x(%v) sent MsgTimeoutNow to %x after received MsgAppResp", r.id, r.group.Name, m.From)
@@ -1183,10 +1282,10 @@ func stepCandidate(r *raft, m pb.Message) bool {
r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
return true
case pb.MsgApp:
- r.becomeFollower(r.Term, m.From)
+ r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
r.handleAppendEntries(m)
case pb.MsgHeartbeat:
- r.becomeFollower(r.Term, m.From)
+ r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
r.handleHeartbeat(m)
case pb.MsgSnap:
r.becomeFollower(m.Term, m.From)
@@ -1203,6 +1302,8 @@ func stepCandidate(r *raft, m pb.Message) bool {
r.bcastAppend()
}
case len(r.votes) - gr:
+ // pb.MsgPreVoteResp contains future term of pre-candidate
+ // m.Term > r.Term; reuse r.Term
r.becomeFollower(r.Term, None)
}
case pb.MsgTimeoutNow:
@@ -1254,7 +1355,7 @@ func stepFollower(r *raft, m pb.Message) bool {
// Leadership transfers never use pre-vote even if r.preVote is true; we
// know we are not recovering from a partition so there is no need for the
// extra round trip.
- r.campaign(campaignTransfer)
+ r.hup(campaignTransfer)
} else {
r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From)
}
@@ -1325,8 +1426,9 @@ func (r *raft) restore(s pb.Snapshot) bool {
return false
}
- // The normal peer can't become learner.
- if !r.isLearner {
+ // Both `prs` and `learnerPrs` being empty means the peer was newly created by a
+ // conf change, in which case we should accept the snapshot and make it a learner.
+ if (len(r.prs) > 0 || len(r.learnerPrs) > 0) && !r.isLearner {
for _, id := range s.Metadata.ConfState.Learners {
if id == r.id {
r.logger.Errorf("%x can't become learner when restores snapshot [index: %d, term: %d]", r.id, s.Metadata.Index, s.Metadata.Term)
@@ -1376,8 +1478,8 @@ func (r *raft) restoreNode(nodes []uint64, grpsConf []*pb.Group, isLearner bool)
// promotable indicates whether state machine can be promoted to leader,
// which is true when its own id is in progress list.
func (r *raft) promotable() bool {
- _, ok := r.prs[r.id]
- return ok
+ pr, ok := r.prs[r.id]
+ return ok && pr != nil && !pr.IsLearner && !r.raftLog.hasPendingSnapshot()
}
func (r *raft) updateNode(id uint64, g pb.Group) {
diff --git a/raft/raft_flow_control_test.go b/raft/raft_flow_control_test.go
index 028d19d7..47df559c 100644
--- a/raft/raft_flow_control_test.go
+++ b/raft/raft_flow_control_test.go
@@ -17,7 +17,7 @@ package raft
import (
"testing"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
// TestMsgAppFlowControlFull ensures:
@@ -25,6 +25,7 @@ import (
// 2. when the window is full, no more msgApp can be sent.
func TestMsgAppFlowControlFull(t *testing.T) {
r := newTestRaft(1, []uint64{1, 2}, 5, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
@@ -61,6 +62,7 @@ func TestMsgAppFlowControlFull(t *testing.T) {
// 2. out-of-dated msgAppResp has no effect on the sliding window.
func TestMsgAppFlowControlMoveForward(t *testing.T) {
r := newTestRaft(1, []uint64{1, 2}, 5, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
@@ -106,6 +108,7 @@ func TestMsgAppFlowControlMoveForward(t *testing.T) {
// frees one slot if the window is full.
func TestMsgAppFlowControlRecvHeartbeat(t *testing.T) {
r := newTestRaft(1, []uint64{1, 2}, 5, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
diff --git a/raft/raft_paper_test.go b/raft/raft_paper_test.go
index a55a7ba5..86627655 100644
--- a/raft/raft_paper_test.go
+++ b/raft/raft_paper_test.go
@@ -33,7 +33,7 @@ import (
"reflect"
"sort"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
func TestFollowerUpdateTermFromMessage(t *testing.T) {
@@ -53,6 +53,7 @@ func TestLeaderUpdateTermFromMessage(t *testing.T) {
// Reference: section 5.1
func testUpdateTermFromMessage(t *testing.T, state StateType) {
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
switch state {
case StateFollower:
r.becomeFollower(1, 2)
@@ -84,6 +85,7 @@ func TestRejectStaleTermMessage(t *testing.T) {
return false
}
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.step = fakeStep
r.loadState(pb.HardState{Term: 2})
@@ -98,6 +100,7 @@ func TestRejectStaleTermMessage(t *testing.T) {
// Reference: section 5.2
func TestStartAsFollower(t *testing.T) {
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
if r.state != StateFollower {
t.Errorf("state = %s, want %s", r.state, StateFollower)
}
@@ -111,6 +114,7 @@ func TestLeaderBcastBeat(t *testing.T) {
// heartbeat interval
hi := 1
r := newTestRaft(1, []uint64{1, 2, 3}, 10, hi, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
for i := 0; i < 10; i++ {
@@ -157,6 +161,7 @@ func testNonleaderStartElection(t *testing.T, state StateType) {
// election timeout
et := 10
r := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
switch state {
case StateFollower:
r.becomeFollower(1, 2)
@@ -225,10 +230,11 @@ func TestLeaderElectionInOneRoundRPC(t *testing.T) {
}
for i, tt := range tests {
r := newTestRaft(1, idsBySize(tt.size), 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
for id, vote := range tt.votes {
- r.Step(pb.Message{From: id, To: 1, Type: pb.MsgVoteResp, Reject: !vote})
+ r.Step(pb.Message{From: id, To: 1, Term: r.Term, Type: pb.MsgVoteResp, Reject: !vote})
}
if r.state != tt.state {
@@ -258,6 +264,7 @@ func TestFollowerVote(t *testing.T) {
}
for i, tt := range tests {
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.loadState(pb.HardState{Term: 1, Vote: tt.vote})
r.Step(pb.Message{From: tt.nvote, FromGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},
@@ -288,6 +295,7 @@ func TestCandidateFallback(t *testing.T) {
}
for i, tt := range tests {
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
if r.state != StateCandidate {
t.Fatalf("unexpected state = %s, want %s", r.state, StateCandidate)
@@ -321,6 +329,7 @@ func TestCandidateElectionTimeoutRandomized(t *testing.T) {
func testNonleaderElectionTimeoutRandomized(t *testing.T, state StateType) {
et := 10
r := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
timeouts := make(map[int]bool)
for round := 0; round < 50*et; round++ {
switch state {
@@ -368,6 +377,11 @@ func testNonleadersElectionTimeoutNonconflict(t *testing.T, state StateType) {
for k := range rs {
rs[k] = newTestRaft(ids[k], ids, et, 1, NewMemoryStorage())
}
+ defer func() {
+ for k := range rs {
+ closeAndFreeRaft(rs[k])
+ }
+ }()
conflicts := 0
for round := 0; round < 1000; round++ {
for _, r := range rs {
@@ -410,6 +424,7 @@ func testNonleadersElectionTimeoutNonconflict(t *testing.T, state StateType) {
func TestLeaderStartReplication(t *testing.T) {
s := NewMemoryStorage()
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
commitNoopEntry(r, s)
@@ -451,6 +466,7 @@ func TestLeaderStartReplication(t *testing.T) {
func TestLeaderCommitEntry(t *testing.T) {
s := NewMemoryStorage()
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
commitNoopEntry(r, s)
@@ -505,6 +521,7 @@ func TestLeaderAcknowledgeCommit(t *testing.T) {
for i, tt := range tests {
s := NewMemoryStorage()
r := newTestRaft(1, idsBySize(tt.size), 10, 1, s)
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
commitNoopEntry(r, s)
@@ -539,6 +556,7 @@ func TestLeaderCommitPrecedingEntries(t *testing.T) {
storage := NewMemoryStorage()
storage.Append(tt)
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)
+ defer closeAndFreeRaft(r)
r.loadState(pb.HardState{Term: 2})
r.becomeCandidate()
r.becomeLeader()
@@ -594,6 +612,7 @@ func TestFollowerCommitEntry(t *testing.T) {
}
for i, tt := range tests {
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.becomeFollower(1, 2)
r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 1, Entries: tt.ents, Commit: tt.commit})
@@ -637,6 +656,7 @@ func TestFollowerCheckMsgApp(t *testing.T) {
storage := NewMemoryStorage()
storage.Append(ents)
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)
+ defer closeAndFreeRaft(r)
r.loadState(pb.HardState{Commit: 1})
r.becomeFollower(2, 2)
@@ -697,6 +717,7 @@ func TestFollowerAppendEntries(t *testing.T) {
storage := NewMemoryStorage()
storage.Append([]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}})
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)
+ defer closeAndFreeRaft(r)
r.becomeFollower(2, 2)
r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index, Entries: tt.ents})
@@ -764,10 +785,12 @@ func TestLeaderSyncFollowerLog(t *testing.T) {
}
for i, tt := range tests {
leadStorage := NewMemoryStorage()
+ defer leadStorage.Close()
leadStorage.Append(ents)
lead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, leadStorage)
lead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})
followerStorage := NewMemoryStorage()
+ defer followerStorage.Close()
followerStorage.Append(tt)
follower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)
follower.loadState(pb.HardState{Term: term - 1})
@@ -801,6 +824,7 @@ func TestVoteRequest(t *testing.T) {
}
for j, tt := range tests {
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.Step(pb.Message{
From: 2, To: 1, Type: pb.MsgApp, Term: tt.wterm - 1, LogTerm: 0, Index: 0, Entries: tt.ents,
})
@@ -864,6 +888,7 @@ func TestVoter(t *testing.T) {
storage := NewMemoryStorage()
storage.Append(tt.ents)
r := newTestRaft(1, []uint64{1, 2}, 10, 1, storage)
+ defer closeAndFreeRaft(r)
r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVote, Term: 3, LogTerm: tt.logterm, Index: tt.index})
@@ -900,6 +925,7 @@ func TestLeaderOnlyCommitsLogFromCurrentTerm(t *testing.T) {
storage := NewMemoryStorage()
storage.Append(ents)
r := newTestRaft(1, []uint64{1, 2}, 10, 1, storage)
+ defer closeAndFreeRaft(r)
r.loadState(pb.HardState{Term: 2})
// become leader at term 3
r.becomeCandidate()
@@ -921,7 +947,7 @@ func (s messageSlice) Len() int { return len(s) }
func (s messageSlice) Less(i, j int) bool { return fmt.Sprint(s[i]) < fmt.Sprint(s[j]) }
func (s messageSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func commitNoopEntry(r *raft, s *MemoryStorage) {
+func commitNoopEntry(r *raft, s IExtRaftStorage) {
if r.state != StateLeader {
panic("it should only be used when it is the leader")
}
diff --git a/raft/raft_snap_test.go b/raft/raft_snap_test.go
index 0399e180..f453ef35 100644
--- a/raft/raft_snap_test.go
+++ b/raft/raft_snap_test.go
@@ -17,7 +17,7 @@ package raft
import (
"testing"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
var (
@@ -32,6 +32,7 @@ var (
func TestSendingSnapshotSetPendingSnapshot(t *testing.T) {
storage := NewMemoryStorage()
+ defer storage.Close()
sm := newTestRaft(1, []uint64{1}, 10, 1, storage)
sm.restore(testingSnap)
@@ -50,6 +51,7 @@ func TestSendingSnapshotSetPendingSnapshot(t *testing.T) {
func TestPendingSnapshotPauseReplication(t *testing.T) {
storage := NewMemoryStorage()
+ defer storage.Close()
sm := newTestRaft(1, []uint64{1, 2}, 10, 1, storage)
sm.restore(testingSnap)
@@ -67,6 +69,7 @@ func TestPendingSnapshotPauseReplication(t *testing.T) {
func TestSnapshotFailure(t *testing.T) {
storage := NewMemoryStorage()
+ defer storage.Close()
sm := newTestRaft(1, []uint64{1, 2}, 10, 1, storage)
sm.restore(testingSnap)
@@ -90,6 +93,7 @@ func TestSnapshotFailure(t *testing.T) {
func TestSnapshotSucceed(t *testing.T) {
storage := NewMemoryStorage()
+ defer storage.Close()
sm := newTestRaft(1, []uint64{1, 2}, 10, 1, storage)
sm.restore(testingSnap)
@@ -111,8 +115,119 @@ func TestSnapshotSucceed(t *testing.T) {
}
}
+// TestSnapshotSucceedViaAppResp regression tests the situation in which a snap-
+// shot is sent to a follower at the most recent index (i.e. the snapshot index
+// is both the leader's last index and the committed index). In that situation, a bug
+// in the past left the follower in probing status until the next log entry was
+// committed.
+func TestSnapshotSucceedViaAppResp(t *testing.T) {
+ snap := pb.Snapshot{
+ Metadata: pb.SnapshotMetadata{
+ Index: 11, // magic number
+ Term: 11, // magic number
+ ConfState: pb.ConfState{Nodes: []uint64{1, 2, 3}},
+ },
+ }
+
+ s1 := NewMemoryStorage()
+ n1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s1)
+ defer closeAndFreeRaft(n1)
+
+ // Become a follower first because, with the way this test sets things up, the
+ // leadership term would otherwise be 1 (which is stale). We want it to match
+ // the snapshot term in this test.
+ n1.becomeFollower(snap.Metadata.Term-1, 2)
+ n1.becomeCandidate()
+ n1.becomeLeader()
+
+ // Apply a snapshot on the leader. This is a workaround against the fact that
+ // the leader will always append an empty entry, but that empty entry works
+ // against what we're trying to assert in this test, namely that a snapshot
+ // at the latest committed index leaves the follower in probing state.
+ // With the snapshot, the empty entry is fully committed.
+ n1.restore(snap)
+
+ noMessage := pb.MessageType(-1)
+ mustSend := func(from, to *raft, typ pb.MessageType) pb.Message {
+ t.Helper()
+ for i, msg := range from.msgs {
+ if msg.From != from.id || msg.To != to.id || msg.Type != typ {
+ continue
+ }
+ t.Log(DescribeMessage(msg, func([]byte) string { return "" }))
+ if err := to.Step(msg); err != nil {
+ t.Fatalf("%v: %s", msg, err)
+ }
+ from.msgs = append(from.msgs[:i], from.msgs[i+1:]...)
+ return msg
+ }
+ if typ == noMessage {
+ if len(from.msgs) == 0 {
+ return pb.Message{}
+ }
+ t.Fatalf("expected no more messages, but got %d->%d %v", from.id, to.id, from.msgs)
+ }
+ t.Fatalf("message %d->%d %s not found in %v", from.id, to.id, typ, from.msgs)
+ return pb.Message{} // unreachable
+ }
+
+ // Create the follower that will receive the snapshot.
+ s2 := NewMemoryStorage()
+ n2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, s2)
+ defer closeAndFreeRaft(n2)
+
+ // Let the leader probe the follower.
+ if !n1.maybeSendAppend(2, true /* sendIfEmpty */) {
+ t.Fatalf("expected message to be sent")
+ }
+ if msg := mustSend(n1, n2, pb.MsgApp); len(msg.Entries) > 0 {
+ // For this test to work, the leader must not have anything to append
+ // to the follower right now.
+ t.Fatalf("unexpectedly appending entries %v", msg.Entries)
+ }
+
+ // Follower rejects the append (because it doesn't have any log entries)
+ if msg := mustSend(n2, n1, pb.MsgAppResp); !msg.Reject {
+ t.Fatalf("expected a rejection with zero hint, got reject=%t hint=%d", msg.Reject, msg.RejectHint)
+ }
+
+ expIdx := snap.Metadata.Index
+ // Leader sends snapshot due to RejectHint of zero (the storage we use here
+ // has index zero compacted).
+ if msg := mustSend(n1, n2, pb.MsgSnap); msg.Snapshot.Metadata.Index != expIdx {
+ t.Fatalf("expected snapshot at index %d, got %d", expIdx, msg.Snapshot.Metadata.Index)
+ }
+
+ // n2 reacts to snapshot with MsgAppResp.
+ if msg := mustSend(n2, n1, pb.MsgAppResp); msg.Index != expIdx {
+ t.Fatalf("expected AppResp at index %d, got %d", expIdx, msg.Index)
+ }
+
+ // Leader sends MsgApp to communicate commit index.
+ if msg := mustSend(n1, n2, pb.MsgApp); msg.Commit != expIdx {
+ t.Fatalf("expected commit index %d, got %d", expIdx, msg.Commit)
+ }
+
+ // Follower responds.
+ mustSend(n2, n1, pb.MsgAppResp)
+
+ // Leader has correct state for follower.
+ pr := n1.prs[2]
+ if pr.State != ProgressStateReplicate {
+ t.Fatalf("unexpected state %v", pr)
+ }
+ if pr.Match != expIdx || pr.Next != expIdx+1 {
+ t.Fatalf("expected match = %d, next = %d; got match = %d and next = %d", expIdx, expIdx+1, pr.Match, pr.Next)
+ }
+
+ // Leader and follower are done.
+ mustSend(n1, n2, noMessage)
+ mustSend(n2, n1, noMessage)
+}
+
func TestSnapshotAbort(t *testing.T) {
storage := NewMemoryStorage()
+ defer storage.Close()
sm := newTestRaft(1, []uint64{1, 2}, 10, 1, storage)
sm.restore(testingSnap)
@@ -128,7 +243,14 @@ func TestSnapshotAbort(t *testing.T) {
if sm.prs[2].PendingSnapshot != 0 {
t.Fatalf("PendingSnapshot = %d, want 0", sm.prs[2].PendingSnapshot)
}
- if sm.prs[2].Next != 12 {
- t.Fatalf("Next = %d, want 12", sm.prs[2].Next)
+ // The follower entered ProgressStateReplicate and the leader sent an append
+ // and optimistically updated the progress (so we see 13 instead of 12).
+ // There is something to append because the leader appended an empty entry
+ // to the log at index 12 when it assumed leadership.
+ if sm.prs[2].Next != 13 {
+ t.Fatalf("Next = %d, want 13", sm.prs[2].Next)
+ }
+ if n := sm.prs[2].ins.count; n != 1 {
+ t.Fatalf("expected an inflight message, got %d", n)
}
}
diff --git a/raft/raft_test.go b/raft/raft_test.go
index b9081e29..2298e5a6 100644
--- a/raft/raft_test.go
+++ b/raft/raft_test.go
@@ -20,13 +20,14 @@ import (
"math"
"math/rand"
"reflect"
+ "strings"
"testing"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
// nextEnts returns the appliable entries and updates the applied index
-func nextEnts(r *raft, s *MemoryStorage) (ents []pb.Entry) {
+func nextEnts(r *raft, s IExtRaftStorage) (ents []pb.Entry) {
// Transfer all unstable entries to "stable" storage.
s.Append(r.raftLog.unstableEntries())
r.raftLog.stableTo(r.raftLog.lastIndex(), r.raftLog.lastTerm())
@@ -36,6 +37,25 @@ func nextEnts(r *raft, s *MemoryStorage) (ents []pb.Entry) {
return ents
}
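+// newInitedMemoryStorage returns a test storage pre-populated with the given
+// entries, handling both the in-memory and the RocksDB-backed implementations.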
+func newInitedMemoryStorage(ents []pb.Entry) IExtRaftStorage {
+ testStorage := NewMemoryStorage()
+ ms, ok := testStorage.(*MemoryStorage)
+ if ok {
+ ms.ents = ents
+ return testStorage
+ }
+ rs, ok := testStorage.(*RocksStorage)
+ if ok {
+ rs.reset(ents)
+ return testStorage
+ }
+ panic("unknown raft storage")
+}
+
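+// closeAndFreeRaft closes the storage backing a test raft instance so that
+// non-memory storages (such as the RocksDB-backed one) release their resources.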
+func closeAndFreeRaft(r *raft) {
+ r.raftLog.storage.(IExtRaftStorage).Close()
+}
+
type stateMachine interface {
Step(m pb.Message) error
readMessages() []pb.Message
@@ -270,9 +290,29 @@ func TestProgressResume(t *testing.T) {
}
}
+func TestProgressLeader(t *testing.T) {
+ r := newTestRaft(1, []uint64{1, 2}, 5, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
+ r.becomeCandidate()
+ r.becomeLeader()
+ r.prs[2].becomeReplicate()
+
+ // Send proposals to r1. The first 5 entries should be appended to the log.
+ propMsg := pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("foo")}}}
+ for i := 0; i < 5; i++ {
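+ // The leader tracks its own progress as it appends each proposal locally,
+ // so Match/Next advance with every iteration.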
+ if pr := r.prs[r.id]; pr.State != ProgressStateReplicate || pr.Match != uint64(i+1) || pr.Next != pr.Match+1 {
+ t.Errorf("unexpected progress %v", pr)
+ }
+ if err := r.Step(propMsg); err != nil {
+ t.Fatalf("proposal resulted in error: %v", err)
+ }
+ }
+}
+
// TestProgressResumeByHeartbeatResp ensures raft.heartbeat reset progress.paused by heartbeat response.
func TestProgressResumeByHeartbeatResp(t *testing.T) {
r := newTestRaft(1, []uint64{1, 2}, 5, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
r.prs[2].Paused = true
@@ -291,6 +331,7 @@ func TestProgressResumeByHeartbeatResp(t *testing.T) {
func TestProgressPaused(t *testing.T) {
r := newTestRaft(1, []uint64{1, 2}, 5, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
@@ -303,6 +344,75 @@ func TestProgressPaused(t *testing.T) {
}
}
+func TestProgressFlowControl(t *testing.T) {
+ cfg := newTestConfig(1, []uint64{1, 2}, 5, 1, NewMemoryStorage())
+ cfg.MaxInflightMsgs = 3
+ cfg.MaxSizePerMsg = 2048
+ r := newRaft(cfg)
+ defer closeAndFreeRaft(r)
+ r.becomeCandidate()
+ r.becomeLeader()
+
+ // Throw away all the messages relating to the initial election.
+ r.readMessages()
+
+ // While node 2 is in probe state, propose a bunch of entries.
+ r.prs[2].becomeProbe()
+ blob := []byte(strings.Repeat("a", 1000))
+ for i := 0; i < 10; i++ {
+ r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: blob}}})
+ }
+
+ ms := r.readMessages()
+ // First append has two entries: the empty entry to confirm the
+ // election, and the first proposal (only one proposal gets sent
+ // because we're in probe state).
+ if len(ms) != 1 || ms[0].Type != pb.MsgApp {
+ t.Fatalf("expected 1 MsgApp, got %v", ms)
+ }
+ if len(ms[0].Entries) != 2 {
+ t.Fatalf("expected 2 entries, got %d", len(ms[0].Entries))
+ }
+ if len(ms[0].Entries[0].Data) != 0 || len(ms[0].Entries[1].Data) != 1000 {
+ t.Fatalf("unexpected entry sizes: %v", ms[0].Entries)
+ }
+
+ // When this append is acked, we change to replicate state and can
+ // send multiple messages at once.
+ r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgAppResp, Index: ms[0].Entries[1].Index})
+ ms = r.readMessages()
+ if len(ms) != 3 {
+ t.Fatalf("expected 3 messages, got %d", len(ms))
+ }
+ for i, m := range ms {
+ if m.Type != pb.MsgApp {
+ t.Errorf("%d: expected MsgApp, got %s", i, m.Type)
+ }
+ if len(m.Entries) != 2 {
+ t.Errorf("%d: expected 2 entries, got %d", i, len(m.Entries))
+ }
+ }
+
+ // Ack all three of those messages together and get the last two
+ // messages (containing three entries).
+ r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgAppResp, Index: ms[2].Entries[1].Index})
+ ms = r.readMessages()
+ if len(ms) != 2 {
+ t.Fatalf("expected 2 messages, got %d", len(ms))
+ }
+ for i, m := range ms {
+ if m.Type != pb.MsgApp {
+ t.Errorf("%d: expected MsgApp, got %s", i, m.Type)
+ }
+ }
+ if len(ms[0].Entries) != 2 {
+ t.Errorf("%d: expected 2 entries, got %d", 0, len(ms[0].Entries))
+ }
+ if len(ms[1].Entries) != 1 {
+ t.Errorf("%d: expected 1 entry, got %d", 1, len(ms[1].Entries))
+ }
+}
+
func TestLeaderElection(t *testing.T) {
testLeaderElection(t, false)
}
@@ -313,8 +423,15 @@ func TestLeaderElectionPreVote(t *testing.T) {
func testLeaderElection(t *testing.T, preVote bool) {
var cfg func(*Config)
+ candState := StateType(StateCandidate)
+ candTerm := uint64(1)
if preVote {
cfg = preVoteConfig
+ // In pre-vote mode, an election that fails to complete
+ // leaves the node in pre-candidate state without advancing
+ // the term.
+ candState = StatePreCandidate
+ candTerm = 0
}
tests := []struct {
*network
@@ -323,8 +440,8 @@ func testLeaderElection(t *testing.T, preVote bool) {
}{
{newNetworkWithConfig(cfg, nil, nil, nil), StateLeader, 1},
{newNetworkWithConfig(cfg, nil, nil, nopStepper), StateLeader, 1},
- {newNetworkWithConfig(cfg, nil, nopStepper, nopStepper), StateCandidate, 1},
- {newNetworkWithConfig(cfg, nil, nopStepper, nopStepper, nil), StateCandidate, 1},
+ {newNetworkWithConfig(cfg, nil, nopStepper, nopStepper), candState, candTerm},
+ {newNetworkWithConfig(cfg, nil, nopStepper, nopStepper, nil), candState, candTerm},
{newNetworkWithConfig(cfg, nil, nopStepper, nopStepper, nil, nil), StateLeader, 1},
// three logs further along than 0, but in the same term so rejections
@@ -334,26 +451,19 @@ func testLeaderElection(t *testing.T, preVote bool) {
StateFollower, 1},
}
+ defer func() {
+ for _, tt := range tests {
+ tt.network.closeAll()
+ }
+ }()
for i, tt := range tests {
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
sm := tt.network.peers[1].(*raft)
- var expState StateType
- var expTerm uint64
- if tt.state == StateCandidate && preVote {
- // In pre-vote mode, an election that fails to complete
- // leaves the node in pre-candidate state without advancing
- // the term.
- expState = StatePreCandidate
- expTerm = 0
- } else {
- expState = tt.state
- expTerm = tt.expTerm
- }
- if sm.state != expState {
- t.Errorf("#%d: state = %s, want %s", i, sm.state, expState)
+ if sm.state != tt.state {
+ t.Errorf("#%d: state = %s, want %s", i, sm.state, tt.state)
}
- if g := sm.Term; g != expTerm {
- t.Errorf("#%d: term = %d, want %d", i, g, expTerm)
+ if g := sm.Term; g != tt.expTerm {
+ t.Errorf("#%d: term = %d, want %d", i, g, tt.expTerm)
}
}
}
@@ -363,6 +473,8 @@ func testLeaderElection(t *testing.T, preVote bool) {
func TestLearnerElectionTimeout(t *testing.T) {
n1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())
n2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(n1)
+ defer closeAndFreeRaft(n2)
n1.becomeFollower(1, None)
n2.becomeFollower(1, None)
@@ -383,6 +495,8 @@ func TestLearnerElectionTimeout(t *testing.T) {
func TestLearnerPromotion(t *testing.T) {
n1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())
n2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(n1)
+ defer closeAndFreeRaft(n2)
n1.becomeFollower(1, None)
n2.becomeFollower(1, None)
@@ -437,6 +551,7 @@ func TestLearnerPromotion(t *testing.T) {
 // TestLearnerCannotVote checks that a learner can't vote even if it receives a valid Vote request.
func TestLearnerCannotVote(t *testing.T) {
n2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(n2)
n2.becomeFollower(1, None)
@@ -465,6 +580,7 @@ func testLeaderCycle(t *testing.T, preVote bool) {
cfg = preVoteConfig
}
n := newNetworkWithConfig(cfg, nil, nil, nil)
+ defer n.closeAll()
for campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {
n.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})
@@ -520,6 +636,7 @@ func testLeaderElectionOverwriteNewerLogs(t *testing.T, preVote bool) {
votedWithConfig(cfg, 3, 2), // Node 4: Voted but didn't get logs
votedWithConfig(cfg, 3, 2)) // Node 5: Voted but didn't get logs
+ defer n.closeAll()
// Node 1 campaigns. The election fails because a quorum of nodes
// know about the election that already happened at term 2. Node 1's
// term is pushed ahead to 2.
@@ -569,6 +686,7 @@ func TestPreVoteFromAnyState(t *testing.T) {
func testVoteFromAnyState(t *testing.T, vt pb.MessageType) {
for st := StateType(0); st < numStates; st++ {
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.Term = 1
switch st {
@@ -664,6 +782,11 @@ func TestLogReplication(t *testing.T) {
},
}
+ defer func() {
+ for _, tt := range tests {
+ tt.closeAll()
+ }
+ }()
for i, tt := range tests {
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
@@ -703,6 +826,8 @@ func TestLogReplication(t *testing.T) {
func TestLearnerLogReplication(t *testing.T) {
n1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())
n2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(n1)
+ defer closeAndFreeRaft(n2)
nt := newNetwork(n1, n2)
@@ -742,6 +867,7 @@ func TestLearnerLogReplication(t *testing.T) {
func TestSingleNodeCommit(t *testing.T) {
tt := newNetwork(nil)
+ defer tt.closeAll()
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
@@ -757,6 +883,7 @@ func TestSingleNodeCommit(t *testing.T) {
// filtered.
func TestCannotCommitWithoutNewTermEntry(t *testing.T) {
tt := newNetwork(nil, nil, nil, nil, nil)
+ defer tt.closeAll()
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
// 0 cannot reach 2,3,4
@@ -801,6 +928,7 @@ func TestCannotCommitWithoutNewTermEntry(t *testing.T) {
// when leader changes, no new proposal comes in.
func TestCommitWithoutNewTermEntry(t *testing.T) {
tt := newNetwork(nil, nil, nil, nil, nil)
+ defer tt.closeAll()
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
// 0 cannot reach 2,3,4
@@ -833,6 +961,9 @@ func TestDuelingCandidates(t *testing.T) {
a := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
b := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
c := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(a)
+ defer closeAndFreeRaft(b)
+ defer closeAndFreeRaft(c)
nt := newNetwork(a, b, c)
nt.cut(1, 3)
@@ -860,10 +991,11 @@ func TestDuelingCandidates(t *testing.T) {
nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
wlog := &raftLog{
- storage: &MemoryStorage{ents: []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}}},
+ storage: newInitedMemoryStorage([]pb.Entry{{}, {Data: nil, Term: 1, Index: 1}}),
committed: 1,
unstable: unstable{offset: 2},
}
+ defer wlog.storage.(IExtRaftStorage).Close()
tests := []struct {
sm *raft
state StateType
@@ -904,6 +1036,9 @@ func TestDuelingPreCandidates(t *testing.T) {
a := newRaft(cfgA)
b := newRaft(cfgB)
c := newRaft(cfgC)
+ defer closeAndFreeRaft(a)
+ defer closeAndFreeRaft(b)
+ defer closeAndFreeRaft(c)
nt := newNetwork(a, b, c)
nt.cut(1, 3)
@@ -930,10 +1065,11 @@ func TestDuelingPreCandidates(t *testing.T) {
nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
wlog := &raftLog{
- storage: &MemoryStorage{ents: []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}}},
+ storage: newInitedMemoryStorage([]pb.Entry{{}, {Data: nil, Term: 1, Index: 1}}),
committed: 1,
unstable: unstable{offset: 2},
}
+ defer wlog.storage.(IExtRaftStorage).Close()
tests := []struct {
sm *raft
state StateType
@@ -966,6 +1102,7 @@ func TestDuelingPreCandidates(t *testing.T) {
func TestCandidateConcede(t *testing.T) {
tt := newNetwork(nil, nil, nil)
+ defer tt.closeAll()
tt.isolate(1)
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
@@ -990,9 +1127,9 @@ func TestCandidateConcede(t *testing.T) {
t.Errorf("term = %d, want %d", g, 1)
}
wantLog := ltoa(&raftLog{
- storage: &MemoryStorage{
- ents: []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}, {Term: 1, Index: 2, Data: data}},
- },
+ storage: newInitedMemoryStorage(
+ []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}, {Term: 1, Index: 2, Data: data}},
+ ),
unstable: unstable{offset: 3},
committed: 2,
})
@@ -1010,6 +1147,7 @@ func TestCandidateConcede(t *testing.T) {
func TestSingleNodeCandidate(t *testing.T) {
tt := newNetwork(nil)
+ defer tt.closeAll()
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
sm := tt.peers[1].(*raft)
@@ -1020,6 +1158,7 @@ func TestSingleNodeCandidate(t *testing.T) {
func TestSingleNodePreCandidate(t *testing.T) {
tt := newNetworkWithConfig(preVoteConfig, nil)
+ defer tt.closeAll()
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
sm := tt.peers[1].(*raft)
@@ -1030,6 +1169,7 @@ func TestSingleNodePreCandidate(t *testing.T) {
func TestOldMessages(t *testing.T) {
tt := newNetwork(nil, nil, nil)
+ defer tt.closeAll()
// make 0 leader @ term 3
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
@@ -1040,13 +1180,13 @@ func TestOldMessages(t *testing.T) {
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
ilog := &raftLog{
- storage: &MemoryStorage{
- ents: []pb.Entry{
+ storage: newInitedMemoryStorage(
+ []pb.Entry{
{}, {Data: nil, Term: 1, Index: 1},
{Data: nil, Term: 2, Index: 2}, {Data: nil, Term: 3, Index: 3},
{Data: []byte("somedata"), Term: 3, Index: 4},
},
- },
+ ),
unstable: unstable{offset: 5},
committed: 4,
}
@@ -1092,6 +1232,7 @@ func TestProposal(t *testing.T) {
tt.send(m)
}
+ defer tt.closeAll()
data := []byte("somedata")
// promote 0 the leader
@@ -1101,12 +1242,13 @@ func TestProposal(t *testing.T) {
wantLog := newLog(NewMemoryStorage(), raftLogger)
if tt.success {
wantLog = &raftLog{
- storage: &MemoryStorage{
- ents: []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}, {Term: 1, Index: 2, Data: data}},
- },
+ storage: newInitedMemoryStorage(
+ []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}, {Term: 1, Index: 2, Data: data}},
+ ),
unstable: unstable{offset: 3},
committed: 2}
}
+ defer wantLog.storage.(IExtRaftStorage).Close()
base := ltoa(wantLog)
for i, p := range tt.peers {
if sm, ok := p.(*raft); ok {
@@ -1133,6 +1275,7 @@ func TestProposalByProxy(t *testing.T) {
}
for j, tt := range tests {
+ defer tt.closeAll()
// promote 0 the leader
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
@@ -1140,11 +1283,12 @@ func TestProposalByProxy(t *testing.T) {
tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
wantLog := &raftLog{
- storage: &MemoryStorage{
- ents: []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}, {Term: 1, Data: data, Index: 2}},
- },
+ storage: newInitedMemoryStorage(
+ []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}, {Term: 1, Data: data, Index: 2}},
+ ),
unstable: unstable{offset: 3},
committed: 2}
+ defer wantLog.storage.(IExtRaftStorage).Close()
base := ltoa(wantLog)
for i, p := range tt.peers {
if sm, ok := p.(*raft); ok {
@@ -1197,8 +1341,9 @@ func TestCommit(t *testing.T) {
}
for i, tt := range tests {
storage := NewMemoryStorage()
+ defer storage.Close()
storage.Append(tt.logs)
- storage.hardState = pb.HardState{Term: tt.smTerm}
+ storage.SetHardState(pb.HardState{Term: tt.smTerm})
sm := newTestRaft(1, []uint64{1}, 5, 1, storage)
for j := 0; j < len(tt.matches); j++ {
@@ -1227,6 +1372,7 @@ func TestPastElectionTimeout(t *testing.T) {
for i, tt := range tests {
sm := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(sm)
sm.electionElapsed = tt.elapse
c := 0
for j := 0; j < 10000; j++ {
@@ -1254,6 +1400,7 @@ func TestStepIgnoreOldTermMsg(t *testing.T) {
return false
}
sm := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(sm)
sm.step = fakeStep
sm.Term = 2
sm.Step(pb.Message{Type: pb.MsgApp, Term: sm.Term - 1})
@@ -1294,6 +1441,7 @@ func TestHandleMsgApp(t *testing.T) {
for i, tt := range tests {
storage := NewMemoryStorage()
+ defer storage.Close()
storage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}})
sm := newTestRaft(1, []uint64{1}, 10, 1, storage)
sm.becomeFollower(2, None)
@@ -1328,6 +1476,7 @@ func TestHandleHeartbeat(t *testing.T) {
for i, tt := range tests {
storage := NewMemoryStorage()
+ defer storage.Close()
storage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}})
sm := newTestRaft(1, []uint64{1, 2}, 5, 1, storage)
sm.becomeFollower(2, 2)
@@ -1351,6 +1500,7 @@ func TestHandleHeartbeat(t *testing.T) {
// related issue: https://github.com/coreos/etcd/issues/7571
func TestRaftFreesReadOnlyMem(t *testing.T) {
sm := newTestRaft(1, []uint64{1, 2}, 5, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(sm)
sm.becomeCandidate()
sm.becomeLeader()
sm.raftLog.commitTo(sm.raftLog.lastIndex())
@@ -1398,6 +1548,7 @@ func TestRaftFreesReadOnlyMem(t *testing.T) {
// TestHandleHeartbeatResp ensures that we re-send log entries when we get a heartbeat response.
func TestHandleHeartbeatResp(t *testing.T) {
storage := NewMemoryStorage()
+ defer storage.Close()
storage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}})
sm := newTestRaft(1, []uint64{1, 2}, 5, 1, storage)
sm.becomeCandidate()
@@ -1444,6 +1595,7 @@ func TestHandleHeartbeatResp(t *testing.T) {
// MsgAppResp.
func TestMsgAppRespWaitReset(t *testing.T) {
sm := newTestRaft(1, []uint64{1, 2, 3}, 5, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(sm)
sm.becomeCandidate()
sm.becomeLeader()
@@ -1550,6 +1702,7 @@ func testRecvMsgVote(t *testing.T, msgType pb.MessageType) {
for i, tt := range tests {
sm := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(sm)
sm.state = tt.state
switch tt.state {
case StateFollower:
@@ -1561,7 +1714,7 @@ func testRecvMsgVote(t *testing.T, msgType pb.MessageType) {
}
sm.Vote = tt.voteFor
sm.raftLog = &raftLog{
- storage: &MemoryStorage{ents: []pb.Entry{{}, {Index: 1, Term: 2}, {Index: 2, Term: 2}}},
+ storage: newInitedMemoryStorage([]pb.Entry{{}, {Index: 1, Term: 2}, {Index: 2, Term: 2}}),
unstable: unstable{offset: 3},
}
@@ -1631,6 +1784,7 @@ func TestStateTransition(t *testing.T) {
}()
sm := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(sm)
sm.state = tt.from
switch tt.to {
@@ -1673,6 +1827,7 @@ func TestAllServerStepdown(t *testing.T) {
for i, tt := range tests {
sm := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(sm)
switch tt.state {
case StateFollower:
sm.becomeFollower(1, None)
@@ -1711,8 +1866,78 @@ func TestAllServerStepdown(t *testing.T) {
}
}
+func TestCandidateResetTermMsgHeartbeat(t *testing.T) {
+ testCandidateResetTerm(t, pb.MsgHeartbeat)
+}
+
+func TestCandidateResetTermMsgApp(t *testing.T) {
+ testCandidateResetTerm(t, pb.MsgApp)
+}
+
+// testCandidateResetTerm tests that when a candidate receives a
+// MsgHeartbeat or MsgApp from the leader, "Step" resets the term
+// to the leader's and reverts the candidate back to follower.
+func testCandidateResetTerm(t *testing.T, mt pb.MessageType) {
+ a := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ b := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ c := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+
+ nt := newNetwork(a, b, c)
+ defer nt.closeAll()
+
+ nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
+ if a.state != StateLeader {
+ t.Errorf("state = %s, want %s", a.state, StateLeader)
+ }
+ if b.state != StateFollower {
+ t.Errorf("state = %s, want %s", b.state, StateFollower)
+ }
+ if c.state != StateFollower {
+ t.Errorf("state = %s, want %s", c.state, StateFollower)
+ }
+
+ // isolate 3 and increase term in rest
+ nt.isolate(3)
+
+ nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
+ nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
+
+ if a.state != StateLeader {
+ t.Errorf("state = %s, want %s", a.state, StateLeader)
+ }
+ if b.state != StateFollower {
+ t.Errorf("state = %s, want %s", b.state, StateFollower)
+ }
+
+ // trigger campaign in isolated c
+ c.resetRandomizedElectionTimeout()
+ for i := 0; i < c.randomizedElectionTimeout; i++ {
+ c.tick()
+ }
+
+ if c.state != StateCandidate {
+ t.Errorf("state = %s, want %s", c.state, StateCandidate)
+ }
+
+ nt.recover()
+
+ // leader sends to isolated candidate
+ // and expects candidate to revert to follower
+ nt.send(pb.Message{From: 1, To: 3, Term: a.Term, Type: mt})
+
+ if c.state != StateFollower {
+ t.Errorf("state = %s, want %s", c.state, StateFollower)
+ }
+
+ // follower c's term is reset to the leader's
+ if a.Term != c.Term {
+ t.Errorf("follower term expected same term as leader's %d, got %d", a.Term, c.Term)
+ }
+}
+
func TestLeaderStepdownWhenQuorumActive(t *testing.T) {
sm := newTestRaft(1, []uint64{1, 2, 3}, 5, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(sm)
sm.checkQuorum = true
@@ -1731,6 +1956,7 @@ func TestLeaderStepdownWhenQuorumActive(t *testing.T) {
func TestLeaderStepdownWhenQuorumLost(t *testing.T) {
sm := newTestRaft(1, []uint64{1, 2, 3}, 5, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(sm)
sm.checkQuorum = true
@@ -1750,6 +1976,9 @@ func TestLeaderSupersedingWithCheckQuorum(t *testing.T) {
a := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
b := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
c := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(a)
+ defer closeAndFreeRaft(b)
+ defer closeAndFreeRaft(c)
a.checkQuorum = true
b.checkQuorum = true
@@ -1793,6 +2022,9 @@ func TestLeaderElectionWithCheckQuorum(t *testing.T) {
a := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
b := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
c := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(a)
+ defer closeAndFreeRaft(b)
+ defer closeAndFreeRaft(c)
a.checkQuorum = true
b.checkQuorum = true
@@ -1842,6 +2074,9 @@ func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {
a := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
b := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
c := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(a)
+ defer closeAndFreeRaft(b)
+ defer closeAndFreeRaft(c)
a.checkQuorum = true
b.checkQuorum = true
@@ -1909,6 +2144,8 @@ func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {
func TestNonPromotableVoterWithCheckQuorum(t *testing.T) {
a := newTestRaft(1, []uint64{1, 2}, 10, 1, NewMemoryStorage())
b := newTestRaft(2, []uint64{1}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(a)
+ defer closeAndFreeRaft(b)
a.checkQuorum = true
b.checkQuorum = true
@@ -1944,6 +2181,9 @@ func TestReadOnlyOptionSafe(t *testing.T) {
a := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
b := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
c := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(a)
+ defer closeAndFreeRaft(b)
+ defer closeAndFreeRaft(c)
nt := newNetwork(a, b, c)
setRandomizedElectionTimeout(b, b.electionTimeout+1)
@@ -1994,10 +2234,64 @@ func TestReadOnlyOptionSafe(t *testing.T) {
}
}
+func TestReadOnlyWithLearner(t *testing.T) {
+ a := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())
+ b := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())
+
+ nt := newNetwork(a, b)
+ setRandomizedElectionTimeout(b, b.electionTimeout+1)
+
+ for i := 0; i < b.electionTimeout; i++ {
+ b.tick()
+ }
+ nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
+
+ if a.state != StateLeader {
+ t.Fatalf("state = %s, want %s", a.state, StateLeader)
+ }
+
+ tests := []struct {
+ sm *raft
+ proposals int
+ wri uint64
+ wctx []byte
+ }{
+ {a, 10, 11, []byte("ctx1")},
+ {b, 10, 21, []byte("ctx2")},
+ {a, 10, 31, []byte("ctx3")},
+ {b, 10, 41, []byte("ctx4")},
+ }
+
+ for i, tt := range tests {
+ for j := 0; j < tt.proposals; j++ {
+ nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
+ }
+
+ nt.send(pb.Message{From: tt.sm.id, To: tt.sm.id, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: tt.wctx}}})
+
+ r := tt.sm
+ if len(r.readStates) == 0 {
+ t.Fatalf("#%d: len(readStates) = 0, want non-zero", i)
+ }
+ rs := r.readStates[0]
+ if rs.Index != tt.wri {
+ t.Errorf("#%d: readIndex = %d, want %d", i, rs.Index, tt.wri)
+ }
+
+ if !bytes.Equal(rs.RequestCtx, tt.wctx) {
+ t.Errorf("#%d: requestCtx = %v, want %v", i, rs.RequestCtx, tt.wctx)
+ }
+ r.readStates = nil
+ }
+}
+
func TestReadOnlyOptionLease(t *testing.T) {
a := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
b := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
c := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(a)
+ defer closeAndFreeRaft(b)
+ defer closeAndFreeRaft(c)
a.readOnly.option = ReadOnlyLeaseBased
b.readOnly.option = ReadOnlyLeaseBased
c.readOnly.option = ReadOnlyLeaseBased
@@ -2067,6 +2361,7 @@ func TestReadOnlyForNewLeader(t *testing.T) {
peers := make([]stateMachine, 0)
for _, c := range nodeConfigs {
storage := NewMemoryStorage()
+ defer storage.Close()
storage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1}})
storage.SetHardState(pb.HardState{Term: 1, Commit: c.committed})
if c.compact_index != 0 {
@@ -2150,9 +2445,10 @@ func TestLeaderAppResp(t *testing.T) {
// thus the last log term must be 1 to be committed.
sm := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
sm.raftLog = &raftLog{
- storage: &MemoryStorage{ents: []pb.Entry{{}, {Index: 1, Term: 0}, {Index: 2, Term: 1}}},
+ storage: newInitedMemoryStorage([]pb.Entry{{}, {Index: 1, Term: 0}, {Index: 2, Term: 1}}),
unstable: unstable{offset: 3},
}
+ defer closeAndFreeRaft(sm)
sm.becomeCandidate()
sm.becomeLeader()
sm.readMessages()
@@ -2207,6 +2503,7 @@ func TestBcastBeat(t *testing.T) {
storage := NewMemoryStorage()
storage.ApplySnapshot(s)
sm := newTestRaft(1, nil, 10, 1, storage)
+ defer closeAndFreeRaft(sm)
sm.Term = 1
sm.becomeCandidate()
@@ -2266,7 +2563,8 @@ func TestRecvMsgBeat(t *testing.T) {
for i, tt := range tests {
sm := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
- sm.raftLog = &raftLog{storage: &MemoryStorage{ents: []pb.Entry{{}, {Index: 1, Term: 0}, {Index: 2, Term: 1}}}}
+ sm.raftLog = &raftLog{storage: newInitedMemoryStorage([]pb.Entry{{}, {Index: 1, Term: 0}, {Index: 2, Term: 1}})}
+ defer closeAndFreeRaft(sm)
sm.Term = 1
sm.state = tt.state
switch tt.state {
@@ -2309,6 +2607,7 @@ func TestLeaderIncreaseNext(t *testing.T) {
for i, tt := range tests {
sm := newTestRaft(1, []uint64{1, 2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(sm)
sm.raftLog.append(previousEnts...)
sm.becomeCandidate()
sm.becomeLeader()
@@ -2325,6 +2624,7 @@ func TestLeaderIncreaseNext(t *testing.T) {
func TestSendAppendForProgressProbe(t *testing.T) {
r := newTestRaft(1, []uint64{1, 2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
r.readMessages()
@@ -2392,6 +2692,7 @@ func TestSendAppendForProgressProbe(t *testing.T) {
func TestSendAppendForProgressReplicate(t *testing.T) {
r := newTestRaft(1, []uint64{1, 2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
r.readMessages()
@@ -2409,6 +2710,7 @@ func TestSendAppendForProgressReplicate(t *testing.T) {
func TestSendAppendForProgressSnapshot(t *testing.T) {
r := newTestRaft(1, []uint64{1, 2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
r.readMessages()
@@ -2429,6 +2731,7 @@ func TestRecvMsgUnreachable(t *testing.T) {
s := NewMemoryStorage()
s.Append(previousEnts)
r := newTestRaft(1, []uint64{1, 2}, 10, 1, s)
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
r.readMessages()
@@ -2457,6 +2760,7 @@ func TestRestore(t *testing.T) {
}
storage := NewMemoryStorage()
+ defer storage.Close()
sm := newTestRaft(1, []uint64{1, 2}, 10, 1, storage)
if ok := sm.restore(s); !ok {
t.Fatal("restore fail, want succeed")
@@ -2476,6 +2780,13 @@ func TestRestore(t *testing.T) {
if ok := sm.restore(s); ok {
t.Fatal("restore succeed, want fail")
}
+ // It should not campaign before actually applying data.
+ for i := 0; i < sm.randomizedElectionTimeout; i++ {
+ sm.tick()
+ }
+ if sm.state != StateFollower {
+ t.Errorf("state = %d, want %d", sm.state, StateFollower)
+ }
}
// TestRestoreWithLearner restores a snapshot which contains learners.
@@ -2490,6 +2801,7 @@ func TestRestoreWithLearner(t *testing.T) {
storage := NewMemoryStorage()
sm := newTestLearnerRaft(3, []uint64{1, 2}, []uint64{3}, 10, 1, storage)
+ defer closeAndFreeRaft(sm)
if ok := sm.restore(s); !ok {
t.Error("restore fail, want succeed")
}
@@ -2542,6 +2854,7 @@ func TestRestoreInvalidLearner(t *testing.T) {
storage := NewMemoryStorage()
sm := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, storage)
+ defer closeAndFreeRaft(sm)
if sm.isLearner {
t.Errorf("%x is learner, want not", sm.id)
@@ -2564,6 +2877,7 @@ func TestRestoreLearnerPromotion(t *testing.T) {
storage := NewMemoryStorage()
sm := newTestLearnerRaft(3, []uint64{1, 2}, []uint64{3}, 10, 1, storage)
+ defer closeAndFreeRaft(sm)
if !sm.isLearner {
t.Errorf("%x is not learner, want yes", sm.id)
@@ -2589,10 +2903,16 @@ func TestLearnerReceiveSnapshot(t *testing.T) {
},
}
- n1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())
+ store := NewMemoryStorage()
+ n1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, store)
n2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(n1)
+ defer closeAndFreeRaft(n2)
n1.restore(s)
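+ // Build a Ready from the restored snapshot, apply it to the storage, and
+ // advance, mirroring what the application would do so the snapshot takes
+ // effect on n1.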
+ ready := newReady(n1, &SoftState{}, pb.HardState{}, true)
+ store.ApplySnapshot(ready.Snapshot)
+ n1.advance(ready)
 // Force set n1 applied index.
n1.raftLog.appliedTo(n1.raftLog.committed)
@@ -2616,6 +2936,7 @@ func TestRestoreIgnoreSnapshot(t *testing.T) {
commit := uint64(1)
storage := NewMemoryStorage()
sm := newTestRaft(1, []uint64{1, 2}, 10, 1, storage)
+ defer closeAndFreeRaft(sm)
sm.raftLog.append(previousEnts...)
sm.raftLog.commitTo(commit)
@@ -2656,6 +2977,7 @@ func TestProvideSnap(t *testing.T) {
}
storage := NewMemoryStorage()
sm := newTestRaft(1, []uint64{1}, 10, 1, storage)
+ defer closeAndFreeRaft(sm)
sm.restore(s)
sm.becomeCandidate()
@@ -2686,6 +3008,7 @@ func TestIgnoreProvidingSnap(t *testing.T) {
}
storage := NewMemoryStorage()
sm := newTestRaft(1, []uint64{1}, 10, 1, storage)
+ defer closeAndFreeRaft(sm)
sm.restore(s)
sm.becomeCandidate()
@@ -2715,6 +3038,7 @@ func TestRestoreFromSnapMsg(t *testing.T) {
m := pb.Message{Type: pb.MsgSnap, From: 1, Term: 2, Snapshot: s}
sm := newTestRaft(2, []uint64{1, 2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(sm)
sm.Step(m)
if sm.lead != uint64(1) {
@@ -2726,6 +3050,7 @@ func TestRestoreFromSnapMsg(t *testing.T) {
func TestSlowNodeRestore(t *testing.T) {
nt := newNetwork(nil, nil, nil)
+ defer nt.closeAll()
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
nt.isolate(3)
@@ -2764,6 +3089,7 @@ func TestSlowNodeRestore(t *testing.T) {
func TestStepConfig(t *testing.T) {
// a raft that cannot make progress
r := newTestRaft(1, []uint64{1, 2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
index := r.raftLog.lastIndex()
@@ -2782,6 +3108,7 @@ func TestStepConfig(t *testing.T) {
func TestStepIgnoreConfig(t *testing.T) {
// a raft that cannot make progress
r := newTestRaft(1, []uint64{1, 2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange}}})
@@ -2813,6 +3140,7 @@ func TestRecoverPendingConfig(t *testing.T) {
}
for i, tt := range tests {
r := newTestRaft(1, []uint64{1, 2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.appendEntry(pb.Entry{Type: tt.entType})
r.becomeCandidate()
r.becomeLeader()
@@ -2832,6 +3160,7 @@ func TestRecoverDoublePendingConfig(t *testing.T) {
}
}()
r := newTestRaft(1, []uint64{1, 2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.appendEntry(pb.Entry{Type: pb.EntryConfChange})
r.appendEntry(pb.Entry{Type: pb.EntryConfChange})
r.becomeCandidate()
@@ -2842,6 +3171,7 @@ func TestRecoverDoublePendingConfig(t *testing.T) {
// TestAddNode tests that addNode could update pendingConf and nodes correctly.
func TestAddNode(t *testing.T) {
r := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.pendingConf = true
grp := pb.Group{
NodeId: 2,
@@ -2862,6 +3192,7 @@ func TestAddNode(t *testing.T) {
// TestAddLearner tests that addLearner could update pendingConf and nodes correctly.
func TestAddLearner(t *testing.T) {
r := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.pendingConf = true
grp2 := pb.Group{
NodeId: 2,
@@ -2887,6 +3218,7 @@ func TestAddLearner(t *testing.T) {
// immediately when checkQuorum is set.
func TestAddNodeCheckQuorum(t *testing.T) {
r := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.pendingConf = true
r.checkQuorum = true
@@ -2926,6 +3258,7 @@ func TestAddNodeCheckQuorum(t *testing.T) {
// and removed list correctly.
func TestRemoveNode(t *testing.T) {
r := newTestRaft(1, []uint64{1, 2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.pendingConf = true
r.removeNode(2)
if r.pendingConf {
@@ -2948,6 +3281,7 @@ func TestRemoveNode(t *testing.T) {
// and removed list correctly.
func TestRemoveLearner(t *testing.T) {
r := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.pendingConf = true
r.removeNode(2)
if r.pendingConf {
@@ -2979,6 +3313,7 @@ func TestPromotable(t *testing.T) {
}
for i, tt := range tests {
r := newTestRaft(id, tt.peers, 5, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
if g := r.promotable(); g != tt.wp {
t.Errorf("#%d: promotable = %v, want %v", i, g, tt.wp)
}
@@ -3001,6 +3336,7 @@ func TestRaftNodes(t *testing.T) {
}
for i, tt := range tests {
r := newTestRaft(1, tt.ids, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
if !reflect.DeepEqual(r.nodes(), tt.wids) {
t.Errorf("#%d: nodes = %+v, want %+v", i, r.nodes(), tt.wids)
}
@@ -3019,6 +3355,7 @@ func testCampaignWhileLeader(t *testing.T, preVote bool) {
cfg := newTestConfig(1, []uint64{1}, 5, 1, NewMemoryStorage())
cfg.PreVote = preVote
r := newRaft(cfg)
+ defer closeAndFreeRaft(r)
if r.state != StateFollower {
t.Errorf("expected new node to be follower but got %s", r.state)
}
@@ -3044,6 +3381,7 @@ func TestCommitAfterRemoveNode(t *testing.T) {
// Create a cluster with two nodes.
s := NewMemoryStorage()
r := newTestRaft(1, []uint64{1, 2}, 5, 1, s)
+ defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
@@ -3107,6 +3445,7 @@ func TestCommitAfterRemoveNode(t *testing.T) {
// if the transferee has the most up-to-date log entries when transfer starts.
func TestLeaderTransferToUpToDateNode(t *testing.T) {
nt := newNetwork(nil, nil, nil)
+ defer nt.closeAll()
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
lead := nt.peers[1].(*raft)
@@ -3135,6 +3474,7 @@ func TestLeaderTransferToUpToDateNode(t *testing.T) {
// to the follower.
func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) {
nt := newNetwork(nil, nil, nil)
+ defer nt.closeAll()
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
lead := nt.peers[1].(*raft)
@@ -3160,6 +3500,7 @@ func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) {
// even the current leader is still under its leader lease
func TestLeaderTransferWithCheckQuorum(t *testing.T) {
nt := newNetwork(nil, nil, nil)
+ defer nt.closeAll()
for i := 1; i < 4; i++ {
r := nt.peers[uint64(i)].(*raft)
r.checkQuorum = true
@@ -3196,6 +3537,7 @@ func TestLeaderTransferWithCheckQuorum(t *testing.T) {
func TestLeaderTransferToSlowFollower(t *testing.T) {
defaultLogger.EnableDebug()
nt := newNetwork(nil, nil, nil)
+ defer nt.closeAll()
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
nt.isolate(3)
@@ -3215,6 +3557,7 @@ func TestLeaderTransferToSlowFollower(t *testing.T) {
func TestLeaderTransferAfterSnapshot(t *testing.T) {
nt := newNetwork(nil, nil, nil)
+ defer nt.closeAll()
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
nt.isolate(3)
@@ -3230,16 +3573,39 @@ func TestLeaderTransferAfterSnapshot(t *testing.T) {
t.Fatalf("node 1 has match %x for node 3, want %x", lead.prs[3].Match, 1)
}
+ filtered := pb.Message{}
+ // Snapshot needs to be applied before sending MsgAppResp
+ nt.msgHook = func(m pb.Message) bool {
+ if m.Type != pb.MsgAppResp || m.From != 3 || m.Reject {
+ return true
+ }
+ filtered = m
+ return false
+ }
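+ // The hook above captures node 3's MsgAppResp instead of delivering it, so
+ // the transfer below cannot complete until the snapshot is applied and the
+ // captured response is re-sent.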
+
 // Transfer leadership to 3 while node 3 still lacks the snapshot.
nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
- // Send pb.MsgHeartbeatResp to leader to trigger a snapshot for node 3.
- nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgHeartbeatResp})
+ if lead.state != StateLeader {
+ t.Fatalf("node 1 should still be leader as snapshot is not applied, got %x", lead.state)
+ }
+ if reflect.DeepEqual(filtered, pb.Message{}) {
+ t.Fatalf("Follower should report snapshot progress automatically.")
+ }
+
+ // Apply snapshot and resume progress
+ follower := nt.peers[3].(*raft)
+ ready := newReady(follower, &SoftState{}, pb.HardState{}, true)
+ nt.storage[3].ApplySnapshot(ready.Snapshot)
+ follower.advance(ready)
+ nt.msgHook = nil
+ nt.send(filtered)
checkLeaderTransferState(t, lead, StateFollower, 3)
}
func TestLeaderTransferToSelf(t *testing.T) {
nt := newNetwork(nil, nil, nil)
+ defer nt.closeAll()
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
lead := nt.peers[1].(*raft)
@@ -3251,6 +3617,7 @@ func TestLeaderTransferToSelf(t *testing.T) {
func TestLeaderTransferToNonExistingNode(t *testing.T) {
nt := newNetwork(nil, nil, nil)
+ defer nt.closeAll()
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
lead := nt.peers[1].(*raft)
@@ -3261,6 +3628,7 @@ func TestLeaderTransferToNonExistingNode(t *testing.T) {
func TestLeaderTransferTimeout(t *testing.T) {
nt := newNetwork(nil, nil, nil)
+ defer nt.closeAll()
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
nt.isolate(3)
@@ -3288,6 +3656,7 @@ func TestLeaderTransferTimeout(t *testing.T) {
func TestLeaderTransferIgnoreProposal(t *testing.T) {
nt := newNetwork(nil, nil, nil)
+ defer nt.closeAll()
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
nt.isolate(3)
@@ -3314,6 +3683,7 @@ func TestLeaderTransferIgnoreProposal(t *testing.T) {
// if the transferee has the most up-to-date log entries when transfer starts.
func TestLeaderTransferReceiveHigherTermVote(t *testing.T) {
nt := newNetwork(nil, nil, nil)
+ defer nt.closeAll()
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
nt.isolate(3)
@@ -3333,6 +3703,7 @@ func TestLeaderTransferReceiveHigherTermVote(t *testing.T) {
func TestLeaderTransferRemoveNode(t *testing.T) {
nt := newNetwork(nil, nil, nil)
+ defer nt.closeAll()
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
nt.ignore(pb.MsgTimeoutNow)
@@ -3353,6 +3724,7 @@ func TestLeaderTransferRemoveNode(t *testing.T) {
// TestLeaderTransferBack verifies leadership can transfer back to self when last transfer is pending.
func TestLeaderTransferBack(t *testing.T) {
nt := newNetwork(nil, nil, nil)
+ defer nt.closeAll()
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
nt.isolate(3)
@@ -3374,6 +3746,7 @@ func TestLeaderTransferBack(t *testing.T) {
// when last transfer is pending.
func TestLeaderTransferSecondTransferToAnotherNode(t *testing.T) {
nt := newNetwork(nil, nil, nil)
+ defer nt.closeAll()
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
nt.isolate(3)
@@ -3395,6 +3768,7 @@ func TestLeaderTransferSecondTransferToAnotherNode(t *testing.T) {
// to the same node should not extend the timeout while the first one is pending.
func TestLeaderTransferSecondTransferToSameNode(t *testing.T) {
nt := newNetwork(nil, nil, nil)
+ defer nt.closeAll()
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
nt.isolate(3)
@@ -3434,6 +3808,7 @@ func checkLeaderTransferState(t *testing.T, r *raft, state StateType, lead uint6
// transitioned to StateLeader)
func TestTransferNonMember(t *testing.T) {
r := newTestRaft(1, []uint64{2, 3, 4}, 5, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(r)
r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgTimeoutNow})
r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVoteResp})
@@ -3449,6 +3824,9 @@ func TestPreVoteWithSplitVote(t *testing.T) {
n1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
n2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
n3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(n1)
+ defer closeAndFreeRaft(n2)
+ defer closeAndFreeRaft(n3)
n1.becomeFollower(1, None)
n2.becomeFollower(1, None)
@@ -3520,6 +3898,56 @@ func TestPreVoteWithSplitVote(t *testing.T) {
}
}
+// TestPreVoteWithCheckQuorum ensures that after a node becomes a pre-candidate,
+// checkQuorum is still applied correctly.
+func TestPreVoteWithCheckQuorum(t *testing.T) {
+ n1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ n2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ n3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+
+ n1.becomeFollower(1, None)
+ n2.becomeFollower(1, None)
+ n3.becomeFollower(1, None)
+
+ n1.preVote = true
+ n2.preVote = true
+ n3.preVote = true
+
+ n1.checkQuorum = true
+ n2.checkQuorum = true
+ n3.checkQuorum = true
+
+ nt := newNetwork(n1, n2, n3)
+ defer nt.closeAll()
+ nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
+
+ // isolate node 1. node 2 and node 3 have leader info
+ nt.isolate(1)
+
+ // check state
+ sm := nt.peers[1].(*raft)
+ if sm.state != StateLeader {
+ t.Fatalf("peer 1 state: %s, want %s", sm.state, StateLeader)
+ }
+ sm = nt.peers[2].(*raft)
+ if sm.state != StateFollower {
+ t.Fatalf("peer 2 state: %s, want %s", sm.state, StateFollower)
+ }
+ sm = nt.peers[3].(*raft)
+ if sm.state != StateFollower {
+ t.Fatalf("peer 3 state: %s, want %s", sm.state, StateFollower)
+ }
+
+ // node 2 will ignore node 3's PreVote
+ nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
+ nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
+
+ // Do we have a leader?
+ if n2.state != StateLeader && n3.state != StateFollower {
+ t.Errorf("no leader")
+ }
+}
+
// TestNodeWithSmallerTermCanCompleteElection tests the scenario where a node
// that has been partitioned away (and fallen behind) rejoins the cluster at
// about the same time the leader node gets partitioned away.
@@ -3529,6 +3957,9 @@ func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {
n1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
n2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
n3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ defer closeAndFreeRaft(n1)
+ defer closeAndFreeRaft(n2)
+ defer closeAndFreeRaft(n3)
n1.becomeFollower(1, None)
n2.becomeFollower(1, None)
@@ -3618,6 +4049,284 @@ func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {
}
}
+// TestLearnerCampaign verifies that a learner won't campaign even if it receives
+// a MsgHup or MsgTimeoutNow.
+func TestLearnerCampaign(t *testing.T) {
+ n1 := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())
+ grp2 := pb.Group{
+ NodeId: 2,
+ GroupId: 1,
+ RaftReplicaId: 2,
+ }
+ n1.addLearner(2, grp2)
+ n2 := newTestRaft(2, []uint64{1}, 10, 1, NewMemoryStorage())
+ n2.addLearner(2, grp2)
+ nt := newNetwork(n1, n2)
+ nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
+
+ if !n2.isLearner {
+ t.Fatalf("failed to make n2 a learner")
+ }
+
+ if n2.state != StateFollower {
+ t.Fatalf("n2 campaigned despite being learner")
+ }
+
+ nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
+ if n1.state != StateLeader || n1.lead != 1 {
+ t.Fatalf("n1 did not become leader")
+ }
+
+ // NB: TransferLeader already checks that the recipient is not a learner, but
+ // that check could have happened before the recipient became a learner, in
+ // which case it will still receive MsgTimeoutNow, as in this test case, and
+ // we verify that it's ignored.
+ nt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTimeoutNow})
+
+ if n2.state != StateFollower {
+ t.Fatalf("n2 accepted leadership transfer despite being learner")
+ }
+}
+
+// newPreVoteMigrationCluster simulates rolling out Pre-Vote to a cluster of
+// 3 nodes [n1, n2, n3]. After setup:
+// n1 is leader with term 2
+// n2 is follower with term 2
+// n3 is partitioned as a candidate, with term 4 and a shorter log
+func newPreVoteMigrationCluster(t *testing.T) *network {
+ n1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ n2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+ n3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
+
+ n1.becomeFollower(1, None)
+ n2.becomeFollower(1, None)
+ n3.becomeFollower(1, None)
+
+ n1.preVote = true
+ n2.preVote = true
+ // We intentionally do not enable PreVote for n3; this simulates a rolling
+ // restart in which the cluster temporarily mixes replicas with PreVote
+ // enabled and replicas without it.
+
+ nt := newNetwork(n1, n2, n3)
+ nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
+
+ // Cause a network partition to isolate n3.
+ nt.isolate(3)
+ nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
+ nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
+ nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
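+ // Each failed campaign on the isolated n3 bumps its term, taking it to 4
+ // while n1 and n2 stay at term 2.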
+
+ // check state
+ // n1.state == StateLeader
+ // n2.state == StateFollower
+ // n3.state == StateCandidate
+ if n1.state != StateLeader {
+ t.Fatalf("node 1 state: %s, want %s", n1.state, StateLeader)
+ }
+ if n2.state != StateFollower {
+ t.Fatalf("node 2 state: %s, want %s", n2.state, StateFollower)
+ }
+ if n3.state != StateCandidate {
+ t.Fatalf("node 3 state: %s, want %s", n3.state, StateCandidate)
+ }
+
+ // check term
+ // n1.Term == 2
+ // n2.Term == 2
+ // n3.Term == 4
+ if n1.Term != 2 {
+ t.Fatalf("node 1 term: %d, want %d", n1.Term, 2)
+ }
+ if n2.Term != 2 {
+ t.Fatalf("node 2 term: %d, want %d", n2.Term, 2)
+ }
+ if n3.Term != 4 {
+ t.Fatalf("node 3 term: %d, want %d", n3.Term, 4)
+ }
+
+ // Enable prevote on n3, then recover the network
+ n3.preVote = true
+ nt.recover()
+
+ return nt
+}
+
+func TestPreVoteMigrationCanCompleteElection(t *testing.T) {
+ nt := newPreVoteMigrationCluster(t)
+
+ // n1 is leader with term 2
+ // n2 is follower with term 2
+ // n3 is pre-candidate with term 4 and a shorter log
+ n2 := nt.peers[2].(*raft)
+ n3 := nt.peers[3].(*raft)
+
+ // simulate leader down
+ nt.isolate(1)
+
+ // Call for elections from both n2 and n3.
+ nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
+ nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
+
+ // check state
+ // n2.state == Follower
+ // n3.state == PreCandidate
+ if n2.state != StateFollower {
+ t.Errorf("node 2 state: %s, want %s", n2.state, StateFollower)
+ }
+ if n3.state != StatePreCandidate {
+ t.Errorf("node 3 state: %s, want %s", n3.state, StatePreCandidate)
+ }
+
+ nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
+ nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
+
+ // Do we have a leader?
+ if n2.state != StateLeader && n3.state != StateFollower {
+ t.Errorf("no leader")
+ }
+}
+
+func TestPreVoteMigrationWithFreeStuckPreCandidate(t *testing.T) {
+ nt := newPreVoteMigrationCluster(t)
+
+ // n1 is leader with term 2
+ // n2 is follower with term 2
+ // n3 is pre-candidate with term 4 and a shorter log
+ n1 := nt.peers[1].(*raft)
+ n2 := nt.peers[2].(*raft)
+ n3 := nt.peers[3].(*raft)
+
+ nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
+
+ if n1.state != StateLeader {
+ t.Errorf("node 1 state: %s, want %s", n1.state, StateLeader)
+ }
+ if n2.state != StateFollower {
+ t.Errorf("node 2 state: %s, want %s", n2.state, StateFollower)
+ }
+ if n3.state != StatePreCandidate {
+ t.Errorf("node 3 state: %s, want %s", n3.state, StatePreCandidate)
+ }
+
+ // Pre-Vote again for safety
+ nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
+
+ if n1.state != StateLeader {
+ t.Errorf("node 1 state: %s, want %s", n1.state, StateLeader)
+ }
+ if n2.state != StateFollower {
+ t.Errorf("node 2 state: %s, want %s", n2.state, StateFollower)
+ }
+ if n3.state != StatePreCandidate {
+ t.Errorf("node 3 state: %s, want %s", n3.state, StatePreCandidate)
+ }
+
+ nt.send(pb.Message{From: 1, To: 3, Type: pb.MsgHeartbeat, Term: n1.Term})
+
+ // Disrupt the leader so that the stuck peer is freed
+ if n1.state != StateFollower {
+ t.Errorf("state = %s, want %s", n1.state, StateFollower)
+ }
+ if n3.Term != n1.Term {
+ t.Errorf("term = %d, want %d", n3.Term, n1.Term)
+ }
+}
+
+func testConfChangeCheckBeforeCampaign(t *testing.T, v2 bool) {
+ nt := newNetwork(nil, nil, nil)
+ n1 := nt.peers[1].(*raft)
+ n2 := nt.peers[2].(*raft)
+ nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
+ if n1.state != StateLeader {
+ t.Errorf("node 1 state: %s, want %s", n1.state, StateLeader)
+ }
+
+ // Propose a conf change to remove a node.
+ cc := pb.ConfChange{
+ Type: pb.ConfChangeRemoveNode,
+ ReplicaID: 2,
+ }
+ var ccData []byte
+ var err error
+ var ty pb.EntryType
+ if v2 {
+ // TODO: v2 config
+ //ccv2 := cc.AsV2()
+ //ccData, err = ccv2.Marshal()
+ //ty = pb.EntryConfChangeV2
+ ccData, err = cc.Marshal()
+ ty = pb.EntryConfChange
+ } else {
+ ccData, err = cc.Marshal()
+ ty = pb.EntryConfChange
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ nt.send(pb.Message{
+ From: 1,
+ To: 1,
+ Type: pb.MsgProp,
+ Entries: []pb.Entry{
+ {Type: ty, Data: ccData},
+ },
+ })
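+ // The conf change entry is now committed on the leader but has not been
+ // applied on n2, which is what keeps n2 from campaigning below.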
+
+ // Trigger campaign in node 2
+ for i := 0; i < n2.randomizedElectionTimeout; i++ {
+ n2.tick()
+ }
+ // It's still a follower because the committed conf change has not been applied.
+ if n2.state != StateFollower {
+ t.Errorf("node 2 state: %s, want %s", n2.state, StateFollower)
+ }
+
+ // Transfer leadership to peer 2.
+ nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})
+ if n1.state != StateLeader {
+ t.Errorf("node 1 state: %s, want %s", n1.state, StateLeader)
+ }
+ // It's still a follower because the committed conf change has not been applied.
+ if n2.state != StateFollower {
+ t.Errorf("node 2 state: %s, want %s", n2.state, StateFollower)
+ }
+ // Abort transfer leader
+ for i := 0; i < n1.electionTimeout; i++ {
+ n1.tick()
+ }
+
+ // Advance the applied index on n2 by consuming the committed entries.
+ nextEnts(n2, nt.storage[2])
+
+ // Transfer leadership to peer 2 again.
+ nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})
+ if n1.state != StateFollower {
+ t.Errorf("node 1 state: %s, want %s", n1.state, StateFollower)
+ }
+ if n2.state != StateLeader {
+ t.Errorf("node 2 state: %s, want %s", n2.state, StateLeader)
+ }
+
+ nextEnts(n1, nt.storage[1])
+ // Trigger campaign in node 2
+ for i := 0; i < n1.randomizedElectionTimeout; i++ {
+ n1.tick()
+ }
+ if n1.state != StateCandidate {
+ t.Errorf("node 1 state: %s, want %s", n1.state, StateCandidate)
+ }
+}
+
+// Tests if unapplied ConfChange is checked before campaign.
+func TestConfChangeCheckBeforeCampaign(t *testing.T) {
+ testConfChangeCheckBeforeCampaign(t, false)
+}
+
+// Tests if unapplied ConfChangeV2 is checked before campaign.
+func TestConfChangeV2CheckBeforeCampaign(t *testing.T) {
+ testConfChangeCheckBeforeCampaign(t, true)
+}
+
func entsWithConfig(configFunc func(*Config), terms ...uint64) *raft {
storage := NewMemoryStorage()
for i, term := range terms {
@@ -3649,9 +4358,12 @@ func votedWithConfig(configFunc func(*Config), vote, term uint64) *raft {
type network struct {
peers map[uint64]stateMachine
- storage map[uint64]*MemoryStorage
+ storage map[uint64]IExtRaftStorage
dropm map[connem]float64
ignorem map[pb.MessageType]bool
+ // msgHook is called for each message sent. It may inspect the
+ // message and return true to send it or false to drop it.
+ msgHook func(pb.Message) bool
}
// newNetwork initializes a network from peers.
@@ -3670,7 +4382,7 @@ func newNetworkWithConfig(configFunc func(*Config), peers ...stateMachine) *netw
peerGroups := grpsByIds(peerAddrs)
npeers := make(map[uint64]stateMachine, size)
- nstorage := make(map[uint64]*MemoryStorage, size)
+ nstorage := make(map[uint64]IExtRaftStorage, size)
for j, p := range peers {
id := peerAddrs[j]
@@ -3718,6 +4430,15 @@ func preVoteConfig(c *Config) {
c.PreVote = true
}
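+// closeAll closes the storage of every raft peer in the network so that tests
+// release any non-memory resources they hold.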
+func (nw *network) closeAll() {
+ for _, peer := range nw.peers {
+ pr, ok := peer.(*raft)
+ if ok {
+ closeAndFreeRaft(pr)
+ }
+ }
+}
+
func (nw *network) send(msgs ...pb.Message) error {
for len(msgs) > 0 {
m := msgs[0]
@@ -3785,6 +4506,11 @@ func (nw *network) filter(msgs []pb.Message) []pb.Message {
continue
}
}
+ if nw.msgHook != nil {
+ if !nw.msgHook(m) {
+ continue
+ }
+ }
mm = append(mm, m)
}
return mm
@@ -3869,3 +4595,14 @@ func newTestLearnerRaft(id uint64, peers []uint64, learners []uint64, election,
cfg.learners = peerGrps
return newRaft(cfg)
}
+
+// newTestRawNode sets up a RawNode with the given peers. The configuration will
+// not be reflected in the Storage.
+func newTestRawNode(id uint64, peers []uint64, election, heartbeat int, storage Storage) *RawNode {
+ cfg := newTestConfig(id, peers, election, heartbeat, storage)
+ rn, err := NewRawNode(cfg)
+ if err != nil {
+ panic(err)
+ }
+ return rn
+}
diff --git a/raft/raftpb/raft.pb.go b/raft/raftpb/raft.pb.go
index f8b09265..9579d2af 100644
--- a/raft/raftpb/raft.pb.go
+++ b/raft/raftpb/raft.pb.go
@@ -1,30 +1,15 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: raft.proto
-/*
- Package raftpb is a generated protocol buffer package.
-
- It is generated from these files:
- raft.proto
-
- It has these top-level messages:
- Entry
- SnapshotMetadata
- Snapshot
- Group
- Message
- HardState
- ConfState
- ConfChange
-*/
package raftpb
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-
-import io "io"
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -35,7 +20,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type EntryType int32
@@ -48,6 +33,7 @@ var EntryType_name = map[int32]string{
0: "EntryNormal",
1: "EntryConfChange",
}
+
var EntryType_value = map[string]int32{
"EntryNormal": 0,
"EntryConfChange": 1,
@@ -58,9 +44,11 @@ func (x EntryType) Enum() *EntryType {
*p = x
return p
}
+
func (x EntryType) String() string {
return proto.EnumName(EntryType_name, int32(x))
}
+
func (x *EntryType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType")
if err != nil {
@@ -69,7 +57,10 @@ func (x *EntryType) UnmarshalJSON(data []byte) error {
*x = EntryType(value)
return nil
}
-func (EntryType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} }
+
+func (EntryType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{0}
+}
type MessageType int32
@@ -116,6 +107,7 @@ var MessageType_name = map[int32]string{
17: "MsgPreVote",
18: "MsgPreVoteResp",
}
+
var MessageType_value = map[string]int32{
"MsgHup": 0,
"MsgBeat": 1,
@@ -143,9 +135,11 @@ func (x MessageType) Enum() *MessageType {
*p = x
return p
}
+
func (x MessageType) String() string {
return proto.EnumName(MessageType_name, int32(x))
}
+
func (x *MessageType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType")
if err != nil {
@@ -154,7 +148,10 @@ func (x *MessageType) UnmarshalJSON(data []byte) error {
*x = MessageType(value)
return nil
}
-func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} }
+
+func (MessageType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{1}
+}
type ConfChangeType int32
@@ -171,6 +168,7 @@ var ConfChangeType_name = map[int32]string{
2: "ConfChangeUpdateNode",
3: "ConfChangeAddLearnerNode",
}
+
var ConfChangeType_value = map[string]int32{
"ConfChangeAddNode": 0,
"ConfChangeRemoveNode": 1,
@@ -183,9 +181,11 @@ func (x ConfChangeType) Enum() *ConfChangeType {
*p = x
return p
}
+
func (x ConfChangeType) String() string {
return proto.EnumName(ConfChangeType_name, int32(x))
}
+
func (x *ConfChangeType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType")
if err != nil {
@@ -194,43 +194,130 @@ func (x *ConfChangeType) UnmarshalJSON(data []byte) error {
*x = ConfChangeType(value)
return nil
}
-func (ConfChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} }
+
+func (ConfChangeType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{2}
+}
type Entry struct {
- Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"`
- Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"`
- Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"`
- Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"`
+ Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"`
+ Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"`
+ Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data"`
+ ID uint64 `protobuf:"varint,5,opt,name=ID" json:"ID"`
+ DataType int32 `protobuf:"varint,6,opt,name=data_type,json=dataType" json:"data_type"`
+ Timestamp int64 `protobuf:"varint,7,opt,name=timestamp" json:"timestamp"`
}
-func (m *Entry) Reset() { *m = Entry{} }
-func (m *Entry) String() string { return proto.CompactTextString(m) }
-func (*Entry) ProtoMessage() {}
-func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} }
+func (m *Entry) Reset() { *m = Entry{} }
+func (m *Entry) String() string { return proto.CompactTextString(m) }
+func (*Entry) ProtoMessage() {}
+func (*Entry) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{0}
+}
+func (m *Entry) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Entry.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Entry) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Entry.Merge(m, src)
+}
+func (m *Entry) XXX_Size() int {
+ return m.Size()
+}
+func (m *Entry) XXX_DiscardUnknown() {
+ xxx_messageInfo_Entry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Entry proto.InternalMessageInfo
type SnapshotMetadata struct {
- ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"`
- Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"`
- Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"`
- XXX_unrecognized []byte `json:"-"`
+ ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"`
+ Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"`
+ Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"`
}
-func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} }
-func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) }
-func (*SnapshotMetadata) ProtoMessage() {}
-func (*SnapshotMetadata) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} }
+func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} }
+func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) }
+func (*SnapshotMetadata) ProtoMessage() {}
+func (*SnapshotMetadata) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{1}
+}
+func (m *SnapshotMetadata) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SnapshotMetadata.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SnapshotMetadata) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SnapshotMetadata.Merge(m, src)
+}
+func (m *SnapshotMetadata) XXX_Size() int {
+ return m.Size()
+}
+func (m *SnapshotMetadata) XXX_DiscardUnknown() {
+ xxx_messageInfo_SnapshotMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotMetadata proto.InternalMessageInfo
type Snapshot struct {
- Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
- Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"`
- XXX_unrecognized []byte `json:"-"`
+ Data []byte `protobuf:"bytes,1,opt,name=data" json:"data"`
+ Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{2}
+}
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Snapshot) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Snapshot.Merge(m, src)
+}
+func (m *Snapshot) XXX_Size() int {
+ return m.Size()
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+ xxx_messageInfo_Snapshot.DiscardUnknown(m)
}
-func (m *Snapshot) Reset() { *m = Snapshot{} }
-func (m *Snapshot) String() string { return proto.CompactTextString(m) }
-func (*Snapshot) ProtoMessage() {}
-func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} }
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
type Group struct {
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId" json:"node_id"`
@@ -238,78 +325,216 @@ type Group struct {
// group id is the namespace id, which is unchanged in the same namespace
GroupId uint64 `protobuf:"varint,3,opt,name=group_id,json=groupId" json:"group_id"`
// replica id is the specific partition id, always increased if re-add the removed partition
- RaftReplicaId uint64 `protobuf:"varint,4,opt,name=raft_replica_id,json=raftReplicaId" json:"raft_replica_id"`
- XXX_unrecognized []byte `json:"-"`
+ RaftReplicaId uint64 `protobuf:"varint,4,opt,name=raft_replica_id,json=raftReplicaId" json:"raft_replica_id"`
+}
+
+func (m *Group) Reset() { *m = Group{} }
+func (m *Group) String() string { return proto.CompactTextString(m) }
+func (*Group) ProtoMessage() {}
+func (*Group) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{3}
+}
+func (m *Group) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Group.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Group) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Group.Merge(m, src)
+}
+func (m *Group) XXX_Size() int {
+ return m.Size()
+}
+func (m *Group) XXX_DiscardUnknown() {
+ xxx_messageInfo_Group.DiscardUnknown(m)
}
-func (m *Group) Reset() { *m = Group{} }
-func (m *Group) String() string { return proto.CompactTextString(m) }
-func (*Group) ProtoMessage() {}
-func (*Group) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} }
+var xxx_messageInfo_Group proto.InternalMessageInfo
type Message struct {
- Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"`
- To uint64 `protobuf:"varint,2,opt,name=to" json:"to"`
- From uint64 `protobuf:"varint,3,opt,name=from" json:"from"`
- Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"`
- LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"`
- Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"`
- Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"`
- Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"`
- Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"`
- Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"`
- RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"`
- Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"`
- FromGroup Group `protobuf:"bytes,13,opt,name=from_group,json=fromGroup" json:"from_group"`
- ToGroup Group `protobuf:"bytes,14,opt,name=to_group,json=toGroup" json:"to_group"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Message) Reset() { *m = Message{} }
-func (m *Message) String() string { return proto.CompactTextString(m) }
-func (*Message) ProtoMessage() {}
-func (*Message) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} }
+ Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"`
+ To uint64 `protobuf:"varint,2,opt,name=to" json:"to"`
+ From uint64 `protobuf:"varint,3,opt,name=from" json:"from"`
+ Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"`
+ LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"`
+ Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"`
+ Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"`
+ Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"`
+ Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"`
+ Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"`
+ RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"`
+ Context []byte `protobuf:"bytes,12,opt,name=context" json:"context"`
+ FromGroup Group `protobuf:"bytes,13,opt,name=from_group,json=fromGroup" json:"from_group"`
+ ToGroup Group `protobuf:"bytes,14,opt,name=to_group,json=toGroup" json:"to_group"`
+}
+
+func (m *Message) Reset() { *m = Message{} }
+func (m *Message) String() string { return proto.CompactTextString(m) }
+func (*Message) ProtoMessage() {}
+func (*Message) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{4}
+}
+func (m *Message) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Message.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Message) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Message.Merge(m, src)
+}
+func (m *Message) XXX_Size() int {
+ return m.Size()
+}
+func (m *Message) XXX_DiscardUnknown() {
+ xxx_messageInfo_Message.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Message proto.InternalMessageInfo
type HardState struct {
- Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"`
- Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"`
- Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"`
- XXX_unrecognized []byte `json:"-"`
+ Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"`
+ Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"`
+ Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"`
+}
+
+func (m *HardState) Reset() { *m = HardState{} }
+func (m *HardState) String() string { return proto.CompactTextString(m) }
+func (*HardState) ProtoMessage() {}
+func (*HardState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{5}
+}
+func (m *HardState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HardState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_HardState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *HardState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HardState.Merge(m, src)
+}
+func (m *HardState) XXX_Size() int {
+ return m.Size()
+}
+func (m *HardState) XXX_DiscardUnknown() {
+ xxx_messageInfo_HardState.DiscardUnknown(m)
}
-func (m *HardState) Reset() { *m = HardState{} }
-func (m *HardState) String() string { return proto.CompactTextString(m) }
-func (*HardState) ProtoMessage() {}
-func (*HardState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} }
+var xxx_messageInfo_HardState proto.InternalMessageInfo
type ConfState struct {
- Nodes []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"`
- Groups []*Group `protobuf:"bytes,2,rep,name=groups" json:"groups,omitempty"`
- Learners []uint64 `protobuf:"varint,3,rep,name=learners" json:"learners,omitempty"`
- LearnerGroups []*Group `protobuf:"bytes,4,rep,name=learnerGroups" json:"learnerGroups,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Nodes []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"`
+ Groups []*Group `protobuf:"bytes,2,rep,name=groups" json:"groups,omitempty"`
+ Learners []uint64 `protobuf:"varint,3,rep,name=learners" json:"learners,omitempty"`
+ LearnerGroups []*Group `protobuf:"bytes,4,rep,name=learnerGroups" json:"learnerGroups,omitempty"`
+}
+
+func (m *ConfState) Reset() { *m = ConfState{} }
+func (m *ConfState) String() string { return proto.CompactTextString(m) }
+func (*ConfState) ProtoMessage() {}
+func (*ConfState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{6}
+}
+func (m *ConfState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConfState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConfState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConfState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConfState.Merge(m, src)
+}
+func (m *ConfState) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConfState) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConfState.DiscardUnknown(m)
}
-func (m *ConfState) Reset() { *m = ConfState{} }
-func (m *ConfState) String() string { return proto.CompactTextString(m) }
-func (*ConfState) ProtoMessage() {}
-func (*ConfState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} }
+var xxx_messageInfo_ConfState proto.InternalMessageInfo
type ConfChange struct {
- ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
- Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"`
- ReplicaID uint64 `protobuf:"varint,3,opt,name=ReplicaID" json:"ReplicaID"`
- NodeGroup Group `protobuf:"bytes,4,opt,name=node_group,json=nodeGroup" json:"node_group"`
- Context []byte `protobuf:"bytes,5,opt,name=Context" json:"Context,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
+ Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"`
+ ReplicaID uint64 `protobuf:"varint,3,opt,name=ReplicaID" json:"ReplicaID"`
+ NodeGroup Group `protobuf:"bytes,4,opt,name=node_group,json=nodeGroup" json:"node_group"`
+ Context []byte `protobuf:"bytes,5,opt,name=Context" json:"Context"`
+}
+
+func (m *ConfChange) Reset() { *m = ConfChange{} }
+func (m *ConfChange) String() string { return proto.CompactTextString(m) }
+func (*ConfChange) ProtoMessage() {}
+func (*ConfChange) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{7}
+}
+func (m *ConfChange) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConfChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConfChange.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConfChange) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConfChange.Merge(m, src)
+}
+func (m *ConfChange) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConfChange) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConfChange.DiscardUnknown(m)
}
-func (m *ConfChange) Reset() { *m = ConfChange{} }
-func (m *ConfChange) String() string { return proto.CompactTextString(m) }
-func (*ConfChange) ProtoMessage() {}
-func (*ConfChange) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{7} }
+var xxx_messageInfo_ConfChange proto.InternalMessageInfo
func init() {
+ proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value)
+ proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value)
+ proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value)
proto.RegisterType((*Entry)(nil), "raftpb.Entry")
proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata")
proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot")
@@ -318,10 +543,78 @@ func init() {
proto.RegisterType((*HardState)(nil), "raftpb.HardState")
proto.RegisterType((*ConfState)(nil), "raftpb.ConfState")
proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange")
- proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value)
- proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value)
- proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value)
}
+
+func init() { proto.RegisterFile("raft.proto", fileDescriptor_b042552c306ae59b) }
+
+var fileDescriptor_b042552c306ae59b = []byte{
+ // 1015 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x55, 0x41, 0x6f, 0x23, 0x35,
+ 0x14, 0x8e, 0x93, 0x49, 0x26, 0x79, 0x69, 0x52, 0xd7, 0x5b, 0x90, 0x55, 0x2d, 0xb3, 0x61, 0x04,
+ 0x52, 0x54, 0xd8, 0x02, 0x45, 0xe2, 0xc0, 0x6d, 0xdb, 0xa2, 0x6d, 0xa4, 0x4d, 0xb5, 0x64, 0xbb,
+ 0xdc, 0x50, 0x71, 0x33, 0xee, 0x34, 0xd0, 0x19, 0x8f, 0x3c, 0xce, 0xb2, 0xbd, 0x71, 0xe3, 0x08,
+ 0x57, 0xfe, 0x51, 0x0f, 0x1c, 0x2a, 0x71, 0xe1, 0x84, 0x68, 0xfb, 0x47, 0xd0, 0xf3, 0x78, 0x92,
+ 0x49, 0x0b, 0xdc, 0xc6, 0xdf, 0xf7, 0xfc, 0xde, 0xe7, 0xef, 0x3d, 0x7b, 0x00, 0xb4, 0x38, 0x33,
+ 0x3b, 0x99, 0x56, 0x46, 0xb1, 0x16, 0x7e, 0x67, 0xa7, 0x5b, 0x9b, 0xb1, 0x8a, 0x95, 0x85, 0x3e,
+ 0xc1, 0xaf, 0x82, 0x0d, 0x6f, 0x08, 0x34, 0xbf, 0x4a, 0x8d, 0xbe, 0x64, 0x1c, 0xbc, 0x63, 0xa9,
+ 0x13, 0x5e, 0x1f, 0x90, 0xa1, 0xb7, 0xe7, 0x5d, 0xfd, 0xf5, 0xa4, 0x36, 0xb1, 0x08, 0xdb, 0x82,
+ 0xe6, 0x28, 0x8d, 0xe4, 0x5b, 0xde, 0xa8, 0x50, 0x05, 0xc4, 0x3e, 0x02, 0xef, 0xf8, 0x32, 0x93,
+ 0x9c, 0x0c, 0xc8, 0xb0, 0xbf, 0xbb, 0xb1, 0x53, 0x14, 0xdb, 0xb1, 0x29, 0x91, 0x58, 0x24, 0xba,
+ 0xcc, 0x24, 0x96, 0x38, 0x10, 0x46, 0x70, 0x6f, 0x40, 0x86, 0x6b, 0x25, 0x83, 0x08, 0xdb, 0x84,
+ 0xfa, 0xe8, 0x80, 0x37, 0x2b, 0xf9, 0xeb, 0xa3, 0x03, 0xf6, 0x3e, 0x74, 0x22, 0x61, 0xc4, 0x89,
+ 0xc1, 0x0a, 0xad, 0x01, 0x19, 0x36, 0x1d, 0xd9, 0x46, 0xd8, 0xa6, 0x0c, 0xa1, 0x63, 0x66, 0x89,
+ 0xcc, 0x8d, 0x48, 0x32, 0xee, 0x0f, 0xc8, 0xb0, 0xe1, 0x42, 0x96, 0x70, 0xf8, 0x13, 0x01, 0xfa,
+ 0x2a, 0x15, 0x59, 0x7e, 0xae, 0xcc, 0x58, 0x1a, 0x81, 0x9b, 0xd9, 0x17, 0x00, 0x53, 0x95, 0x9e,
+ 0x9d, 0xe4, 0x46, 0x98, 0x42, 0x7e, 0x77, 0x29, 0x7f, 0x5f, 0xa5, 0x67, 0xaf, 0x90, 0x28, 0x93,
+ 0x4d, 0x4b, 0x00, 0xcd, 0x98, 0x59, 0x33, 0xaa, 0x3e, 0x15, 0x10, 0x9e, 0xcf, 0xa0, 0x85, 0x55,
+ 0x9f, 0x2c, 0x12, 0x7e, 0x07, 0xed, 0x52, 0x01, 0x46, 0xa1, 0x02, 0x5b, 0x73, 0xe1, 0x82, 0xd5,
+ 0xf4, 0x25, 0xb4, 0x13, 0xa7, 0xcf, 0xa6, 0xef, 0xee, 0xf2, 0x52, 0xd1, 0x7d, 0xfd, 0xa5, 0x11,
+ 0x65, 0x7c, 0xf8, 0x0b, 0x81, 0xe6, 0x73, 0xad, 0xe6, 0x19, 0x7b, 0x0f, 0xfc, 0x54, 0x45, 0xf2,
+ 0x64, 0x16, 0xd9, 0x12, 0xa5, 0x90, 0x16, 0x82, 0xa3, 0x08, 0xcb, 0xa7, 0x22, 0x91, 0xb6, 0x40,
+ 0xa7, 0x2c, 0x8f, 0x08, 0x7b, 0x02, 0xed, 0x18, 0x33, 0xe0, 0xce, 0xea, 0x11, 0x7c, 0x8b, 0x8e,
+ 0x22, 0xf6, 0x31, 0xac, 0xa3, 0x9c, 0x13, 0x2d, 0xb3, 0x8b, 0xd9, 0x54, 0x60, 0x9c, 0x57, 0x89,
+ 0xeb, 0x21, 0x39, 0x29, 0xb8, 0x51, 0x14, 0xfe, 0xec, 0x81, 0x3f, 0x96, 0x79, 0x2e, 0x62, 0xc9,
+ 0x9e, 0x82, 0x67, 0x96, 0x63, 0xf2, 0xa8, 0x3c, 0x95, 0xa3, 0xab, 0x83, 0x82, 0x61, 0x38, 0x0e,
+ 0x46, 0xad, 0x38, 0x5c, 0x37, 0x0a, 0x95, 0x9f, 0x69, 0x75, 0xcf, 0x5e, 0x44, 0x16, 0xc6, 0x7b,
+ 0xf7, 0x8d, 0x67, 0x01, 0xf8, 0x17, 0x2a, 0xb6, 0x83, 0x5d, 0x9d, 0xae, 0x12, 0x5c, 0xb6, 0xb3,
+ 0xf5, 0xb0, 0x9d, 0x4f, 0xc1, 0x97, 0xa9, 0xd1, 0x33, 0x99, 0x73, 0x7f, 0xd0, 0x18, 0x76, 0x77,
+ 0x7b, 0x2b, 0xe3, 0x5d, 0xa6, 0x72, 0x31, 0xec, 0x31, 0xb4, 0xa6, 0x2a, 0x49, 0x66, 0x86, 0xb7,
+ 0xab, 0xb6, 0x17, 0x18, 0xdb, 0x85, 0x76, 0xee, 0x7a, 0xc8, 0x3b, 0xb6, 0xb7, 0xf4, 0x7e, 0x6f,
+ 0xcb, 0x9e, 0x96, 0x71, 0x98, 0x51, 0xcb, 0xef, 0xe5, 0xd4, 0x70, 0x18, 0x90, 0x61, 0xbb, 0xcc,
+ 0x58, 0x60, 0xec, 0x03, 0x80, 0xe2, 0xeb, 0x70, 0x96, 0x1a, 0xde, 0xad, 0xd4, 0xac, 0xe0, 0x68,
+ 0xc0, 0x54, 0xa5, 0x46, 0xbe, 0x35, 0x7c, 0xad, 0x32, 0x70, 0x25, 0xc8, 0x76, 0x01, 0xd0, 0xc2,
+ 0x13, 0xdb, 0x63, 0xde, 0xb3, 0xca, 0x16, 0xe7, 0xb4, 0x03, 0x55, 0xde, 0x01, 0x0c, 0x2b, 0x26,
+ 0x6c, 0x07, 0xda, 0x46, 0xb9, 0x1d, 0xfd, 0xff, 0xde, 0xe1, 0x1b, 0x65, 0x97, 0xe1, 0xb7, 0xd0,
+ 0x39, 0x14, 0x3a, 0x2a, 0x2e, 0x50, 0xd9, 0x2b, 0xf2, 0xa0, 0x57, 0x1c, 0xbc, 0x37, 0xca, 0xc8,
+ 0xd5, 0x17, 0x08, 0x91, 0x8a, 0xb5, 0x8d, 0x87, 0xd6, 0x86, 0xbf, 0x11, 0xe8, 0x2c, 0x6e, 0x2c,
+ 0xdb, 0x84, 0x26, 0x4e, 0x7a, 0xce, 0xc9, 0xa0, 0x31, 0xf4, 0x26, 0xc5, 0x82, 0x7d, 0x08, 0x2d,
+ 0xab, 0x37, 0xe7, 0xf5, 0xd5, 0x56, 0x5a, 0x85, 0x13, 0x47, 0xb2, 0x2d, 0x68, 0x5f, 0x48, 0xa1,
+ 0x53, 0xa9, 0x73, 0xde, 0xb0, 0xfb, 0x17, 0x6b, 0xf6, 0x39, 0xf4, 0xdc, 0xf7, 0xf3, 0x22, 0x93,
+ 0xf7, 0x6f, 0x99, 0x56, 0x63, 0xc2, 0xdf, 0x09, 0x00, 0x6a, 0xdb, 0x3f, 0x17, 0x69, 0x2c, 0xdd,
+ 0x3b, 0x47, 0xee, 0xbd, 0x73, 0x9f, 0xba, 0x47, 0xb4, 0x6e, 0x6f, 0xc7, 0xbb, 0xd5, 0x57, 0xa8,
+ 0xd8, 0xf7, 0xe0, 0x25, 0x0d, 0xa1, 0x53, 0x5e, 0xb4, 0x83, 0x15, 0x4f, 0x96, 0x30, 0x76, 0xd6,
+ 0xbe, 0x03, 0x45, 0x9f, 0xbc, 0xff, 0xe9, 0x2c, 0x86, 0x15, 0x9d, 0x0d, 0xc0, 0xdf, 0x77, 0xd3,
+ 0xd2, 0xac, 0x4e, 0x8b, 0x03, 0xb7, 0x3f, 0x83, 0xce, 0xe2, 0x69, 0x67, 0xeb, 0xd0, 0xb5, 0x8b,
+ 0x23, 0xa5, 0x13, 0x71, 0x41, 0x6b, 0xec, 0x11, 0xac, 0x5b, 0x60, 0x29, 0x9c, 0x92, 0xed, 0x3f,
+ 0xea, 0xd0, 0xad, 0xdc, 0x73, 0x06, 0xd0, 0x1a, 0xe7, 0xf1, 0xe1, 0x3c, 0xa3, 0x35, 0xd6, 0x05,
+ 0x7f, 0x9c, 0xc7, 0x7b, 0x52, 0x18, 0x4a, 0xdc, 0xe2, 0xa5, 0x56, 0x19, 0xad, 0xbb, 0xa8, 0x67,
+ 0x59, 0x46, 0x1b, 0xac, 0x0f, 0x50, 0x7c, 0x4f, 0x64, 0x9e, 0x51, 0xcf, 0x05, 0x7e, 0xa3, 0x8c,
+ 0xa4, 0x4d, 0x14, 0xe1, 0x16, 0x96, 0x6d, 0x39, 0x16, 0xef, 0x14, 0xf5, 0x19, 0x85, 0x35, 0x2c,
+ 0x26, 0x85, 0x36, 0xa7, 0x58, 0xa5, 0xcd, 0x36, 0x81, 0x56, 0x11, 0xbb, 0xa9, 0xc3, 0x18, 0xf4,
+ 0xc7, 0x79, 0xfc, 0x3a, 0xd5, 0x52, 0x4c, 0xcf, 0xc5, 0xe9, 0x85, 0xa4, 0xc0, 0x36, 0xa0, 0xe7,
+ 0x12, 0xe1, 0x60, 0xcd, 0x73, 0xda, 0x75, 0x61, 0xfb, 0xe7, 0x72, 0xfa, 0xc3, 0xd7, 0x73, 0xa5,
+ 0xe7, 0x09, 0x5d, 0x63, 0xef, 0xc0, 0xc6, 0x38, 0x8f, 0x8f, 0xb5, 0x48, 0xf3, 0x33, 0xa9, 0x5f,
+ 0x48, 0x11, 0x49, 0x4d, 0x7b, 0x6e, 0xf7, 0xf1, 0x2c, 0x91, 0x6a, 0x6e, 0x8e, 0xd4, 0x8f, 0xb4,
+ 0xef, 0xc4, 0x4c, 0xa4, 0x88, 0xec, 0xbf, 0x93, 0xae, 0x3b, 0x31, 0x0b, 0xc4, 0x8a, 0xa1, 0xee,
+ 0xbc, 0x2f, 0xb5, 0xb4, 0x47, 0xdc, 0x70, 0x55, 0xdd, 0xda, 0xc6, 0xb0, 0xed, 0x4b, 0xe8, 0xaf,
+ 0x8e, 0x07, 0xea, 0x58, 0x22, 0xcf, 0xa2, 0xe8, 0x48, 0x45, 0x92, 0xd6, 0x18, 0x87, 0xcd, 0x25,
+ 0x3c, 0x91, 0x89, 0x7a, 0x23, 0x2d, 0x43, 0x56, 0x99, 0xd7, 0x59, 0x24, 0x4c, 0xc1, 0xd4, 0xd9,
+ 0x63, 0xe0, 0x2b, 0xa9, 0x5e, 0x14, 0x23, 0x6d, 0xd9, 0xc6, 0xde, 0xe0, 0xea, 0x26, 0xa8, 0x5d,
+ 0xdf, 0x04, 0xb5, 0xab, 0xdb, 0x80, 0x5c, 0xdf, 0x06, 0xe4, 0xef, 0xdb, 0x80, 0xfc, 0x7a, 0x17,
+ 0xd4, 0xae, 0xef, 0x82, 0xda, 0x9f, 0x77, 0x41, 0xed, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x96,
+ 0x62, 0xe7, 0x50, 0x7f, 0x08, 0x00, 0x00,
+}
+
func (m *Entry) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -352,9 +645,15 @@ func (m *Entry) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
i += copy(dAtA[i:], m.Data)
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.ID))
+ dAtA[i] = 0x30
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.DataType))
+ dAtA[i] = 0x38
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Timestamp))
return i, nil
}
@@ -387,9 +686,6 @@ func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x18
i++
i = encodeVarintRaft(dAtA, i, uint64(m.Term))
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
return i, nil
}
@@ -422,9 +718,6 @@ func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
return 0, err
}
i += n2
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
return i, nil
}
@@ -456,9 +749,6 @@ func (m *Group) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x20
i++
i = encodeVarintRaft(dAtA, i, uint64(m.RaftReplicaId))
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
return i, nil
}
@@ -551,9 +841,6 @@ func (m *Message) MarshalTo(dAtA []byte) (int, error) {
return 0, err
}
i += n5
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
return i, nil
}
@@ -581,9 +868,6 @@ func (m *HardState) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x18
i++
i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
return i, nil
}
@@ -640,9 +924,6 @@ func (m *ConfState) MarshalTo(dAtA []byte) (int, error) {
i += n
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
return i, nil
}
@@ -684,9 +965,6 @@ func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
i += copy(dAtA[i:], m.Context)
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
return i, nil
}
@@ -700,6 +978,9 @@ func encodeVarintRaft(dAtA []byte, offset int, v uint64) int {
return offset + 1
}
func (m *Entry) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
n += 1 + sovRaft(uint64(m.Type))
@@ -709,26 +990,29 @@ func (m *Entry) Size() (n int) {
l = len(m.Data)
n += 1 + l + sovRaft(uint64(l))
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
+ n += 1 + sovRaft(uint64(m.ID))
+ n += 1 + sovRaft(uint64(m.DataType))
+ n += 1 + sovRaft(uint64(m.Timestamp))
return n
}
func (m *SnapshotMetadata) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
l = m.ConfState.Size()
n += 1 + l + sovRaft(uint64(l))
n += 1 + sovRaft(uint64(m.Index))
n += 1 + sovRaft(uint64(m.Term))
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
return n
}
func (m *Snapshot) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
if m.Data != nil {
@@ -737,13 +1021,13 @@ func (m *Snapshot) Size() (n int) {
}
l = m.Metadata.Size()
n += 1 + l + sovRaft(uint64(l))
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
return n
}
func (m *Group) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
n += 1 + sovRaft(uint64(m.NodeId))
@@ -751,13 +1035,13 @@ func (m *Group) Size() (n int) {
n += 1 + l + sovRaft(uint64(l))
n += 1 + sovRaft(uint64(m.GroupId))
n += 1 + sovRaft(uint64(m.RaftReplicaId))
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
return n
}
func (m *Message) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
n += 1 + sovRaft(uint64(m.Type))
@@ -785,25 +1069,25 @@ func (m *Message) Size() (n int) {
n += 1 + l + sovRaft(uint64(l))
l = m.ToGroup.Size()
n += 1 + l + sovRaft(uint64(l))
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
return n
}
func (m *HardState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
n += 1 + sovRaft(uint64(m.Term))
n += 1 + sovRaft(uint64(m.Vote))
n += 1 + sovRaft(uint64(m.Commit))
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
return n
}
func (m *ConfState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
if len(m.Nodes) > 0 {
@@ -828,13 +1112,13 @@ func (m *ConfState) Size() (n int) {
n += 1 + l + sovRaft(uint64(l))
}
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
return n
}
func (m *ConfChange) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
n += 1 + sovRaft(uint64(m.ID))
@@ -846,9 +1130,6 @@ func (m *ConfChange) Size() (n int) {
l = len(m.Context)
n += 1 + l + sovRaft(uint64(l))
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
return n
}
@@ -880,7 +1161,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -908,7 +1189,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Type |= (EntryType(b) & 0x7F) << shift
+ m.Type |= EntryType(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -927,7 +1208,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Term |= (uint64(b) & 0x7F) << shift
+ m.Term |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -946,7 +1227,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Index |= (uint64(b) & 0x7F) << shift
+ m.Index |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -965,7 +1246,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -974,6 +1255,9 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -982,6 +1266,63 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
m.Data = []byte{}
}
iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataType", wireType)
+ }
+ m.DataType = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DataType |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipRaft(dAtA[iNdEx:])
@@ -991,10 +1332,12 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRaft
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1019,7 +1362,7 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1047,7 +1390,7 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1056,6 +1399,9 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1077,7 +1423,7 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Index |= (uint64(b) & 0x7F) << shift
+ m.Index |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1096,7 +1442,7 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Term |= (uint64(b) & 0x7F) << shift
+ m.Term |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1110,10 +1456,12 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRaft
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1138,7 +1486,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1166,7 +1514,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1175,6 +1523,9 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1197,7 +1548,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1206,6 +1557,9 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1222,10 +1576,12 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRaft
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1250,7 +1606,7 @@ func (m *Group) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1278,7 +1634,7 @@ func (m *Group) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.NodeId |= (uint64(b) & 0x7F) << shift
+ m.NodeId |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1297,7 +1653,7 @@ func (m *Group) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1307,6 +1663,9 @@ func (m *Group) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1326,7 +1685,7 @@ func (m *Group) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.GroupId |= (uint64(b) & 0x7F) << shift
+ m.GroupId |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1345,7 +1704,7 @@ func (m *Group) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.RaftReplicaId |= (uint64(b) & 0x7F) << shift
+ m.RaftReplicaId |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1359,10 +1718,12 @@ func (m *Group) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRaft
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1387,7 +1748,7 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1415,7 +1776,7 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Type |= (MessageType(b) & 0x7F) << shift
+ m.Type |= MessageType(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1434,7 +1795,7 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.To |= (uint64(b) & 0x7F) << shift
+ m.To |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1453,7 +1814,7 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.From |= (uint64(b) & 0x7F) << shift
+ m.From |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1472,7 +1833,7 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Term |= (uint64(b) & 0x7F) << shift
+ m.Term |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1491,7 +1852,7 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.LogTerm |= (uint64(b) & 0x7F) << shift
+ m.LogTerm |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1510,7 +1871,7 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Index |= (uint64(b) & 0x7F) << shift
+ m.Index |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1529,7 +1890,7 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1538,6 +1899,9 @@ func (m *Message) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1560,7 +1924,7 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Commit |= (uint64(b) & 0x7F) << shift
+ m.Commit |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1579,7 +1943,7 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1588,6 +1952,9 @@ func (m *Message) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1609,7 +1976,7 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= (int(b) & 0x7F) << shift
+ v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1629,7 +1996,7 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.RejectHint |= (uint64(b) & 0x7F) << shift
+ m.RejectHint |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1648,7 +2015,7 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1657,6 +2024,9 @@ func (m *Message) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1679,7 +2049,7 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1688,6 +2058,9 @@ func (m *Message) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1709,7 +2082,7 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1718,6 +2091,9 @@ func (m *Message) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1734,10 +2110,12 @@ func (m *Message) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRaft
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1762,7 +2140,7 @@ func (m *HardState) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1790,7 +2168,7 @@ func (m *HardState) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Term |= (uint64(b) & 0x7F) << shift
+ m.Term |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1809,7 +2187,7 @@ func (m *HardState) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Vote |= (uint64(b) & 0x7F) << shift
+ m.Vote |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1828,7 +2206,7 @@ func (m *HardState) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Commit |= (uint64(b) & 0x7F) << shift
+ m.Commit |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1842,10 +2220,12 @@ func (m *HardState) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRaft
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1870,7 +2250,7 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1896,7 +2276,7 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= (uint64(b) & 0x7F) << shift
+ v |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1913,7 +2293,7 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- packedLen |= (int(b) & 0x7F) << shift
+ packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1922,9 +2302,23 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.Nodes) == 0 {
+ m.Nodes = make([]uint64, 0, elementCount)
+ }
for iNdEx < postIndex {
var v uint64
for shift := uint(0); ; shift += 7 {
@@ -1936,7 +2330,7 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= (uint64(b) & 0x7F) << shift
+ v |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1960,7 +2354,7 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1969,6 +2363,9 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1989,7 +2386,7 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= (uint64(b) & 0x7F) << shift
+ v |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -2006,7 +2403,7 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- packedLen |= (int(b) & 0x7F) << shift
+ packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -2015,9 +2412,23 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.Learners) == 0 {
+ m.Learners = make([]uint64, 0, elementCount)
+ }
for iNdEx < postIndex {
var v uint64
for shift := uint(0); ; shift += 7 {
@@ -2029,7 +2440,7 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= (uint64(b) & 0x7F) << shift
+ v |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -2053,7 +2464,7 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -2062,6 +2473,9 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -2079,10 +2493,12 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRaft
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -2107,7 +2523,7 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -2135,7 +2551,7 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.ID |= (uint64(b) & 0x7F) << shift
+ m.ID |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -2154,7 +2570,7 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Type |= (ConfChangeType(b) & 0x7F) << shift
+ m.Type |= ConfChangeType(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -2173,7 +2589,7 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.ReplicaID |= (uint64(b) & 0x7F) << shift
+ m.ReplicaID |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -2192,7 +2608,7 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -2201,6 +2617,9 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -2222,7 +2641,7 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -2231,6 +2650,9 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -2248,10 +2670,12 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRaft
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -2315,10 +2739,13 @@ func skipRaft(dAtA []byte) (n int, err error) {
break
}
}
- iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthRaft
}
+ iNdEx += length
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthRaft
+ }
return iNdEx, nil
case 3:
for {
@@ -2347,6 +2774,9 @@ func skipRaft(dAtA []byte) (n int, err error) {
return 0, err
}
iNdEx = start + next
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthRaft
+ }
}
return iNdEx, nil
case 4:
@@ -2365,70 +2795,3 @@ var (
ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow")
)
-
-func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) }
-
-var fileDescriptorRaft = []byte{
- // 963 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x55, 0x4d, 0x6f, 0x23, 0x45,
- 0x10, 0xf5, 0xd8, 0x63, 0x8f, 0x5d, 0x8e, 0x9d, 0x4e, 0xad, 0x41, 0xad, 0x68, 0xf1, 0x5a, 0x16,
- 0x48, 0x56, 0x60, 0x03, 0x18, 0x89, 0x03, 0xb7, 0x4d, 0x82, 0x36, 0x96, 0xd6, 0xd1, 0xe2, 0xcd,
- 0x72, 0x40, 0x42, 0x51, 0xc7, 0xd3, 0x9e, 0x18, 0x3c, 0xd3, 0xa3, 0x9e, 0xf6, 0xb2, 0xb9, 0x20,
- 0xf8, 0x05, 0x5c, 0xe1, 0x0f, 0xa1, 0x1c, 0x57, 0xe2, 0x8e, 0xd8, 0xf0, 0x47, 0x50, 0x7f, 0x8c,
- 0x3d, 0x93, 0x00, 0xb7, 0xae, 0xf7, 0xaa, 0xab, 0x5f, 0xd7, 0xab, 0x9e, 0x01, 0x90, 0x6c, 0xa1,
- 0x0e, 0x53, 0x29, 0x94, 0xc0, 0x86, 0x5e, 0xa7, 0x97, 0xfb, 0xbd, 0x48, 0x44, 0xc2, 0x40, 0x1f,
- 0xeb, 0x95, 0x65, 0x87, 0x3f, 0x42, 0xfd, 0xcb, 0x44, 0xc9, 0x6b, 0xfc, 0x10, 0xfc, 0xf3, 0xeb,
- 0x94, 0x53, 0x6f, 0xe0, 0x8d, 0xba, 0xe3, 0xbd, 0x43, 0xbb, 0xeb, 0xd0, 0x90, 0x9a, 0x38, 0xf2,
- 0x6f, 0xfe, 0x7c, 0x54, 0x99, 0x99, 0x24, 0xa4, 0xe0, 0x9f, 0x73, 0x19, 0xd3, 0xea, 0xc0, 0x1b,
- 0xf9, 0x1b, 0x86, 0xcb, 0x18, 0xf7, 0xa1, 0x3e, 0x49, 0x42, 0xfe, 0x9a, 0xd6, 0x0a, 0x94, 0x85,
- 0x10, 0xc1, 0x3f, 0x61, 0x8a, 0x51, 0x7f, 0xe0, 0x8d, 0x76, 0x66, 0x66, 0x3d, 0xfc, 0xc9, 0x03,
- 0xf2, 0x22, 0x61, 0x69, 0x76, 0x25, 0xd4, 0x94, 0x2b, 0x16, 0x32, 0xc5, 0xf0, 0x73, 0x80, 0xb9,
- 0x48, 0x16, 0x17, 0x99, 0x62, 0xca, 0x2a, 0x6a, 0x6f, 0x15, 0x1d, 0x8b, 0x64, 0xf1, 0x42, 0x13,
- 0xae, 0x78, 0x6b, 0x9e, 0x03, 0xfa, 0xf0, 0xa5, 0x39, 0xbc, 0xa8, 0xcb, 0x42, 0x5a, 0xb2, 0xd2,
- 0x92, 0x8b, 0xba, 0x0c, 0x32, 0xfc, 0x06, 0x9a, 0xb9, 0x02, 0x2d, 0x51, 0x2b, 0x30, 0x67, 0xee,
- 0xcc, 0xcc, 0x1a, 0xbf, 0x80, 0x66, 0xec, 0x94, 0x99, 0xc2, 0xed, 0x31, 0xcd, 0xb5, 0xdc, 0x55,
- 0xee, 0xea, 0x6e, 0xf2, 0x87, 0xbf, 0x78, 0x50, 0x7f, 0x2a, 0xc5, 0x3a, 0xc5, 0xf7, 0x20, 0x48,
- 0x44, 0xc8, 0x2f, 0x96, 0xa1, 0x29, 0x9e, 0x4b, 0x68, 0x68, 0x70, 0x12, 0x6a, 0x79, 0x09, 0x8b,
- 0xb9, 0x39, 0xa0, 0x95, 0xcb, 0xd3, 0x08, 0x3e, 0x82, 0x66, 0xa4, 0x2b, 0xe8, 0x9d, 0x45, 0xf1,
- 0x81, 0x41, 0x27, 0x21, 0x7e, 0x04, 0xbb, 0x5a, 0xce, 0x85, 0xe4, 0xe9, 0x6a, 0x39, 0x67, 0x3a,
- 0xcf, 0x2f, 0xe4, 0x75, 0x34, 0x39, 0xb3, 0xdc, 0x24, 0x1c, 0xfe, 0xec, 0x43, 0x30, 0xe5, 0x59,
- 0xc6, 0x22, 0x8e, 0x8f, 0xc1, 0x57, 0x5b, 0xcf, 0x1f, 0xe4, 0xb7, 0x72, 0x74, 0xd1, 0x75, 0x9d,
- 0x86, 0x3d, 0xa8, 0x2a, 0x51, 0xea, 0x6d, 0x55, 0x09, 0xad, 0x7c, 0x21, 0xc5, 0x9d, 0xc6, 0x6a,
- 0x64, 0xd3, 0x72, 0xff, 0x6e, 0xcb, 0xb1, 0x0f, 0xc1, 0x4a, 0x44, 0x66, 0x84, 0xea, 0xc5, 0x2b,
- 0x39, 0x70, 0x6b, 0x64, 0xe3, 0xbe, 0x91, 0x8f, 0x21, 0xe0, 0x89, 0x92, 0x4b, 0x9e, 0xd1, 0x60,
- 0x50, 0x1b, 0xb5, 0xc7, 0x9d, 0xd2, 0xac, 0xe6, 0xa5, 0x5c, 0x0e, 0x3e, 0x84, 0xc6, 0x5c, 0xc4,
- 0xf1, 0x52, 0xd1, 0x66, 0xb1, 0xed, 0x16, 0xc3, 0x31, 0x34, 0x33, 0xe7, 0x21, 0x6d, 0x19, 0x6f,
- 0xc9, 0x5d, 0x6f, 0x73, 0x4f, 0xf3, 0x3c, 0x5d, 0x51, 0xf2, 0xef, 0xf8, 0x5c, 0x51, 0x18, 0x78,
- 0xa3, 0x66, 0x5e, 0xd1, 0x62, 0xf8, 0x3e, 0x80, 0x5d, 0x9d, 0x2e, 0x13, 0x45, 0xdb, 0x85, 0x33,
- 0x0b, 0x38, 0x52, 0x08, 0xe6, 0x22, 0x51, 0xfc, 0xb5, 0xa2, 0x3b, 0x66, 0xd4, 0xf2, 0x10, 0xc7,
- 0x00, 0xba, 0x79, 0x17, 0xc6, 0x5d, 0xda, 0x31, 0x9a, 0x36, 0x37, 0x34, 0xa3, 0x94, 0xcf, 0xbd,
- 0x4e, 0xb3, 0xb3, 0x75, 0x08, 0x4d, 0x25, 0xdc, 0x8e, 0xee, 0x7f, 0xef, 0x08, 0x94, 0x30, 0xe1,
- 0xf0, 0x5b, 0x68, 0x9d, 0x32, 0x19, 0xda, 0x47, 0x93, 0xbb, 0xe4, 0xdd, 0x73, 0x89, 0x82, 0xff,
- 0x4a, 0x28, 0x5e, 0x7e, 0xe5, 0x1a, 0x29, 0x34, 0xb5, 0x76, 0xbf, 0xa9, 0xc3, 0xdf, 0x3c, 0x68,
- 0x6d, 0x5e, 0x29, 0xf6, 0xa0, 0xae, 0x67, 0x3c, 0xa3, 0xde, 0xa0, 0x36, 0xf2, 0x67, 0x36, 0xc0,
- 0x0f, 0xa0, 0x61, 0xf4, 0x66, 0xb4, 0x5a, 0x36, 0xd1, 0x28, 0x9c, 0x39, 0x12, 0xf7, 0xa1, 0xb9,
- 0xe2, 0x4c, 0x26, 0x5c, 0x66, 0xb4, 0x66, 0xf6, 0x6f, 0x62, 0xfc, 0x0c, 0x3a, 0x6e, 0xfd, 0xd4,
- 0x56, 0xf2, 0xff, 0xad, 0x52, 0x39, 0x67, 0xf8, 0xbb, 0x07, 0xa0, 0xb5, 0x1d, 0x5f, 0xb1, 0x24,
- 0x32, 0x23, 0x3d, 0x39, 0x29, 0x5d, 0xbd, 0x3a, 0x39, 0xc1, 0x4f, 0xdc, 0xb7, 0xb0, 0x6a, 0xde,
- 0xc5, 0xbb, 0xc5, 0x2f, 0x8f, 0xdd, 0x77, 0xef, 0x83, 0x38, 0x84, 0x56, 0xfe, 0xc4, 0x4e, 0x4a,
- 0x3d, 0xd9, 0xc2, 0xda, 0x59, 0xf3, 0x05, 0xb0, 0x3e, 0xf9, 0xff, 0xe3, 0xac, 0x4e, 0xb3, 0xce,
- 0x52, 0x08, 0x8e, 0xdd, 0x9c, 0xd4, 0xed, 0x9c, 0xb8, 0xf0, 0xe0, 0x53, 0x68, 0x6d, 0xbe, 0xcd,
- 0xb8, 0x0b, 0x6d, 0x13, 0x9c, 0x09, 0x19, 0xb3, 0x15, 0xa9, 0xe0, 0x03, 0xd8, 0x35, 0xc0, 0x56,
- 0x32, 0xf1, 0x0e, 0xfe, 0xa8, 0x42, 0xbb, 0xf0, 0xb6, 0x11, 0xa0, 0x31, 0xcd, 0xa2, 0xd3, 0x75,
- 0x4a, 0x2a, 0xd8, 0x86, 0x60, 0x9a, 0x45, 0x47, 0x9c, 0x29, 0xe2, 0xb9, 0xe0, 0xb9, 0x14, 0x29,
- 0xa9, 0xba, 0xac, 0x27, 0x69, 0x4a, 0x6a, 0xd8, 0x05, 0xb0, 0xeb, 0x19, 0xcf, 0x52, 0xe2, 0xbb,
- 0xc4, 0xaf, 0x85, 0xe2, 0xa4, 0xae, 0x45, 0xb8, 0xc0, 0xb0, 0x0d, 0xc7, 0xea, 0x77, 0x44, 0x02,
- 0x24, 0xb0, 0xa3, 0x0f, 0xe3, 0x4c, 0xaa, 0x4b, 0x7d, 0x4a, 0x13, 0x7b, 0x40, 0x8a, 0x88, 0xd9,
- 0xd4, 0x42, 0x84, 0xee, 0x34, 0x8b, 0x5e, 0x26, 0x92, 0xb3, 0xf9, 0x15, 0xbb, 0x5c, 0x71, 0x02,
- 0xb8, 0x07, 0x1d, 0x57, 0x48, 0x8f, 0xd4, 0x3a, 0x23, 0x6d, 0x97, 0x76, 0x7c, 0xc5, 0xe7, 0xdf,
- 0x7f, 0xb5, 0x16, 0x72, 0x1d, 0x93, 0x1d, 0x7c, 0x07, 0xf6, 0xa6, 0x59, 0x74, 0x2e, 0x59, 0x92,
- 0x2d, 0xb8, 0x7c, 0xc6, 0x59, 0xc8, 0x25, 0xe9, 0xb8, 0xdd, 0xe7, 0xcb, 0x98, 0x8b, 0xb5, 0x3a,
- 0x13, 0x3f, 0x90, 0xae, 0x13, 0x33, 0xe3, 0x2c, 0x34, 0x7f, 0x26, 0xb2, 0xeb, 0xc4, 0x6c, 0x10,
- 0x23, 0x86, 0xb8, 0xfb, 0x3e, 0x97, 0xdc, 0x5c, 0x71, 0xcf, 0x9d, 0xea, 0x62, 0x93, 0x83, 0x07,
- 0xd7, 0xd0, 0x2d, 0x0f, 0x86, 0xd6, 0xb1, 0x45, 0x9e, 0x84, 0xe1, 0x99, 0x08, 0x39, 0xa9, 0x20,
- 0x85, 0xde, 0x16, 0x9e, 0xf1, 0x58, 0xbc, 0xe2, 0x86, 0xf1, 0xca, 0xcc, 0xcb, 0x34, 0x64, 0xca,
- 0x32, 0x55, 0x7c, 0x08, 0xb4, 0x54, 0xea, 0x99, 0x1d, 0x66, 0xc3, 0xd6, 0x8e, 0xe8, 0xcd, 0xdb,
- 0x7e, 0xe5, 0xcd, 0xdb, 0x7e, 0xe5, 0xe6, 0xb6, 0xef, 0xbd, 0xb9, 0xed, 0x7b, 0x7f, 0xdd, 0xf6,
- 0xbd, 0x5f, 0xff, 0xee, 0x57, 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, 0x50, 0x69, 0xba, 0x8c, 0x01,
- 0x08, 0x00, 0x00,
-}
diff --git a/raft/raftpb/raft.proto b/raft/raftpb/raft.proto
index 21a6320b..25df190a 100644
--- a/raft/raftpb/raft.proto
+++ b/raft/raftpb/raft.proto
@@ -19,6 +19,9 @@ message Entry {
optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
optional EntryType Type = 1 [(gogoproto.nullable) = false];
optional bytes Data = 4;
+ optional uint64 ID = 5 [(gogoproto.nullable) = false];
+ optional int32 data_type = 6 [(gogoproto.nullable) = false];
+ optional int64 timestamp = 7 [(gogoproto.nullable) = false];
}
message SnapshotMetadata {
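
Because the three new Entry fields are declared non-nullable, the regenerated struct carries them as plain values and always writes them on the wire (see the MarshalTo change above). A small round-trip sketch; the import path comes from this repository, while the comments on what ID, data_type and timestamp mean are guesses, not taken from the source:

package main

import (
	"fmt"

	pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)

func main() {
	e := pb.Entry{
		Term:      5,
		Index:     42,
		Type:      pb.EntryNormal,
		Data:      []byte("set foo bar"),
		ID:        7,          // presumably a per-proposal identifier
		DataType:  1,          // presumably an application-defined payload type
		Timestamp: 1600000000, // presumably the proposal time, unit chosen by the application
	}
	buf, err := e.Marshal()
	if err != nil {
		panic(err)
	}
	var decoded pb.Entry
	if err := decoded.Unmarshal(buf); err != nil {
		panic(err)
	}
	fmt.Println(decoded.ID, decoded.DataType, decoded.Timestamp) // 7 1 1600000000
}
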
diff --git a/raft/rawnode.go b/raft/rawnode.go
index 29f2bd0e..37733274 100644
--- a/raft/rawnode.go
+++ b/raft/rawnode.go
@@ -17,7 +17,7 @@ package raft
import (
"errors"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
// ErrStepLocalMsg is returned when trying to step a local raft message
@@ -36,43 +36,14 @@ type RawNode struct {
prevHardSt pb.HardState
}
-func (rn *RawNode) newReady() Ready {
- return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt)
-}
-
-func (rn *RawNode) commitReady(rd Ready) {
- if rd.SoftState != nil {
- rn.prevSoftSt = rd.SoftState
- }
- if !IsEmptyHardState(rd.HardState) {
- rn.prevHardSt = rd.HardState
- }
- if rn.prevHardSt.Commit != 0 {
- // In most cases, prevHardSt and rd.HardState will be the same
- // because when there are new entries to apply we just sent a
- // HardState with an updated Commit value. However, on initial
- // startup the two are different because we don't send a HardState
- // until something changes, but we do send any un-applied but
- // committed entries (and previously-committed entries may be
- // incorporated into the snapshot, even if rd.CommittedEntries is
- // empty). Therefore we mark all committed entries as applied
- // whether they were included in rd.HardState or not.
- rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit)
- }
- if len(rd.Entries) > 0 {
- e := rd.Entries[len(rd.Entries)-1]
- rn.raft.raftLog.stableTo(e.Index, e.Term)
- }
- if !IsEmptySnap(rd.Snapshot) {
- rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
- }
- if len(rd.ReadStates) != 0 {
- rn.raft.readStates = nil
- }
-}
-
-// NewRawNode returns a new RawNode given configuration and a list of raft peers.
-func NewRawNode(config *Config, peers []Peer) (*RawNode, error) {
+// NewRawNode instantiates a RawNode from the given configuration.
+//
+// See Bootstrap() for bootstrapping an initial state; this replaces the former
+// 'peers' argument to this method (with identical behavior). However, it is
+// recommended that instead of calling Bootstrap, applications bootstrap their
+// state manually by setting up a Storage that has a first index > 1 and which
+// stores the desired ConfState as its InitialState.
+func NewRawNode(config *Config) (*RawNode, error) {
if config.ID == 0 {
panic("config.ID must not be zero")
}
@@ -80,44 +51,10 @@ func NewRawNode(config *Config, peers []Peer) (*RawNode, error) {
rn := &RawNode{
raft: r,
}
- lastIndex, err := config.Storage.LastIndex()
- if err != nil {
- panic(err) // TODO(bdarnell)
- }
- // If the log is empty, this is a new RawNode (like StartNode); otherwise it's
- // restoring an existing RawNode (like RestartNode).
- // TODO(bdarnell): rethink RawNode initialization and whether the application needs
- // to be able to tell us when it expects the RawNode to exist.
- if lastIndex == 0 {
- r.becomeFollower(1, None)
- ents := make([]pb.Entry, len(peers))
- for i, peer := range peers {
- cc := pb.ConfChange{Type: pb.ConfChangeAddNode,
- ReplicaID: peer.ReplicaID,
- NodeGroup: pb.Group{NodeId: peer.NodeID, GroupId: r.group.GroupId, RaftReplicaId: peer.ReplicaID},
- Context: peer.Context}
- data, err := cc.Marshal()
- if err != nil {
- panic("unexpected marshal error")
- }
-
- ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
- }
- r.raftLog.append(ents...)
- r.raftLog.committed = uint64(len(ents))
- for _, peer := range peers {
- r.addNode(peer.ReplicaID,
- pb.Group{NodeId: peer.NodeID, GroupId: r.group.GroupId, RaftReplicaId: peer.ReplicaID})
- }
- }
// Set the initial hard and soft states after performing all initialization.
rn.prevSoftSt = r.softState()
- if lastIndex == 0 {
- rn.prevHardSt = emptyState
- } else {
- rn.prevHardSt = r.hardState()
- }
+ rn.prevHardSt = r.hardState()
return rn, nil
}
@@ -205,13 +142,35 @@ func (rn *RawNode) Step(m pb.Message) error {
return ErrStepPeerNotFound
}
-// Ready returns the current point-in-time state of this RawNode.
-func (rn *RawNode) Ready() Ready {
- rd := rn.newReady()
- rn.raft.msgs = nil
+// Ready returns the outstanding work that the application needs to handle. This
+// includes appending and applying entries or a snapshot, updating the HardState,
+// and sending messages. The returned Ready() *must* be handled and subsequently
+// passed back via Advance().
+func (rn *RawNode) Ready(moreEntriesToApply bool) Ready {
+ rd := rn.readyWithoutAccept(moreEntriesToApply)
+ rn.acceptReady(rd)
return rd
}
+// readyWithoutAccept returns a Ready. This is a read-only operation, i.e. there
+// is no obligation that the Ready must be handled.
+func (rn *RawNode) readyWithoutAccept(moreEntriesToApply bool) Ready {
+ return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt, moreEntriesToApply)
+}
+
+// acceptReady is called when the consumer of the RawNode has decided to go
+// ahead and handle a Ready. Nothing must alter the state of the RawNode between
+// this call and the prior call to Ready().
+func (rn *RawNode) acceptReady(rd Ready) {
+ if rd.SoftState != nil {
+ rn.prevSoftSt = rd.SoftState
+ }
+ if len(rd.ReadStates) != 0 {
+ rn.raft.readStates = nil
+ }
+ rn.raft.msgs = nil
+}
+
// HasReady is called when the RawNode user needs to check if any Ready is pending.
// Checking logic in this method should be consistent with Ready.containsUpdates().
func (rn *RawNode) HasReady() bool {
@@ -222,7 +181,7 @@ func (rn *RawNode) HasReady() bool {
if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
return true
}
- if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) {
+ if r.raftLog.hasPendingSnapshot() {
return true
}
if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
@@ -237,7 +196,10 @@ func (rn *RawNode) HasReady() bool {
// Advance notifies the RawNode that the application has applied and saved progress in the
// last Ready results.
func (rn *RawNode) Advance(rd Ready) {
- rn.commitReady(rd)
+ if !IsEmptyHardState(rd.HardState) {
+ rn.prevHardSt = rd.HardState
+ }
+ rn.raft.advance(rd)
}
// Status returns the current status of the given group.
@@ -246,6 +208,39 @@ func (rn *RawNode) Status() *Status {
return &status
}
+// StatusWithoutProgress returns a Status without populating the Progress field
+// (and returns the Status as a value to avoid forcing it onto the heap). This
+// is more performant if the Progress is not required. See WithProgress for an
+// allocation-free way to introspect the Progress.
+func (rn *RawNode) StatusWithoutProgress() Status {
+ return getStatusWithoutProgress(rn.raft)
+}
+
+// ProgressType indicates the type of replica a Progress corresponds to.
+type ProgressType byte
+
+const (
+ // ProgressTypePeer accompanies a Progress for a regular peer replica.
+ ProgressTypePeer ProgressType = iota
+ // ProgressTypeLearner accompanies a Progress for a learner replica.
+ ProgressTypeLearner
+)
+
+// WithProgress is a helper to introspect the Progress for this node and its
+// peers.
+func (rn *RawNode) WithProgress(visitor func(id uint64, typ ProgressType, pr Progress)) {
+ for id, pr := range rn.raft.prs {
+ pr := *pr
+ pr.ins = nil
+ visitor(id, ProgressTypePeer, pr)
+ }
+ for id, pr := range rn.raft.learnerPrs {
+ pr := *pr
+ pr.ins = nil
+ visitor(id, ProgressTypeLearner, pr)
+ }
+}
+
// ReportUnreachable reports the given node is not reachable for the last send.
func (rn *RawNode) ReportUnreachable(id uint64) {
_ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
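The reworked RawNode API above replaces newReady/commitReady with Ready(moreEntriesToApply), readyWithoutAccept/acceptReady and a raft-side advance. A minimal driver sketch follows; it is not part of this change, the transport and state-machine hooks are caller-supplied placeholders, and the import path simply mirrors the module path used elsewhere in the diff.

```go
package example

import (
	"github.com/youzan/ZanRedisDB/raft"
	pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)

// handleReady drains one Ready from the RawNode. applyHasRoom lets the caller
// throttle CommittedEntries, matching the new moreEntriesToApply argument.
func handleReady(rn *raft.RawNode, st raft.IExtRaftStorage, applyHasRoom bool,
	send func([]pb.Message), apply func([]pb.Entry)) error {
	if !rn.HasReady() {
		return nil
	}
	rd := rn.Ready(applyHasRoom)
	// Persist HardState, entries and any snapshot before acting on the Ready.
	if !raft.IsEmptyHardState(rd.HardState) {
		if err := st.SetHardState(rd.HardState); err != nil {
			return err
		}
	}
	if err := st.Append(rd.Entries); err != nil {
		return err
	}
	if !raft.IsEmptySnap(rd.Snapshot) {
		if err := st.ApplySnapshot(rd.Snapshot); err != nil {
			return err
		}
	}
	send(rd.Messages)
	apply(rd.CommittedEntries)
	// Every Ready obtained from Ready() must be passed back via Advance.
	rn.Advance(rd)
	return nil
}
```

The split into readyWithoutAccept and acceptReady also lets tests peek at a Ready without consuming it, which is exactly what TestRawNodeConsumeReady below relies on.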
diff --git a/raft/rawnode_test.go b/raft/rawnode_test.go
index fed7ea99..b5f82dcf 100644
--- a/raft/rawnode_test.go
+++ b/raft/rawnode_test.go
@@ -16,28 +16,55 @@ package raft
import (
"bytes"
+ "fmt"
+ "math"
"reflect"
"testing"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
)
// TestRawNodeStep ensures that RawNode.Step ignore local message.
func TestRawNodeStep(t *testing.T) {
for i, msgn := range raftpb.MessageType_name {
- s := NewMemoryStorage()
- rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, s), []Peer{{NodeID: 1, ReplicaID: 1}})
- if err != nil {
- t.Fatal(err)
- }
- msgt := raftpb.MessageType(i)
- err = rawNode.Step(raftpb.Message{Type: msgt})
- // LocalMsg should be ignored.
- if IsLocalMsg(msgt) {
- if err != ErrStepLocalMsg {
- t.Errorf("%d: step should ignore %s", msgt, msgn)
+ t.Run(msgn, func(t *testing.T) {
+ s := NewMemoryStorage()
+ defer s.Close()
+ s.SetHardState(raftpb.HardState{Term: 1, Commit: 1})
+ s.Append([]raftpb.Entry{{Term: 1, Index: 1}})
+ peerGrps := make([]*raftpb.Group, 0)
+ for _, pid := range []uint64{1} {
+ grp := raftpb.Group{
+ NodeId: pid,
+ RaftReplicaId: pid,
+ GroupId: 1,
+ }
+ peerGrps = append(peerGrps, &grp)
}
- }
+ if err := s.ApplySnapshot(raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{
+ ConfState: raftpb.ConfState{
+ Nodes: []uint64{1},
+ Groups: peerGrps,
+ },
+ Index: 1,
+ Term: 1,
+ }}); err != nil {
+ t.Fatal(err)
+ }
+
+ rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, s))
+ if err != nil {
+ t.Fatal(err)
+ }
+ msgt := raftpb.MessageType(i)
+ err = rawNode.Step(raftpb.Message{Type: msgt})
+ // LocalMsg should be ignored.
+ if IsLocalMsg(msgt) {
+ if err != ErrStepLocalMsg {
+ t.Errorf("%d: step should ignore %s", msgt, msgn)
+ }
+ }
+ })
}
}
@@ -48,14 +75,12 @@ func TestRawNodeStep(t *testing.T) {
// send the given proposal and ConfChange to the underlying raft.
func TestRawNodeProposeAndConfChange(t *testing.T) {
s := NewMemoryStorage()
+ defer s.Close()
var err error
- rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, s), []Peer{{NodeID: 1, ReplicaID: 1}})
+ rawNode, err := NewRawNode(newTestConfig(1, []uint64{1}, 10, 1, s))
if err != nil {
t.Fatal(err)
}
- rd := rawNode.Ready()
- s.Append(rd.Entries)
- rawNode.Advance(rd)
rawNode.Campaign()
proposed := false
@@ -64,11 +89,14 @@ func TestRawNodeProposeAndConfChange(t *testing.T) {
ccdata []byte
)
for {
- rd = rawNode.Ready()
+ rd := rawNode.Ready(true)
s.Append(rd.Entries)
+ rawNode.Advance(rd)
// Once we are the leader, propose a command and a ConfChange.
if !proposed && rd.SoftState.Lead == rawNode.raft.id {
- rawNode.Propose([]byte("somedata"))
+ if err = rawNode.Propose([]byte("somedata")); err != nil {
+ t.Fatal(err)
+ }
grp := raftpb.Group{
NodeId: 1,
@@ -83,16 +111,13 @@ func TestRawNodeProposeAndConfChange(t *testing.T) {
rawNode.ProposeConfChange(cc)
proposed = true
- }
- rawNode.Advance(rd)
-
- // Exit when we have four entries: one ConfChange, one no-op for the election,
- // our proposed command and proposed ConfChange.
- lastIndex, err = s.LastIndex()
- if err != nil {
- t.Fatal(err)
- }
- if lastIndex >= 4 {
+ } else if proposed {
+ // We proposed last cycle, which means we appended the conf change
+ // in this cycle.
+ lastIndex, err = s.LastIndex()
+ if err != nil {
+ t.Fatal(err)
+ }
break
}
}
@@ -119,17 +144,18 @@ func TestRawNodeProposeAndConfChange(t *testing.T) {
// not affect the later propose to add new node.
func TestRawNodeProposeAddDuplicateNode(t *testing.T) {
s := NewMemoryStorage()
- rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, s), []Peer{{NodeID: 1, ReplicaID: 1}})
+ defer s.Close()
+ rawNode, err := NewRawNode(newTestConfig(1, []uint64{1}, 10, 1, s))
if err != nil {
t.Fatal(err)
}
- rd := rawNode.Ready()
+ rd := rawNode.Ready(true)
s.Append(rd.Entries)
rawNode.Advance(rd)
rawNode.Campaign()
for {
- rd = rawNode.Ready()
+ rd = rawNode.Ready(true)
s.Append(rd.Entries)
if rd.SoftState.Lead == rawNode.raft.id {
rawNode.Advance(rd)
@@ -140,7 +166,7 @@ func TestRawNodeProposeAddDuplicateNode(t *testing.T) {
proposeConfChangeAndApply := func(cc raftpb.ConfChange) {
rawNode.ProposeConfChange(cc)
- rd = rawNode.Ready()
+ rd = rawNode.Ready(true)
s.Append(rd.Entries)
for _, entry := range rd.CommittedEntries {
if entry.Type == raftpb.EntryConfChange {
@@ -202,8 +228,9 @@ func TestRawNodeReadIndex(t *testing.T) {
wrs := []ReadState{{Index: uint64(1), RequestCtx: []byte("somedata")}}
s := NewMemoryStorage()
- c := newTestConfig(1, nil, 10, 1, s)
- rawNode, err := NewRawNode(c, []Peer{{NodeID: 1, ReplicaID: 1}})
+ defer s.Close()
+ c := newTestConfig(1, []uint64{1}, 10, 1, s)
+ rawNode, err := NewRawNode(c)
if err != nil {
t.Fatal(err)
}
@@ -213,7 +240,7 @@ func TestRawNodeReadIndex(t *testing.T) {
if !hasReady {
t.Errorf("HasReady() returns %t, want %t", hasReady, true)
}
- rd := rawNode.Ready()
+ rd := rawNode.Ready(true)
if !reflect.DeepEqual(rd.ReadStates, wrs) {
t.Errorf("ReadStates = %d, want %d", rd.ReadStates, wrs)
}
@@ -227,7 +254,7 @@ func TestRawNodeReadIndex(t *testing.T) {
wrequestCtx := []byte("somedata2")
rawNode.Campaign()
for {
- rd = rawNode.Ready()
+ rd = rawNode.Ready(true)
s.Append(rd.Entries)
if rd.SoftState.Lead == rawNode.raft.id {
@@ -261,70 +288,119 @@ func TestRawNodeReadIndex(t *testing.T) {
// TestNodeStop from node_test.go has no equivalent in rawNode because there is
// no goroutine in RawNode.
-// TestRawNodeStart ensures that a node can be started correctly. The node should
-// start with correct configuration change entries, and can accept and commit
-// proposals.
+// TestRawNodeStart ensures that a node can be started correctly. Note that RawNode
+// requires the application to bootstrap the state, i.e. it does not accept peers
+// and will not create faux configuration change entries.
func TestRawNodeStart(t *testing.T) {
+ want := Ready{
+ SoftState: &SoftState{Lead: 1, RaftState: StateLeader},
+ HardState: raftpb.HardState{Term: 1, Commit: 3, Vote: 1},
+ Entries: []raftpb.Entry{
+ {Term: 1, Index: 2, Data: nil},
+ {Term: 1, Index: 3, Data: []byte("foo")},
+ },
+ CommittedEntries: []raftpb.Entry{
+ {Term: 1, Index: 2, Data: nil},
+ {Term: 1, Index: 3, Data: []byte("foo")}},
+ MustSync: true,
+ }
+
+ storage := NewRealMemoryStorage()
+ defer storage.Close()
+ storage.ents[0].Index = 1
+
+ // TODO(tbg): this is a first prototype of what bootstrapping could look
+ // like (without the annoying faux ConfChanges). We want to persist a
+ // ConfState at some index and make sure that this index can't be reached
+ // from log position 1, so that followers are forced to pick up the
+ // ConfState in order to move away from log position 1 (unless they got
+ // bootstrapped in the same way already). Failing to do so would mean that
+ // followers diverge from the bootstrapped nodes and don't learn about the
+ // initial config.
+ //
+ // NB: this is exactly what CockroachDB does. The Raft log really begins at
+ // index 10, so empty followers (at index 1) always need a snapshot first.
+ type appenderStorage interface {
+ Storage
+ ApplySnapshot(raftpb.Snapshot) error
+ }
+ bootstrap := func(storage appenderStorage, cs raftpb.ConfState) error {
+ if len(cs.Nodes) == 0 {
+ return fmt.Errorf("no voters specified")
+ }
+ fi, err := storage.FirstIndex()
+ if err != nil {
+ return err
+ }
+ if fi < 2 {
+ return fmt.Errorf("FirstIndex >= 2 is prerequisite for bootstrap")
+ }
+ if _, err = storage.Entries(fi, fi, math.MaxUint64); err == nil {
+ // TODO(tbg): match exact error
+ return fmt.Errorf("should not have been able to load first index")
+ }
+ li, err := storage.LastIndex()
+ if err != nil {
+ return err
+ }
+ if _, err = storage.Entries(li, li, math.MaxUint64); err == nil {
+ return fmt.Errorf("should not have been able to load last index")
+ }
+ hs, ics, err := storage.InitialState()
+ if err != nil {
+ return err
+ }
+ if !IsEmptyHardState(hs) {
+ return fmt.Errorf("HardState not empty")
+ }
+ if len(ics.Nodes) != 0 {
+ return fmt.Errorf("ConfState not empty")
+ }
+
+ meta := raftpb.SnapshotMetadata{
+ Index: 1,
+ Term: 0,
+ ConfState: cs,
+ }
+ snap := raftpb.Snapshot{Metadata: meta}
+ return storage.ApplySnapshot(snap)
+ }
+
grp := raftpb.Group{
NodeId: 1,
GroupId: 1,
RaftReplicaId: 1,
}
- cc := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, ReplicaID: 1, NodeGroup: grp}
- ccdata, err := cc.Marshal()
- if err != nil {
- t.Fatalf("unexpected marshal error: %v", err)
- }
- wants := []Ready{
- {
- HardState: raftpb.HardState{Term: 1, Commit: 1, Vote: 0},
- Entries: []raftpb.Entry{
- {Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},
- },
- CommittedEntries: []raftpb.Entry{
- {Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},
- },
- MustSync: true,
- },
- {
- HardState: raftpb.HardState{Term: 2, Commit: 3, Vote: 1},
- Entries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte("foo")}},
- CommittedEntries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte("foo")}},
- MustSync: true,
- },
+ if err := bootstrap(storage, raftpb.ConfState{Nodes: []uint64{1}, Groups: []*raftpb.Group{&grp}}); err != nil {
+ t.Fatal(err)
}
- storage := NewMemoryStorage()
- rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, storage), []Peer{{NodeID: 1, ReplicaID: 1}})
+ rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, storage))
if err != nil {
t.Fatal(err)
}
- rd := rawNode.Ready()
- t.Logf("rd %v", rd)
- if !reflect.DeepEqual(rd, wants[0]) {
- t.Fatalf("#%d: g = %+v,\n w %+v", 1, rd, wants[0])
- } else {
- storage.Append(rd.Entries)
- rawNode.Advance(rd)
+
+ if rawNode.HasReady() {
+ t.Fatalf("unexpected ready: %+v", rawNode.Ready(true))
}
- storage.Append(rd.Entries)
- rawNode.Advance(rd)
rawNode.Campaign()
- rd = rawNode.Ready()
+ rawNode.Propose([]byte("foo"))
+ if !rawNode.HasReady() {
+ t.Fatal("expected a Ready")
+ }
+ rd := rawNode.Ready(true)
storage.Append(rd.Entries)
rawNode.Advance(rd)
- rawNode.Propose([]byte("foo"))
- if rd = rawNode.Ready(); !reflect.DeepEqual(rd, wants[1]) {
- t.Errorf("#%d: g = %+v,\n w %+v", 2, rd, wants[1])
- } else {
- storage.Append(rd.Entries)
- rawNode.Advance(rd)
+ rd.SoftState, want.SoftState = nil, nil
+
+ if !reflect.DeepEqual(rd, want) {
+ t.Fatalf("unexpected Ready:\n%+v\nvs\n%+v", rd, want)
}
if rawNode.HasReady() {
- t.Errorf("unexpected Ready: %+v", rawNode.Ready())
+ t.Errorf("unexpected Ready: %+v", rawNode.Ready(true))
}
}
@@ -339,23 +415,24 @@ func TestRawNodeRestart(t *testing.T) {
HardState: emptyState,
// commit up to commit index in st
CommittedEntries: entries[:st.Commit],
- MustSync: true,
+ MustSync: false,
}
storage := NewMemoryStorage()
+ defer storage.Close()
storage.SetHardState(st)
storage.Append(entries)
- rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, storage), nil)
+ rawNode, err := NewRawNode(newTestConfig(1, []uint64{1}, 10, 1, storage))
if err != nil {
t.Fatal(err)
}
- rd := rawNode.Ready()
+ rd := rawNode.Ready(true)
if !reflect.DeepEqual(rd, want) {
t.Errorf("g = %+v,\n w %+v", rd, want)
}
rawNode.Advance(rd)
if rawNode.HasReady() {
- t.Errorf("unexpected Ready: %+v", rawNode.Ready())
+ t.Errorf("unexpected Ready: %+v", rawNode.Ready(true))
}
}
@@ -386,18 +463,19 @@ func TestRawNodeRestartFromSnapshot(t *testing.T) {
HardState: emptyState,
// commit up to commit index in st
CommittedEntries: entries,
- MustSync: true,
+ MustSync: false,
}
s := NewMemoryStorage()
+ defer s.Close()
s.SetHardState(st)
s.ApplySnapshot(snap)
s.Append(entries)
- rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, s), nil)
+ rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, s))
if err != nil {
t.Fatal(err)
}
- if rd := rawNode.Ready(); !reflect.DeepEqual(rd, want) {
+ if rd := rawNode.Ready(true); !reflect.DeepEqual(rd, want) {
t.Errorf("g = %+v,\n w %+v", rd, want)
} else {
rawNode.Advance(rd)
@@ -412,7 +490,8 @@ func TestRawNodeRestartFromSnapshot(t *testing.T) {
func TestRawNodeStatus(t *testing.T) {
storage := NewMemoryStorage()
- rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, storage), []Peer{{NodeID: 1, ReplicaID: 1}})
+ defer storage.Close()
+ rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, storage))
if err != nil {
t.Fatal(err)
}
@@ -421,3 +500,186 @@ func TestRawNodeStatus(t *testing.T) {
t.Errorf("expected status struct, got nil")
}
}
+
+// TestRawNodeCommitPaginationAfterRestart is the RawNode version of
+// TestNodeCommitPaginationAfterRestart. The anomaly here was even worse as the
+// Raft group would forget to apply entries:
+//
+// - node learns that index 11 is committed
+// - nextEnts returns index 1..10 in CommittedEntries (but index 10 already
+// exceeds maxBytes), which isn't noticed internally by Raft
+// - Commit index gets bumped to 10
+// - the node persists the HardState, but crashes before applying the entries
+// - upon restart, the storage returns the same entries, but `slice` takes a
+// different code path and removes the last entry.
+// - Raft does not emit a HardState, but when the app calls Advance(), it bumps
+// its internal applied index cursor to 10 (when it should be 9)
+// - the next Ready asks the app to apply index 11 (omitting index 10), losing a
+// write.
+func TestRawNodeCommitPaginationAfterRestart(t *testing.T) {
+ s := &ignoreSizeHintMemStorage{
+ MemoryStorage: NewRealMemoryStorage(),
+ }
+ persistedHardState := raftpb.HardState{
+ Term: 1,
+ Vote: 1,
+ Commit: 10,
+ }
+
+ s.hardState = persistedHardState
+ s.ents = make([]raftpb.Entry, 10)
+ var size uint64
+ for i := range s.ents {
+ ent := raftpb.Entry{
+ Term: 1,
+ Index: uint64(i + 1),
+ Type: raftpb.EntryNormal,
+ Data: []byte("a"),
+ }
+
+ s.ents[i] = ent
+ size += uint64(ent.Size())
+ }
+
+ cfg := newTestConfig(1, []uint64{1}, 10, 1, s)
+ // Set a MaxSizePerMsg that would suggest to Raft that the last committed entry should
+ // not be included in the initial rd.CommittedEntries. However, our storage will ignore
+ // this and *will* return it (which is how the Commit index ended up being 10 initially).
+ cfg.MaxSizePerMsg = size - uint64(s.ents[len(s.ents)-1].Size()) - 1
+
+ s.ents = append(s.ents, raftpb.Entry{
+ Term: 1,
+ Index: uint64(11),
+ Type: raftpb.EntryNormal,
+ Data: []byte("boom"),
+ })
+
+ rawNode, err := NewRawNode(cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for highestApplied := uint64(0); highestApplied != 11; {
+ rd := rawNode.Ready(true)
+ n := len(rd.CommittedEntries)
+ if n == 0 {
+ t.Fatalf("stopped applying entries at index %d", highestApplied)
+ }
+ if next := rd.CommittedEntries[0].Index; highestApplied != 0 && highestApplied+1 != next {
+ t.Fatalf("attempting to apply index %d after index %d, leaving a gap", next, highestApplied)
+ }
+ highestApplied = rd.CommittedEntries[n-1].Index
+ rawNode.Advance(rd)
+ rawNode.Step(raftpb.Message{
+ Type: raftpb.MsgHeartbeat,
+ To: 1,
+ From: 1, // illegal, but we get away with it
+ Term: 1,
+ Commit: 11,
+ })
+ }
+}
+
+func BenchmarkStatusProgress(b *testing.B) {
+ setup := func(members int) *RawNode {
+ peers := make([]uint64, members)
+ for i := range peers {
+ peers[i] = uint64(i + 1)
+ }
+ cfg := newTestConfig(1, peers, 3, 1, NewMemoryStorage())
+ cfg.Logger = discardLogger
+ r := newRaft(cfg)
+ r.becomeFollower(1, 1)
+ r.becomeCandidate()
+ r.becomeLeader()
+ return &RawNode{raft: r}
+ }
+
+ for _, members := range []int{1, 3, 5, 100} {
+ b.Run(fmt.Sprintf("members=%d", members), func(b *testing.B) {
+ // NB: call getStatus through rn.Status because that incurs an additional
+ // allocation.
+ rn := setup(members)
+
+ b.Run("Status", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ _ = rn.Status()
+ }
+ })
+
+ b.Run("Status-example", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ s := rn.Status()
+ var n uint64
+ for _, pr := range s.Progress {
+ n += pr.Match
+ }
+ _ = n
+ }
+ })
+
+ b.Run("StatusWithoutProgress", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ _ = rn.StatusWithoutProgress()
+ }
+ })
+
+ b.Run("WithProgress", func(b *testing.B) {
+ b.ReportAllocs()
+ visit := func(uint64, ProgressType, Progress) {}
+
+ for i := 0; i < b.N; i++ {
+ rn.WithProgress(visit)
+ }
+ })
+ b.Run("WithProgress-example", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ var n uint64
+ visit := func(_ uint64, _ ProgressType, pr Progress) {
+ n += pr.Match
+ }
+ rn.WithProgress(visit)
+ _ = n
+ }
+ })
+ })
+ }
+}
+
+func TestRawNodeConsumeReady(t *testing.T) {
+ // Check that readyWithoutAccept() does not call acceptReady (which resets
+ // the messages) but Ready() does.
+ s := NewMemoryStorage()
+ rn := newTestRawNode(1, []uint64{1}, 3, 1, s)
+ m1 := raftpb.Message{Context: []byte("foo")}
+ m2 := raftpb.Message{Context: []byte("bar")}
+
+ // Inject first message, make sure it's visible via readyWithoutAccept.
+ rn.raft.msgs = append(rn.raft.msgs, m1)
+ rd := rn.readyWithoutAccept(true)
+ if len(rd.Messages) != 1 || !reflect.DeepEqual(rd.Messages[0], m1) {
+ t.Fatalf("expected only m1 sent, got %+v", rd.Messages)
+ }
+ if len(rn.raft.msgs) != 1 || !reflect.DeepEqual(rn.raft.msgs[0], m1) {
+ t.Fatalf("expected only m1 in raft.msgs, got %+v", rn.raft.msgs)
+ }
+ // Now call Ready() which should move the message into the Ready (as opposed
+ // to leaving it in both places).
+ rd = rn.Ready(true)
+ if len(rn.raft.msgs) > 0 {
+ t.Fatalf("messages not reset: %+v", rn.raft.msgs)
+ }
+ if len(rd.Messages) != 1 || !reflect.DeepEqual(rd.Messages[0], m1) {
+ t.Fatalf("expected only m1 sent, got %+v", rd.Messages)
+ }
+ // Add a message to raft to make sure that Advance() doesn't drop it.
+ rn.raft.msgs = append(rn.raft.msgs, m2)
+ rn.Advance(rd)
+ if len(rn.raft.msgs) != 1 || !reflect.DeepEqual(rn.raft.msgs[0], m2) {
+ t.Fatalf("expected only m2 in raft.msgs, got %+v", rn.raft.msgs)
+ }
+}
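With the peers argument gone from NewRawNode, the bootstrap helper in TestRawNodeStart is the pattern for seeding an initial membership: persist the ConfState through a snapshot so the log's first index moves past 1. The sketch below is written as if inside package raft (it uses the unexported ents field and the newTestConfig test helper) and the IDs are illustrative.

```go
// bootstrapSingleVoter mirrors the bootstrap helper in TestRawNodeStart: the
// single-voter ConfState is applied as a snapshot at index 1, so followers
// still at log position 1 have to pick it up before they can replicate.
func bootstrapSingleVoter() (*RawNode, error) {
	storage := NewRealMemoryStorage()
	storage.ents[0].Index = 1 // dummy entry at index 1 -> FirstIndex() == 2

	grp := raftpb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1}
	snap := raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{
		Index: 1,
		Term:  0,
		ConfState: raftpb.ConfState{
			Nodes:  []uint64{1},
			Groups: []*raftpb.Group{&grp},
		},
	}}
	if err := storage.ApplySnapshot(snap); err != nil {
		return nil, err
	}
	// No peers argument anymore; the membership comes from the storage.
	return NewRawNode(newTestConfig(1, nil, 10, 1, storage))
}
```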
diff --git a/raft/read_only.go b/raft/read_only.go
index ebdaba64..01f19de3 100644
--- a/raft/read_only.go
+++ b/raft/read_only.go
@@ -16,7 +16,7 @@ package raft
import (
"bytes"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
// ReadState provides state for read only query.
diff --git a/raft/rocksdb_storage.go b/raft/rocksdb_storage.go
new file mode 100644
index 00000000..a92632ad
--- /dev/null
+++ b/raft/rocksdb_storage.go
@@ -0,0 +1,579 @@
+package raft
+
+import (
+ "encoding/binary"
+ "errors"
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
+)
+
+const (
+ startSep byte = ':'
+ stopSep byte = startSep + 1
+ maxWriteBatch = 1000
+ slowStorage = time.Millisecond * 100
+)
+
+// RocksStorage implements the Storage interface backed by rocksdb.
+type RocksStorage struct {
+	// Protects access to all fields. Most methods of RocksStorage are
+ // run on the raft goroutine, but Append() is run on an application
+ // goroutine.
+ sync.Mutex
+ hardState pb.HardState
+ snapshot pb.Snapshot
+
+ entryDB engine.KVEngine
+ wb engine.WriteBatch
+ firstIndex uint64
+ lastIndex uint64
+ id uint64
+ gid uint32
+ engShared bool
+}
+
+func NewRocksStorage(id uint64, gid uint32, shared bool, db engine.KVEngine) *RocksStorage {
+ ms := &RocksStorage{
+ entryDB: db,
+ wb: db.NewWriteBatch(),
+ id: id,
+ gid: gid,
+ engShared: shared,
+ }
+
+ snap, err := ms.Snapshot()
+ if !IsEmptySnap(snap) {
+ return ms
+ }
+
+ _, err = ms.FirstIndex()
+ if err == errNotFound {
+ // When starting from scratch populate the list with a dummy entry at term zero.
+ ents := make([]pb.Entry, 1)
+ err = ms.reset(ents)
+ if err != nil {
+ panic(err)
+ }
+ }
+ return ms
+}
+
+func (ms *RocksStorage) Eng() engine.KVEngine {
+ return ms.entryDB
+}
+
+func (ms *RocksStorage) Close() {
+ if !ms.engShared {
+ ms.entryDB.CloseAll()
+ }
+}
+
+func (ms *RocksStorage) entryKey(idx uint64) []byte {
+ b := make([]byte, 20+1)
+ binary.BigEndian.PutUint64(b[0:8], ms.id)
+ binary.BigEndian.PutUint32(b[8:12], ms.gid)
+ b[12] = startSep
+ binary.BigEndian.PutUint64(b[13:21], idx)
+ return b
+}
+
+func (ms *RocksStorage) parseIndex(key []byte) uint64 {
+ return binary.BigEndian.Uint64(key[13:21])
+}
+
+// reset resets the entries. Used for testing.
+func (ms *RocksStorage) reset(es []pb.Entry) error {
+ // Clean out the state.
+ batch := ms.wb
+ err := ms.deleteFrom(batch, 0)
+ if err != nil {
+ return err
+ }
+ err = ms.commitBatch(batch)
+ if err != nil {
+ return err
+ }
+
+ err = ms.writeEnts(batch, es)
+ if err != nil {
+ return err
+ }
+ // clear cached index
+ ms.setCachedFirstIndex(0)
+ ms.setCachedLastIndex(0)
+ return ms.commitBatch(batch)
+}
+
+func (ms *RocksStorage) entryPrefixStart() []byte {
+ b := make([]byte, 13)
+ binary.BigEndian.PutUint64(b[0:8], ms.id)
+ binary.BigEndian.PutUint32(b[8:12], ms.gid)
+ b[12] = startSep
+ return b
+}
+
+func (ms *RocksStorage) entryPrefixEnd() []byte {
+ b := make([]byte, 13)
+ binary.BigEndian.PutUint64(b[0:8], ms.id)
+ binary.BigEndian.PutUint32(b[8:12], ms.gid)
+ b[12] = stopSep
+ return b
+}
+
+// InitialState implements the Storage interface.
+func (ms *RocksStorage) InitialState() (pb.HardState, pb.ConfState, error) {
+ return ms.hardState, ms.snapshot.Metadata.ConfState, nil
+}
+
+// SetHardState saves the current HardState.
+func (ms *RocksStorage) SetHardState(st pb.HardState) error {
+ ms.Lock()
+ defer ms.Unlock()
+ ms.hardState = st
+ return nil
+}
+
+// Entries implements the Storage interface.
+func (ms *RocksStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) {
+ s := time.Now()
+ first, err := ms.FirstIndex()
+ if err != nil {
+ return nil, err
+ }
+ if lo < first {
+ return nil, ErrCompacted
+ }
+
+ last, err := ms.LastIndex()
+ if err != nil {
+ return nil, err
+ }
+ if hi > last+1 {
+ return nil, ErrUnavailable
+ }
+
+ es, err := ms.allEntries(lo, hi, maxSize)
+ cost := time.Since(s)
+ if cost > slowStorage {
+ raftLogger.Infof("entries from raft storage slow: %v(%v-%v), %v", len(es), lo, hi, cost)
+ }
+ return es, err
+}
+
+func (ms *RocksStorage) seekEntry(e *pb.Entry, seekTo uint64, reverse bool) (uint64, error) {
+ start := ms.entryKey(seekTo)
+ stop := ms.entryPrefixEnd()
+ if reverse {
+ stop = start
+ start = ms.entryPrefixStart()
+ }
+ //raftLogger.Infof("seek %v from %v to %v", seekTo, start, stop)
+ opts := engine.IteratorOpts{
+ Range: engine.Range{Min: start, Max: stop, Type: common.RangeClose},
+ Reverse: reverse,
+ IgnoreDel: true,
+ }
+ it, err := engine.NewDBRangeIteratorWithOpts(ms.entryDB, opts)
+ if err != nil {
+ return 0, err
+ }
+ defer it.Close()
+ if !it.Valid() {
+ return 0, errNotFound
+ }
+ index := ms.parseIndex(it.RefKey())
+ //raftLogger.Infof("seeked: %v", index)
+ if e == nil {
+ return index, nil
+ }
+ v := it.RefValue()
+ err = e.Unmarshal(v)
+ return index, err
+}
+
+// Term implements the Storage interface.
+func (ms *RocksStorage) Term(idx uint64) (uint64, error) {
+ first, err := ms.FirstIndex()
+ if err != nil {
+ return 0, err
+ }
+ if idx < first-1 {
+ return 0, ErrCompacted
+ }
+
+ var e pb.Entry
+ if _, err := ms.seekEntry(&e, idx, false); err == errNotFound {
+ return 0, ErrUnavailable
+ } else if err != nil {
+ return 0, err
+ }
+ if idx < e.Index {
+ return 0, ErrCompacted
+ }
+ return e.Term, nil
+}
+
+// LastIndex implements the Storage interface.
+func (ms *RocksStorage) LastIndex() (uint64, error) {
+ index := ms.lastIndexCached()
+ if index > 0 {
+ return index, nil
+ }
+ index, err := ms.seekEntry(nil, math.MaxUint64, true)
+ if err != nil {
+		raftLogger.Infof("failed to find last index: %v", err.Error())
+ } else {
+ ms.setCachedLastIndex(index)
+ }
+ return index, err
+}
+
+// FirstIndex implements the Storage interface.
+func (ms *RocksStorage) FirstIndex() (uint64, error) {
+ index := ms.firstIndexCached()
+ if index > 0 {
+ return index, nil
+ }
+ index, err := ms.seekEntry(nil, 0, false)
+ if err == nil {
+ ms.setCachedFirstIndex(index + 1)
+ }
+ return index + 1, err
+}
+
+func (ms *RocksStorage) setCachedFirstIndex(index uint64) {
+ ms.Lock()
+ ms.firstIndex = index
+ ms.Unlock()
+}
+
+func (ms *RocksStorage) setCachedLastIndex(index uint64) {
+ atomic.StoreUint64(&ms.lastIndex, index)
+}
+
+func (ms *RocksStorage) lastIndexCached() uint64 {
+ return atomic.LoadUint64(&ms.lastIndex)
+}
+
+func (ms *RocksStorage) firstIndexCached() uint64 {
+ ms.Lock()
+ defer ms.Unlock()
+ snap := ms.snapshot
+ if !IsEmptySnap(snap) {
+ return snap.Metadata.Index + 1
+ }
+ if ms.firstIndex > 0 {
+ return ms.firstIndex
+ }
+ return 0
+}
+
+// Delete all entries from [0, until), i.e. excluding until.
+// Keep the entry at the snapshot index, for simplification of logic.
+// It is the application's responsibility to not attempt to deleteUntil an index
+// greater than raftLog.applied.
+func (ms *RocksStorage) deleteUntil(batch engine.WriteBatch, until uint64) error {
+ start := ms.entryKey(0)
+ stop := ms.entryKey(until)
+ //raftLogger.Infof("compact raft storage to %d, %v~%v ", until, start, stop)
+ rg := engine.CRange{
+ Start: start,
+ Limit: stop,
+ }
+ ms.entryDB.DeleteFilesInRange(rg)
+ //batch.DeleteRange(start, stop)
+ opts := engine.IteratorOpts{
+ Range: engine.Range{Min: start, Max: stop, Type: common.RangeROpen},
+ Reverse: false,
+ }
+ it, err := engine.NewDBRangeIteratorWithOpts(ms.entryDB, opts)
+ if err != nil {
+ return err
+ }
+ defer it.Close()
+ cnt := 0
+ for ; it.Valid(); it.Next() {
+ batch.Delete(it.RefKey())
+ cnt++
+ }
+ //raftLogger.Infof("compact raft storage to %d , cnt: %v", until, cnt)
+ ms.entryDB.AddDeletedCnt(int64(cnt))
+ return nil
+}
+
+// NumEntries returns the number of all entries in the db
+func (ms *RocksStorage) NumEntries() (int, error) {
+ var count int
+ start := ms.entryKey(0)
+ stop := ms.entryPrefixEnd() // Not included in results.
+ opts := engine.IteratorOpts{
+ Range: engine.Range{Min: start, Max: stop, Type: common.RangeROpen},
+ Reverse: false,
+ }
+ it, err := engine.NewDBRangeIteratorWithOpts(ms.entryDB, opts)
+ if err != nil {
+ return 0, err
+ }
+ defer it.Close()
+ for ; it.Valid(); it.Next() {
+ count++
+ }
+ return count, nil
+}
+
+func (ms *RocksStorage) allEntries(lo, hi, maxSize uint64) (es []pb.Entry, rerr error) {
+ if hi-lo == 1 { // We only need one entry.
+ v, err := ms.entryDB.GetBytesNoLock(ms.entryKey(lo))
+ if err != nil {
+ return nil, err
+ }
+ var e pb.Entry
+ if err = e.Unmarshal(v); err != nil {
+ raftLogger.Infof("failed to unmarshal: %v", v)
+ return nil, err
+ }
+ es = append(es, e)
+ return es, nil
+ }
+ start := ms.entryKey(lo)
+ stop := ms.entryKey(hi) // Not included in results.
+ opts := engine.IteratorOpts{
+ Range: engine.Range{Min: start, Max: stop, Type: common.RangeROpen},
+ Reverse: false,
+ IgnoreDel: true,
+ }
+ it, err := engine.NewDBRangeIteratorWithOpts(ms.entryDB, opts)
+ if err != nil {
+ return nil, err
+ }
+ defer it.Close()
+ size := uint64(0)
+ for ; it.Valid(); it.Next() {
+ v := it.RefValue()
+ var e pb.Entry
+ if err = e.Unmarshal(v); err != nil {
+ raftLogger.Infof("failed to unmarshal: %v", v)
+ return nil, err
+ }
+ size += uint64(e.Size())
+ if size > maxSize && len(es) > 0 {
+ break
+ }
+ es = append(es, e)
+ }
+ return es, err
+}
+
+// Snapshot implements the Storage interface.
+func (ms *RocksStorage) Snapshot() (pb.Snapshot, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ return ms.snapshot, nil
+}
+
+// ApplySnapshot overwrites the contents of this Storage object with
+// those of the given snapshot. It deletes all entries up to the snapshot
+// index, but keeps the raft entry at the snapshot index to keep the logic
+// simple, like the dummy entry in RocksStorage.
+func (ms *RocksStorage) ApplySnapshot(snap pb.Snapshot) error {
+ ms.Lock()
+
+ //handle check for old snapshot being applied
+ msIndex := ms.snapshot.Metadata.Index
+ snapIndex := snap.Metadata.Index
+ if msIndex >= snapIndex {
+ ms.Unlock()
+ return ErrSnapOutOfDate
+ }
+ ms.snapshot = snap
+ // clear cached first index
+ ms.firstIndex = 0
+ ms.setCachedLastIndex(0)
+ ms.Unlock()
+
+ batch := ms.wb
+ e := pb.Entry{Term: snap.Metadata.Term, Index: snap.Metadata.Index}
+ data, err := e.Marshal()
+ if err != nil {
+ return err
+ }
+ batch.Put(ms.entryKey(e.Index), data)
+ err = ms.deleteUntil(batch, e.Index)
+ if err != nil {
+ return err
+ }
+ return ms.commitBatch(batch)
+}
+
+// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and
+// can be used to reconstruct the state at that point.
+// If any configuration changes have been made since the last compaction,
+// the result of the last ApplyConfChange must be passed in.
+func (ms *RocksStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) {
+ first, err := ms.FirstIndex()
+ if err != nil {
+ return pb.Snapshot{}, err
+ }
+ if i < first {
+ return pb.Snapshot{}, ErrSnapOutOfDate
+ }
+
+ var e pb.Entry
+ if _, err := ms.seekEntry(&e, i, false); err != nil {
+ return pb.Snapshot{}, err
+ }
+ if e.Index != i {
+ return pb.Snapshot{}, errNotFound
+ }
+
+ ms.Lock()
+ defer ms.Unlock()
+ ms.snapshot.Metadata.Index = i
+ ms.snapshot.Metadata.Term = e.Term
+ if cs != nil {
+ ms.snapshot.Metadata.ConfState = *cs
+ }
+ ms.snapshot.Data = data
+ snap := ms.snapshot
+	// no need to clear the cached first and last index since nothing in the db changed
+
+ return snap, nil
+}
+
+// Compact discards all log entries prior to compactIndex.
+// It is the application's responsibility to not attempt to compact an index
+// greater than raftLog.applied.
+func (ms *RocksStorage) Compact(compactIndex uint64) error {
+ // we should use seek here, since FirstIndex() will return snapshot index
+ first, err := ms.seekEntry(nil, 0, false)
+ if err != nil {
+ return err
+ }
+ if compactIndex <= first {
+ return ErrCompacted
+ }
+ li, err := ms.LastIndex()
+ if err != nil {
+ return err
+ }
+ if compactIndex > li {
+ raftLogger.Errorf("compact %d is out of bound lastindex(%d)", compactIndex, li)
+ return errors.New("compact is out of bound lastindex")
+ }
+ ms.setCachedFirstIndex(0)
+ batch := ms.entryDB.NewWriteBatch()
+ defer batch.Destroy()
+	// TODO: deleting too many entries at once slows down db writes; improve this to avoid high latency.
+ err = ms.deleteUntil(batch, compactIndex)
+ if err != nil {
+ return err
+ }
+ return ms.commitBatch(batch)
+}
+
+func (ms *RocksStorage) commitBatch(batch engine.WriteBatch) error {
+ err := ms.entryDB.Write(batch)
+ batch.Clear()
+ return err
+}
+
+// Append the new entries to storage.
+func (ms *RocksStorage) Append(entries []pb.Entry) error {
+ if len(entries) == 0 {
+ return nil
+ }
+ batch := ms.wb
+ err := ms.addEntries(batch, entries)
+ if err != nil {
+ return err
+ }
+ err = ms.commitBatch(batch)
+ return err
+}
+
+func (ms *RocksStorage) addEntries(batch engine.WriteBatch, entries []pb.Entry) error {
+ if len(entries) == 0 {
+ return nil
+ }
+
+ first, err := ms.FirstIndex()
+ if err != nil {
+ return err
+ }
+ entryFirst := entries[0].Index
+ entryLast := entryFirst + uint64(len(entries)) - 1
+
+ // shortcut if there is no new entry.
+ if entryLast < first {
+ return nil
+ }
+ // truncate compacted entries
+ if first > entryFirst {
+ entries = entries[first-entryFirst:]
+ }
+
+ last, err := ms.LastIndex()
+ if err != nil {
+ return err
+ }
+
+	if err := ms.writeEnts(batch, entries); err != nil {
+		return err
+	}
+ laste := entries[len(entries)-1].Index
+ ms.setCachedLastIndex(laste)
+ if laste < last {
+ err = ms.deleteFrom(batch, laste+1)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (ms *RocksStorage) writeEnts(batch engine.WriteBatch, es []pb.Entry) error {
+ total := len(es)
+ for idx, e := range es {
+ data, err := e.Marshal()
+ if err != nil {
+ return err
+ }
+ k := ms.entryKey(e.Index)
+ batch.Put(k, data)
+ if (idx+1)%maxWriteBatch == 0 && idx < total-maxWriteBatch {
+ err = ms.commitBatch(batch)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (ms *RocksStorage) deleteFrom(batch engine.WriteBatch, from uint64) error {
+ start := ms.entryKey(from)
+ stop := ms.entryPrefixEnd()
+ //batch.DeleteRange(start, stop)
+ opts := engine.IteratorOpts{
+ Range: engine.Range{Min: start, Max: stop, Type: common.RangeROpen},
+ Reverse: false,
+ IgnoreDel: true,
+ }
+ it, err := engine.NewDBRangeIteratorWithOpts(ms.entryDB, opts)
+ if err != nil {
+ return err
+ }
+ defer it.Close()
+ cnt := 0
+ for ; it.Valid(); it.Next() {
+ batch.Delete(it.RefKey())
+ cnt++
+ }
+ ms.entryDB.AddDeletedCnt(int64(cnt))
+ return nil
+}
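For reference, RocksStorage above keys every entry as <raft id: 8 bytes><group id: 4 bytes>':'<entry index: 8 bytes>, all big-endian, so a prefix range scan visits one group's entries in index order and entryPrefixStart/entryPrefixEnd bound the scan with the ':' and ':'+1 separators. A standalone sketch of the encoding:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// entryKey reproduces the 21-byte key layout of RocksStorage.entryKey:
// 8-byte raft id, 4-byte group id, a ':' separator, 8-byte entry index.
func entryKey(id uint64, gid uint32, idx uint64) []byte {
	b := make([]byte, 21)
	binary.BigEndian.PutUint64(b[0:8], id)
	binary.BigEndian.PutUint32(b[8:12], gid)
	b[12] = ':'
	binary.BigEndian.PutUint64(b[13:21], idx)
	return b
}

// parseIndex recovers the entry index from such a key.
func parseIndex(key []byte) uint64 {
	return binary.BigEndian.Uint64(key[13:21])
}

func main() {
	k := entryKey(7, 3, 42)
	fmt.Printf("key=%x index=%d\n", k, parseIndex(k)) // index=42
}
```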
diff --git a/raft/status.go b/raft/status.go
index 095cc1c7..a0d57ae9 100644
--- a/raft/status.go
+++ b/raft/status.go
@@ -17,7 +17,7 @@ package raft
import (
"fmt"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
type Status struct {
@@ -31,8 +31,19 @@ type Status struct {
LeadTransferee uint64
}
-// getStatus gets a copy of the current raft status.
-func getStatus(r *raft) Status {
+func getProgressCopy(r *raft) map[uint64]Progress {
+ prs := make(map[uint64]Progress)
+ for id, p := range r.prs {
+ prs[id] = *p
+ }
+
+ for id, p := range r.learnerPrs {
+ prs[id] = *p
+ }
+ return prs
+}
+
+func getStatusWithoutProgress(r *raft) Status {
s := Status{
ID: r.id,
LeadTransferee: r.leadTransferee,
@@ -41,17 +52,15 @@ func getStatus(r *raft) Status {
s.SoftState = *r.softState()
s.Applied = r.raftLog.applied
+ return s
+}
+// getStatus gets a copy of the current raft status.
+func getStatus(r *raft) Status {
+ s := getStatusWithoutProgress(r)
if s.RaftState == StateLeader {
- s.Progress = make(map[uint64]Progress)
- for id, p := range r.prs {
- s.Progress[id] = *p
- }
- for id, p := range r.learnerPrs {
- s.Progress[id] = *p
- }
+ s.Progress = getProgressCopy(r)
}
-
return s
}
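The split into getStatusWithoutProgress and getProgressCopy is what backs the new RawNode.StatusWithoutProgress and WithProgress helpers. A usage sketch, mirroring BenchmarkStatusProgress and written as if inside package raft:

```go
// sumMatch sums the Match index of every replica without allocating the
// Progress map that a full Status() copy would build.
func sumMatch(rn *RawNode) uint64 {
	var n uint64
	rn.WithProgress(func(_ uint64, _ ProgressType, pr Progress) {
		n += pr.Match
	})
	return n
}

// appliedIndex only needs a scalar field, so StatusWithoutProgress is enough;
// it returns the Status by value and skips the Progress map entirely.
func appliedIndex(rn *RawNode) uint64 {
	return rn.StatusWithoutProgress().Applied
}
```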
diff --git a/raft/storage.go b/raft/storage.go
index 57d4e403..0567ae8c 100644
--- a/raft/storage.go
+++ b/raft/storage.go
@@ -16,9 +16,13 @@ package raft
import (
"errors"
+ "fmt"
+ "io/ioutil"
"sync"
+ "time"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/engine"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
// ErrCompacted is returned by Storage.Entries/Compact when a requested
@@ -69,6 +73,19 @@ type Storage interface {
Snapshot() (pb.Snapshot, error)
}
+type IExtRaftStorage interface {
+ Storage
+ // Close closes the Storage and performs finalization.
+ Close()
+ ApplySnapshot(pb.Snapshot) error
+ SetHardState(pb.HardState) error
+ CreateSnapshot(uint64, *pb.ConfState, []byte) (pb.Snapshot, error)
+ Compact(uint64) error
+ Append([]pb.Entry) error
+}
+
+var errNotFound = errors.New("Unable to find raft entry")
+
// MemoryStorage implements the Storage interface backed by an
// in-memory array.
type MemoryStorage struct {
@@ -83,12 +100,54 @@ type MemoryStorage struct {
ents []pb.Entry
}
-// NewMemoryStorage creates an empty MemoryStorage.
-func NewMemoryStorage() *MemoryStorage {
- return &MemoryStorage{
+// NewMemoryStorage creates a default raft storage (a temporary rocksdb-backed
+// store when available, falling back to in-memory entries).
+// This should only be used in tests.
+func NewMemoryStorage() IExtRaftStorage {
+ return newDefaultRaftStorage(0, 0)
+}
+
+func NewRealMemoryStorage() *MemoryStorage {
+ ms := &MemoryStorage{
+ // When starting from scratch populate the list with a dummy entry at term zero.
+ ents: make([]pb.Entry, 1),
+ }
+ return ms
+}
+
+func newDefaultRaftStorage(id uint64, gid uint32) IExtRaftStorage {
+ tmpDir, _ := ioutil.TempDir("", fmt.Sprintf("raft-storage-%v-%v-%d", id, gid, time.Now().UnixNano()))
+ cfg := engine.NewRockConfig()
+ cfg.DataDir = tmpDir
+ cfg.DisableWAL = true
+ cfg.UseSharedCache = true
+ cfg.UseSharedRateLimiter = true
+ cfg.DisableMergeCounter = true
+ cfg.EnableTableCounter = false
+ cfg.OptimizeFiltersForHits = true
+	// basically, we don't need to compress the wal since it will be cleaned after snapshot
+ cfg.MinLevelToCompress = 5
+ // use memtable_insert_with_hint_prefix_extractor to speed up insert
+ if cfg.InsertHintFixedLen == 0 {
+ cfg.InsertHintFixedLen = 10
+ }
+ scf, _ := engine.NewSharedEngConfig(cfg.RockOptions)
+ cfg.SharedConfig = scf
+ db, err := engine.NewRockEng(cfg)
+ if err == nil {
+ err = db.OpenEng()
+ if err == nil {
+ return NewRocksStorage(id, gid, false, db)
+ }
+ }
+ raftLogger.Warningf("failed to open rocks raft db: %v, fallback to memory entries", err.Error())
+ ms := &MemoryStorage{
// When starting from scratch populate the list with a dummy entry at term zero.
ents: make([]pb.Entry, 1),
}
+ return ms
+}
+
+func (ms *MemoryStorage) Close() {
}
// InitialState implements the Storage interface.
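Both MemoryStorage and RocksStorage satisfy the new IExtRaftStorage interface, so callers only have to remember to Close whatever storage they were handed. Two hypothetical helpers, written as if inside package raft, show the intended split between production and test construction:

```go
// openRaftStorage wraps an application-owned engine. Passing shared=true means
// the storage's Close() will not close the engine (see RocksStorage.Close).
func openRaftStorage(id uint64, gid uint32, db engine.KVEngine) IExtRaftStorage {
	return NewRocksStorage(id, gid, true, db)
}

// testRaftStorage returns the test-only default storage; callers must Close it
// so the backing temporary engine, if any, is shut down.
func testRaftStorage() IExtRaftStorage {
	return NewMemoryStorage()
}
```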
diff --git a/raft/storage_test.go b/raft/storage_test.go
index 02151eee..d09970bf 100644
--- a/raft/storage_test.go
+++ b/raft/storage_test.go
@@ -19,9 +19,22 @@ import (
"reflect"
"testing"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
+func allEntries(s IExtRaftStorage) []pb.Entry {
+ ms, ok := s.(*MemoryStorage)
+ if ok {
+ return ms.ents
+ }
+ rs, ok := s.(*RocksStorage)
+ if ok {
+ all, _ := rs.allEntries(0, math.MaxUint64, math.MaxUint64)
+ return all
+ }
+ panic("unknown raft storage")
+}
+
func TestStorageTerm(t *testing.T) {
ents := []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}}
tests := []struct {
@@ -39,7 +52,8 @@ func TestStorageTerm(t *testing.T) {
}
for i, tt := range tests {
- s := &MemoryStorage{ents: ents}
+ s := newInitedMemoryStorage(ents)
+ defer s.Close()
func() {
defer func() {
@@ -86,7 +100,8 @@ func TestStorageEntries(t *testing.T) {
}
for i, tt := range tests {
- s := &MemoryStorage{ents: ents}
+ s := newInitedMemoryStorage(ents)
+ defer s.Close()
entries, err := s.Entries(tt.lo, tt.hi, tt.maxsize)
if err != tt.werr {
t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
@@ -99,7 +114,8 @@ func TestStorageEntries(t *testing.T) {
func TestStorageLastIndex(t *testing.T) {
ents := []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}}
- s := &MemoryStorage{ents: ents}
+ s := newInitedMemoryStorage(ents)
+ defer s.Close()
last, err := s.LastIndex()
if err != nil {
@@ -120,10 +136,21 @@ func TestStorageLastIndex(t *testing.T) {
}
func TestStorageFirstIndex(t *testing.T) {
+ emptyStorage := NewMemoryStorage()
+ first, err := emptyStorage.FirstIndex()
+ if err != nil {
+ t.Errorf("err = %v, want nil", err)
+ }
+ if first != 1 {
+ t.Errorf("first = %d, want %d", first, 1)
+ }
+ emptyStorage.Close()
+
ents := []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}}
- s := &MemoryStorage{ents: ents}
+ s := newInitedMemoryStorage(ents)
+ defer s.Close()
- first, err := s.FirstIndex()
+ first, err = s.FirstIndex()
if err != nil {
t.Errorf("err = %v, want nil", err)
}
@@ -158,19 +185,22 @@ func TestStorageCompact(t *testing.T) {
}
for i, tt := range tests {
- s := &MemoryStorage{ents: ents}
+ s := newInitedMemoryStorage(ents)
+ defer s.Close()
err := s.Compact(tt.i)
if err != tt.werr {
t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
}
- if s.ents[0].Index != tt.windex {
- t.Errorf("#%d: index = %d, want %d", i, s.ents[0].Index, tt.windex)
+ fi, _ := s.FirstIndex()
+ if fi-1 != tt.windex {
+ t.Errorf("#%d: index = %d, want %d", i, fi-1, tt.windex)
}
- if s.ents[0].Term != tt.wterm {
- t.Errorf("#%d: term = %d, want %d", i, s.ents[0].Term, tt.wterm)
+ all := allEntries(s)
+ if len(all) != tt.wlen {
+ t.Errorf("#%d: len = %d, want %d", i, len(all), tt.wlen)
}
- if len(s.ents) != tt.wlen {
- t.Errorf("#%d: len = %d, want %d", i, len(s.ents), tt.wlen)
+ if all[0].Term != tt.wterm {
+ t.Errorf("#%d: term = %d, want %d", i, all[0].Term, tt.wterm)
}
}
}
@@ -191,7 +221,8 @@ func TestStorageCreateSnapshot(t *testing.T) {
}
for i, tt := range tests {
- s := &MemoryStorage{ents: ents}
+ s := newInitedMemoryStorage(ents)
+ defer s.Close()
snap, err := s.CreateSnapshot(tt.i, cs, data)
if err != tt.werr {
t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
@@ -210,6 +241,11 @@ func TestStorageAppend(t *testing.T) {
werr error
wentries []pb.Entry
}{
+ {
+ []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}},
+ nil,
+ []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}},
+ },
{
[]pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}},
nil,
@@ -246,13 +282,15 @@ func TestStorageAppend(t *testing.T) {
}
for i, tt := range tests {
- s := &MemoryStorage{ents: ents}
+ s := newInitedMemoryStorage(ents)
+ defer s.Close()
err := s.Append(tt.entries)
if err != tt.werr {
t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
}
- if !reflect.DeepEqual(s.ents, tt.wentries) {
- t.Errorf("#%d: entries = %v, want %v", i, s.ents, tt.wentries)
+ all := allEntries(s)
+ if !reflect.DeepEqual(all, tt.wentries) {
+ t.Errorf("#%d: entries = %v, want %v", i, all, tt.wentries)
}
}
}
@@ -266,6 +304,7 @@ func TestStorageApplySnapshot(t *testing.T) {
}
s := NewMemoryStorage()
+ defer s.Close()
//Apply Snapshot successful
i := 0
diff --git a/raft/util.go b/raft/util.go
index a58c9849..3225073d 100644
--- a/raft/util.go
+++ b/raft/util.go
@@ -18,7 +18,7 @@ import (
"bytes"
"fmt"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
func (st StateType) MarshalJSON() ([]byte, error) {
@@ -77,10 +77,7 @@ func DescribeMessage(m pb.Message, f EntryFormatter) string {
var buf bytes.Buffer
fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index)
if m.Reject {
- fmt.Fprintf(&buf, " Rejected")
- if m.RejectHint != 0 {
- fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint)
- }
+ fmt.Fprintf(&buf, " Rejected (Hint: %d)", m.RejectHint)
}
if m.Commit != 0 {
fmt.Fprintf(&buf, " Commit:%d", m.Commit)
diff --git a/raft/util_test.go b/raft/util_test.go
index f30bb3d8..c668eec7 100644
--- a/raft/util_test.go
+++ b/raft/util_test.go
@@ -20,7 +20,7 @@ import (
"strings"
"testing"
- pb "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ pb "github.com/youzan/ZanRedisDB/raft/raftpb"
)
var testFormatter EntryFormatter = func(data []byte) string {
diff --git a/rockredis/const.go b/rockredis/const.go
index 434c740e..0ef85b62 100644
--- a/rockredis/const.go
+++ b/rockredis/const.go
@@ -2,6 +2,8 @@ package rockredis
import (
"errors"
+
+ "github.com/youzan/ZanRedisDB/common"
)
const (
@@ -36,7 +38,9 @@ const (
SetType byte = 29
SSizeType byte = 30
- JSONType byte = 31
+ JSONType byte = 31
+ BitmapType byte = 32
+ BitmapMetaType byte = 33
ColumnType byte = 38 // used for column store for OLAP
@@ -79,11 +83,10 @@ var (
const (
defaultScanCount int = 100
MAX_BATCH_NUM = 5000
- RangeDeleteNum = 500
+ RangeDeleteNum = 5000
)
var (
- errKeySize = errors.New("invalid key size")
errValueSize = errors.New("invalid value size")
errZSetMemberSize = errors.New("invalid zset member size")
errTooMuchBatchSize = errors.New("the batch size exceed the limit")
@@ -98,19 +101,13 @@ const (
MaxTableNameLen int = 255
MaxColumnLen int = 255
//max key size
- MaxKeySize int = 10240
-
- //max hash field size
- MaxHashFieldSize int = 10240
-
- //max zset member size
- MaxZSetMemberSize int = 10240
+ MaxKeySize int = common.MaxKeySize
- //max set member size
- MaxSetMemberSize int = 10240
+ // subkey length for hash/set/zset
+ MaxSubKeyLen int = common.MaxSubKeyLen
//max value size
- MaxValueSize int = 1024 * 1024 * 8
+ MaxValueSize int = common.MaxValueSize
)
var (
diff --git a/rockredis/fullscan.go b/rockredis/fullscan.go
index 9d4e1aae..be1ef361 100644
--- a/rockredis/fullscan.go
+++ b/rockredis/fullscan.go
@@ -5,8 +5,9 @@ import (
"encoding/base64"
"encoding/binary"
- "github.com/absolute8511/ZanRedisDB/common"
"github.com/gobwas/glob"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
)
type ItemContainer struct {
@@ -16,7 +17,7 @@ type ItemContainer struct {
cursor []byte
}
-type itemFunc func(*RangeLimitedIterator, glob.Glob) (*ItemContainer, error)
+type itemFunc func(*engine.RangeLimitedIterator, glob.Glob) (*ItemContainer, error)
func buildErrFullScanResult(err error, dataType common.DataType) *common.FullScanResult {
return &common.FullScanResult{
@@ -86,7 +87,7 @@ func (db *RockDB) kvFullScan(key []byte, count int,
match string, inputBuffer []interface{}) *common.FullScanResult {
return db.fullScanCommon(KVType, key, count, match,
- func(it *RangeLimitedIterator, r glob.Glob) (*ItemContainer, error) {
+ func(it *engine.RangeLimitedIterator, r glob.Glob) (*ItemContainer, error) {
if t, k, _, err := decodeFullScanKey(KVType, it.Key()); err != nil {
return nil, err
} else if r != nil && !r.Match(string(k)) {
@@ -102,7 +103,7 @@ func (db *RockDB) hashFullScan(key []byte, count int,
match string, inputBuffer []interface{}) *common.FullScanResult {
return db.fullScanCommon(HashType, key, count, match,
- func(it *RangeLimitedIterator, r glob.Glob) (*ItemContainer, error) {
+ func(it *engine.RangeLimitedIterator, r glob.Glob) (*ItemContainer, error) {
var t, k, f []byte
var err error
if t, k, f, err = decodeFullScanKey(HashType, it.Key()); err != nil {
@@ -123,7 +124,7 @@ func (db *RockDB) listFullScan(key []byte, count int,
match string, inputBuffer []interface{}) *common.FullScanResult {
return db.fullScanCommon(ListType, key, count, match,
- func(it *RangeLimitedIterator, r glob.Glob) (*ItemContainer, error) {
+ func(it *engine.RangeLimitedIterator, r glob.Glob) (*ItemContainer, error) {
var t, k, seq []byte
var err error
if t, k, seq, err = decodeFullScanKey(ListType, it.Key()); err != nil {
@@ -141,7 +142,7 @@ func (db *RockDB) setFullScan(key []byte, count int,
match string, inputBuffer []interface{}) *common.FullScanResult {
return db.fullScanCommon(SetType, key, count, match,
- func(it *RangeLimitedIterator, r glob.Glob) (*ItemContainer, error) {
+ func(it *engine.RangeLimitedIterator, r glob.Glob) (*ItemContainer, error) {
var t, k, m []byte
var err error
if t, k, m, err = decodeFullScanKey(SetType, it.Key()); err != nil {
@@ -158,7 +159,7 @@ func (db *RockDB) zsetFullScan(key []byte, count int,
match string, inputBuffer []interface{}) *common.FullScanResult {
return db.fullScanCommon(ZSetType, key, count, match,
- func(it *RangeLimitedIterator, r glob.Glob) (*ItemContainer, error) {
+ func(it *engine.RangeLimitedIterator, r glob.Glob) (*ItemContainer, error) {
var t, k, m []byte
var err error
var s float64
@@ -270,7 +271,7 @@ func (db *RockDB) fullScanCommon(tp byte, key []byte, count int, match string,
}
func (db *RockDB) buildFullScanIterator(storeDataType byte, table,
- key []byte, count int) (*RangeLimitedIterator, error) {
+ key []byte, count int) (*engine.RangeLimitedIterator, error) {
k, c, err := decodeFullScanCursor(key)
if err != nil {
return nil, err
@@ -287,7 +288,7 @@ func (db *RockDB) buildFullScanIterator(storeDataType byte, table,
dbLog.Debugf("full scan range: %v, %v, %v, %v", minKey, maxKey, string(minKey), string(maxKey))
// minKey = minKey[:0]
- it, err := NewDBRangeLimitIterator(db.eng, minKey, maxKey, common.RangeOpen, 0, count+1, false)
+ it, err := db.NewDBRangeLimitIterator(minKey, maxKey, common.RangeOpen, 0, count+1, false)
if err != nil {
return nil, err
}
@@ -315,11 +316,7 @@ func encodeFullScanMinKey(storeDataType byte, table, key, cursor []byte) ([]byte
func encodeFullScanKey(storeDataType byte, table, key, cursor []byte) ([]byte, error) {
switch storeDataType {
case KVType:
- var newKey []byte
- newKey = append(newKey, table...)
- newKey = append(newKey, []byte(":")...)
- newKey = append(newKey, key...)
- return encodeKVKey(newKey), nil
+ return encodeKVKey(packRedisKey(table, key)), nil
case ListType:
var seq int64
var err error
diff --git a/rockredis/index_mgr.go b/rockredis/index_mgr.go
index 97d28876..50e8c010 100644
--- a/rockredis/index_mgr.go
+++ b/rockredis/index_mgr.go
@@ -5,8 +5,7 @@ import (
"errors"
"sync"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/gorocksdb"
+ "github.com/youzan/ZanRedisDB/common"
)
var (
@@ -432,12 +431,12 @@ func (im *IndexMgr) dobuildIndexes(db *RockDB, stopChan chan struct{}) {
}
pkList = pkList[:0]
var err error
- pkList, err = db.ScanWithBuffer(common.HASH, cursor, buildIndexBlock, "", pkList)
+ pkList, err = db.ScanWithBuffer(common.HASH, cursor, buildIndexBlock, "", pkList, false)
if err != nil {
dbLog.Infof("rebuild index for table %v error %v", buildTable, err)
return true, err
}
- wb := gorocksdb.NewWriteBatch()
+ wb := db.rockEng.NewWriteBatch()
defer wb.Destroy()
for _, pk := range pkList {
if !bytes.HasPrefix(pk, origPrefix) {
@@ -464,7 +463,7 @@ func (im *IndexMgr) dobuildIndexes(db *RockDB, stopChan chan struct{}) {
if len(pkList) < buildIndexBlock {
cursor = nil
}
- db.eng.Write(db.defaultWriteOpts, wb)
+ db.rockEng.Write(wb)
if len(cursor) == 0 {
return true, nil
} else {
diff --git a/rockredis/index_types.pb.go b/rockredis/index_types.pb.go
index 624fc762..0fe91354 100644
--- a/rockredis/index_types.pb.go
+++ b/rockredis/index_types.pb.go
@@ -1,24 +1,15 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: index_types.proto
-/*
- Package rockredis is a generated protocol buffer package.
-
- It is generated from these files:
- index_types.proto
-
- It has these top-level messages:
- HsetIndexInfo
- HsetIndexList
-*/
package rockredis
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-
-import io "io"
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -29,7 +20,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type IndexPropertyDType int32
@@ -44,6 +35,7 @@ var IndexPropertyDType_name = map[int32]string{
1: "Int32V",
2: "StringV",
}
+
var IndexPropertyDType_value = map[string]int32{
"Int64V": 0,
"Int32V": 1,
@@ -53,7 +45,10 @@ var IndexPropertyDType_value = map[string]int32{
func (x IndexPropertyDType) String() string {
return proto.EnumName(IndexPropertyDType_name, int32(x))
}
-func (IndexPropertyDType) EnumDescriptor() ([]byte, []int) { return fileDescriptorIndexTypes, []int{0} }
+
+func (IndexPropertyDType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_65a2d0bf1752f5d6, []int{0}
+}
type IndexState int32
@@ -72,6 +67,7 @@ var IndexState_name = map[int32]string{
3: "ReadyIndex",
4: "DeletedIndex",
}
+
var IndexState_value = map[string]int32{
"InitIndex": 0,
"BuildingIndex": 1,
@@ -83,7 +79,10 @@ var IndexState_value = map[string]int32{
func (x IndexState) String() string {
return proto.EnumName(IndexState_name, int32(x))
}
-func (IndexState) EnumDescriptor() ([]byte, []int) { return fileDescriptorIndexTypes, []int{1} }
+
+func (IndexState) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_65a2d0bf1752f5d6, []int{1}
+}
type HsetIndexInfo struct {
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
@@ -94,26 +93,115 @@ type HsetIndexInfo struct {
State IndexState `protobuf:"varint,6,opt,name=state,proto3,enum=rockredis.IndexState" json:"state,omitempty"`
}
-func (m *HsetIndexInfo) Reset() { *m = HsetIndexInfo{} }
-func (m *HsetIndexInfo) String() string { return proto.CompactTextString(m) }
-func (*HsetIndexInfo) ProtoMessage() {}
-func (*HsetIndexInfo) Descriptor() ([]byte, []int) { return fileDescriptorIndexTypes, []int{0} }
+func (m *HsetIndexInfo) Reset() { *m = HsetIndexInfo{} }
+func (m *HsetIndexInfo) String() string { return proto.CompactTextString(m) }
+func (*HsetIndexInfo) ProtoMessage() {}
+func (*HsetIndexInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_65a2d0bf1752f5d6, []int{0}
+}
+func (m *HsetIndexInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HsetIndexInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_HsetIndexInfo.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *HsetIndexInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HsetIndexInfo.Merge(m, src)
+}
+func (m *HsetIndexInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *HsetIndexInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_HsetIndexInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HsetIndexInfo proto.InternalMessageInfo
type HsetIndexList struct {
- HsetIndexes []HsetIndexInfo `protobuf:"bytes,1,rep,name=hset_indexes,json=hsetIndexes" json:"hset_indexes"`
+ HsetIndexes []HsetIndexInfo `protobuf:"bytes,1,rep,name=hset_indexes,json=hsetIndexes,proto3" json:"hset_indexes"`
+}
+
+func (m *HsetIndexList) Reset() { *m = HsetIndexList{} }
+func (m *HsetIndexList) String() string { return proto.CompactTextString(m) }
+func (*HsetIndexList) ProtoMessage() {}
+func (*HsetIndexList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_65a2d0bf1752f5d6, []int{1}
+}
+func (m *HsetIndexList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HsetIndexList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_HsetIndexList.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *HsetIndexList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HsetIndexList.Merge(m, src)
+}
+func (m *HsetIndexList) XXX_Size() int {
+ return m.Size()
+}
+func (m *HsetIndexList) XXX_DiscardUnknown() {
+ xxx_messageInfo_HsetIndexList.DiscardUnknown(m)
}
-func (m *HsetIndexList) Reset() { *m = HsetIndexList{} }
-func (m *HsetIndexList) String() string { return proto.CompactTextString(m) }
-func (*HsetIndexList) ProtoMessage() {}
-func (*HsetIndexList) Descriptor() ([]byte, []int) { return fileDescriptorIndexTypes, []int{1} }
+var xxx_messageInfo_HsetIndexList proto.InternalMessageInfo
func init() {
- proto.RegisterType((*HsetIndexInfo)(nil), "rockredis.HsetIndexInfo")
- proto.RegisterType((*HsetIndexList)(nil), "rockredis.HsetIndexList")
proto.RegisterEnum("rockredis.IndexPropertyDType", IndexPropertyDType_name, IndexPropertyDType_value)
proto.RegisterEnum("rockredis.IndexState", IndexState_name, IndexState_value)
+ proto.RegisterType((*HsetIndexInfo)(nil), "rockredis.HsetIndexInfo")
+ proto.RegisterType((*HsetIndexList)(nil), "rockredis.HsetIndexList")
}
+
+func init() { proto.RegisterFile("index_types.proto", fileDescriptor_65a2d0bf1752f5d6) }
+
+var fileDescriptor_65a2d0bf1752f5d6 = []byte{
+ // 401 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xcf, 0x8a, 0x13, 0x41,
+ 0x10, 0xc6, 0xbb, 0xf3, 0x4f, 0x52, 0xf9, 0xc3, 0x6c, 0xa1, 0xd2, 0x08, 0xdb, 0x3b, 0xec, 0x29,
+ 0xac, 0x10, 0x21, 0x2b, 0x82, 0xe0, 0xc5, 0x10, 0xc4, 0xc0, 0x1e, 0x64, 0x56, 0x72, 0x0d, 0xd1,
+ 0xa9, 0x64, 0x1b, 0xc7, 0xee, 0x71, 0xba, 0x47, 0x76, 0xde, 0xc2, 0xc7, 0xca, 0x31, 0x47, 0x4f,
+ 0xe2, 0x26, 0xe0, 0x73, 0xc8, 0x74, 0xaf, 0xba, 0xea, 0xed, 0xab, 0x5f, 0xd5, 0x57, 0x7c, 0xd5,
+ 0x34, 0x1c, 0x29, 0x9d, 0xd2, 0xf5, 0xd2, 0x55, 0x39, 0xd9, 0x71, 0x5e, 0x18, 0x67, 0xb0, 0x5b,
+ 0x98, 0xf7, 0x1f, 0x0a, 0x4a, 0x95, 0x7d, 0x74, 0x7f, 0x63, 0x36, 0xc6, 0xd3, 0x27, 0xb5, 0x0a,
+ 0x03, 0xa7, 0x3f, 0x38, 0x0c, 0x5e, 0x5b, 0x72, 0xf3, 0xda, 0x3a, 0xd7, 0x6b, 0x83, 0x08, 0x2d,
+ 0xbd, 0xfa, 0x48, 0x82, 0xc7, 0x7c, 0xd4, 0x4f, 0xbc, 0xc6, 0x13, 0xe8, 0x85, 0xdd, 0x6b, 0x45,
+ 0x59, 0x2a, 0x1a, 0xbe, 0x05, 0x1e, 0xbd, 0xaa, 0x09, 0x1e, 0x03, 0xe4, 0x05, 0xad, 0xd5, 0xf5,
+ 0x32, 0x23, 0x2d, 0x9a, 0x31, 0x1f, 0xb5, 0x93, 0x6e, 0x20, 0x17, 0xa4, 0xf1, 0x21, 0x74, 0x4a,
+ 0xad, 0x3e, 0x95, 0x24, 0x5a, 0xbe, 0x75, 0x5b, 0xe1, 0x0b, 0x80, 0xcf, 0xab, 0xac, 0x24, 0x9f,
+ 0x59, 0xb4, 0x63, 0x3e, 0x1a, 0x4e, 0x8e, 0xc7, 0xbf, 0x33, 0x8f, 0x7d, 0xaa, 0x37, 0x85, 0xc9,
+ 0xa9, 0x70, 0xd5, 0xec, 0x6d, 0x95, 0x53, 0xd2, 0xf5, 0x86, 0x5a, 0xe2, 0x63, 0x68, 0x5b, 0xb7,
+ 0x72, 0x24, 0x3a, 0xde, 0xf8, 0xe0, 0x5f, 0xe3, 0x65, 0xdd, 0x4c, 0xc2, 0xcc, 0x69, 0x72, 0xe7,
+ 0xce, 0x0b, 0x65, 0x1d, 0xbe, 0x84, 0xfe, 0x95, 0x25, 0xb7, 0xf4, 0x57, 0x90, 0x15, 0x3c, 0x6e,
+ 0x8e, 0x7a, 0x13, 0x71, 0x67, 0xc9, 0x5f, 0xef, 0x32, 0x6d, 0x6d, 0xbf, 0x9d, 0xb0, 0xa4, 0x77,
+ 0xf5, 0x0b, 0x92, 0x3d, 0x7b, 0x0e, 0xf8, 0x7f, 0x42, 0x04, 0xe8, 0xcc, 0xb5, 0x7b, 0xf6, 0x74,
+ 0x11, 0xb1, 0x5b, 0x7d, 0x3e, 0x59, 0x44, 0x1c, 0x7b, 0x70, 0xef, 0xd2, 0x15, 0x4a, 0x6f, 0x16,
+ 0x51, 0xe3, 0x2c, 0x05, 0xf8, 0x93, 0x11, 0x07, 0xd0, 0x9d, 0x6b, 0x15, 0xf6, 0x46, 0x0c, 0x8f,
+ 0x60, 0x30, 0x2d, 0x55, 0x96, 0x2a, 0xbd, 0x09, 0x88, 0x23, 0xc2, 0xd0, 0xa3, 0x99, 0xd1, 0x14,
+ 0x58, 0x03, 0x87, 0x00, 0x09, 0xad, 0xd2, 0x2a, 0xd4, 0x4d, 0x8c, 0xa0, 0x3f, 0xa3, 0x8c, 0x1c,
+ 0xa5, 0x81, 0xb4, 0xa6, 0xf1, 0xf6, 0x46, 0xb2, 0xdd, 0x8d, 0x64, 0xdb, 0xbd, 0xe4, 0xbb, 0xbd,
+ 0xe4, 0xdf, 0xf7, 0x92, 0x7f, 0x39, 0x48, 0xb6, 0x3b, 0x48, 0xf6, 0xf5, 0x20, 0xd9, 0xbb, 0x8e,
+ 0xff, 0x06, 0xe7, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x14, 0x03, 0x0b, 0xa1, 0x3c, 0x02, 0x00,
+ 0x00,
+}
+
func (m *HsetIndexInfo) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -204,6 +292,9 @@ func encodeVarintIndexTypes(dAtA []byte, offset int, v uint64) int {
return offset + 1
}
func (m *HsetIndexInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
l = len(m.Name)
@@ -230,6 +321,9 @@ func (m *HsetIndexInfo) Size() (n int) {
}
func (m *HsetIndexList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
if len(m.HsetIndexes) > 0 {
@@ -269,7 +363,7 @@ func (m *HsetIndexInfo) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -297,7 +391,7 @@ func (m *HsetIndexInfo) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -306,6 +400,9 @@ func (m *HsetIndexInfo) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthIndexTypes
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthIndexTypes
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -328,7 +425,7 @@ func (m *HsetIndexInfo) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -337,6 +434,9 @@ func (m *HsetIndexInfo) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthIndexTypes
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthIndexTypes
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -359,7 +459,7 @@ func (m *HsetIndexInfo) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.PrefixLen |= (int32(b) & 0x7F) << shift
+ m.PrefixLen |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -378,7 +478,7 @@ func (m *HsetIndexInfo) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Unique |= (int32(b) & 0x7F) << shift
+ m.Unique |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -397,7 +497,7 @@ func (m *HsetIndexInfo) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.ValueType |= (IndexPropertyDType(b) & 0x7F) << shift
+ m.ValueType |= IndexPropertyDType(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -416,7 +516,7 @@ func (m *HsetIndexInfo) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.State |= (IndexState(b) & 0x7F) << shift
+ m.State |= IndexState(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -430,6 +530,9 @@ func (m *HsetIndexInfo) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthIndexTypes
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthIndexTypes
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -457,7 +560,7 @@ func (m *HsetIndexList) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -485,7 +588,7 @@ func (m *HsetIndexList) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -494,6 +597,9 @@ func (m *HsetIndexList) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthIndexTypes
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthIndexTypes
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -511,6 +617,9 @@ func (m *HsetIndexList) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthIndexTypes
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthIndexTypes
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -577,10 +686,13 @@ func skipIndexTypes(dAtA []byte) (n int, err error) {
break
}
}
- iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthIndexTypes
}
+ iNdEx += length
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthIndexTypes
+ }
return iNdEx, nil
case 3:
for {
@@ -609,6 +721,9 @@ func skipIndexTypes(dAtA []byte) (n int, err error) {
return 0, err
}
iNdEx = start + next
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthIndexTypes
+ }
}
return iNdEx, nil
case 4:
@@ -627,34 +742,3 @@ var (
ErrInvalidLengthIndexTypes = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowIndexTypes = fmt.Errorf("proto: integer overflow")
)
-
-func init() { proto.RegisterFile("index_types.proto", fileDescriptorIndexTypes) }
-
-var fileDescriptorIndexTypes = []byte{
- // 393 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0xdf, 0x8a, 0xd3, 0x40,
- 0x14, 0xc6, 0x33, 0xfd, 0x27, 0x3d, 0x69, 0x4b, 0xf6, 0xa0, 0x32, 0x08, 0x9b, 0x0d, 0x7b, 0x15,
- 0x56, 0xa8, 0xd0, 0x15, 0x41, 0xf0, 0xc6, 0x52, 0xc4, 0xc0, 0x5e, 0x48, 0x56, 0x7a, 0x5b, 0xa2,
- 0x39, 0xcd, 0x0e, 0xc6, 0x99, 0x98, 0x99, 0xc8, 0xe6, 0x4d, 0x7c, 0xa4, 0x5e, 0xee, 0x13, 0x88,
- 0x5b, 0xc1, 0xe7, 0x90, 0xcc, 0xac, 0x5a, 0xf5, 0xee, 0x3b, 0xbf, 0x73, 0xbe, 0xc3, 0x77, 0x66,
- 0xe0, 0x48, 0xc8, 0x9c, 0xae, 0x37, 0xa6, 0xad, 0x48, 0xcf, 0xab, 0x5a, 0x19, 0x85, 0xe3, 0x5a,
- 0xbd, 0xff, 0x50, 0x53, 0x2e, 0xf4, 0xa3, 0xfb, 0x85, 0x2a, 0x94, 0xa5, 0x4f, 0x3a, 0xe5, 0x06,
- 0x4e, 0x7f, 0x30, 0x98, 0xbe, 0xd6, 0x64, 0x92, 0xce, 0x9a, 0xc8, 0xad, 0x42, 0x84, 0x81, 0xcc,
- 0x3e, 0x12, 0x67, 0x11, 0x8b, 0x27, 0xa9, 0xd5, 0x78, 0x02, 0xbe, 0xdb, 0xbd, 0x15, 0x54, 0xe6,
- 0xbc, 0x67, 0x5b, 0x60, 0xd1, 0xab, 0x8e, 0xe0, 0x31, 0x40, 0x55, 0xd3, 0x56, 0x5c, 0x6f, 0x4a,
- 0x92, 0xbc, 0x1f, 0xb1, 0x78, 0x98, 0x8e, 0x1d, 0xb9, 0x20, 0x89, 0x0f, 0x61, 0xd4, 0x48, 0xf1,
- 0xa9, 0x21, 0x3e, 0xb0, 0xad, 0xbb, 0x0a, 0x5f, 0x00, 0x7c, 0xce, 0xca, 0x86, 0x6c, 0x66, 0x3e,
- 0x8c, 0x58, 0x3c, 0x5b, 0x1c, 0xcf, 0x7f, 0x67, 0x9e, 0xdb, 0x54, 0x6f, 0x6a, 0x55, 0x51, 0x6d,
- 0xda, 0xd5, 0xdb, 0xb6, 0xa2, 0x74, 0x6c, 0x0d, 0x9d, 0xc4, 0xc7, 0x30, 0xd4, 0x26, 0x33, 0xc4,
- 0x47, 0xd6, 0xf8, 0xe0, 0x5f, 0xe3, 0x65, 0xd7, 0x4c, 0xdd, 0xcc, 0x69, 0x7a, 0x70, 0xe7, 0x85,
- 0xd0, 0x06, 0x5f, 0xc2, 0xe4, 0x4a, 0x93, 0xd9, 0xd8, 0x2b, 0x48, 0x73, 0x16, 0xf5, 0x63, 0x7f,
- 0xc1, 0x0f, 0x96, 0xfc, 0xf5, 0x2e, 0xcb, 0xc1, 0xee, 0xeb, 0x89, 0x97, 0xfa, 0x57, 0xbf, 0x20,
- 0xe9, 0xb3, 0xe7, 0x80, 0xff, 0x27, 0x44, 0x80, 0x51, 0x22, 0xcd, 0xb3, 0xa7, 0xeb, 0xc0, 0xbb,
- 0xd3, 0xe7, 0x8b, 0x75, 0xc0, 0xd0, 0x87, 0x7b, 0x97, 0xa6, 0x16, 0xb2, 0x58, 0x07, 0xbd, 0xb3,
- 0x1c, 0xe0, 0x4f, 0x46, 0x9c, 0xc2, 0x38, 0x91, 0xc2, 0xed, 0x0d, 0x3c, 0x3c, 0x82, 0xe9, 0xb2,
- 0x11, 0x65, 0x2e, 0x64, 0xe1, 0x10, 0x43, 0x84, 0x99, 0x45, 0x2b, 0x25, 0xc9, 0xb1, 0x1e, 0xce,
- 0x00, 0x52, 0xca, 0xf2, 0xd6, 0xd5, 0x7d, 0x0c, 0x60, 0xb2, 0xa2, 0x92, 0x0c, 0xe5, 0x8e, 0x0c,
- 0x96, 0x7c, 0x77, 0x1b, 0x7a, 0x37, 0xb7, 0xa1, 0xb7, 0xdb, 0x87, 0xec, 0x66, 0x1f, 0xb2, 0x6f,
- 0xfb, 0x90, 0x7d, 0xf9, 0x1e, 0x7a, 0xef, 0x46, 0xf6, 0xfb, 0xcf, 0x7f, 0x06, 0x00, 0x00, 0xff,
- 0xff, 0x0b, 0x2d, 0x74, 0xe7, 0x34, 0x02, 0x00, 0x00,
-}
diff --git a/rockredis/iterator.go b/rockredis/iterator.go
deleted file mode 100644
index 294e884d..00000000
--- a/rockredis/iterator.go
+++ /dev/null
@@ -1,338 +0,0 @@
-package rockredis
-
-import (
- "bytes"
-
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/gorocksdb"
-)
-
-type Iterator interface {
- Next()
- Prev()
- Valid() bool
- Seek([]byte)
- SeekForPrev([]byte)
- SeekToFirst()
- SeekToLast()
- Close()
- RefKey() []byte
- Key() []byte
- RefValue() []byte
- Value() []byte
- NoTimestamp(vt byte)
-}
-
-type Range struct {
- Min []byte
- Max []byte
- Type uint8
-}
-
-type Limit struct {
- Offset int
- Count int
-}
-
-type DBIterator struct {
- *gorocksdb.Iterator
- snap *gorocksdb.Snapshot
- ro *gorocksdb.ReadOptions
- db *gorocksdb.DB
- upperBound *gorocksdb.IterBound
- lowerBound *gorocksdb.IterBound
- removeTsType byte
-}
-
-// low_bound is inclusive
-// upper bound is exclusive
-func NewDBIterator(db *gorocksdb.DB, withSnap bool, prefixSame bool, lowbound []byte, upbound []byte, ignoreDel bool) (*DBIterator, error) {
- db.RLock()
- dbit := &DBIterator{
- db: db,
- }
- readOpts := gorocksdb.NewDefaultReadOptions()
- readOpts.SetFillCache(false)
- readOpts.SetVerifyChecksums(false)
- if prefixSame {
- readOpts.SetPrefixSameAsStart(true)
- }
- if lowbound != nil {
- dbit.lowerBound = gorocksdb.NewIterBound(lowbound)
- readOpts.SetIterLowerBound(dbit.lowerBound)
- }
- if upbound != nil {
- dbit.upperBound = gorocksdb.NewIterBound(upbound)
- readOpts.SetIterUpperBound(dbit.upperBound)
- }
- if ignoreDel {
- // may iterator some deleted keys still not compacted.
- readOpts.SetIgnoreRangeDeletions(true)
- }
- dbit.ro = readOpts
- var err error
- if withSnap {
- dbit.snap, err = db.NewSnapshot()
- if err != nil {
- dbit.Close()
- return nil, err
- }
- readOpts.SetSnapshot(dbit.snap)
- }
- dbit.Iterator, err = db.NewIterator(readOpts)
- if err != nil {
- dbit.Close()
- return nil, err
- }
- return dbit, nil
-}
-
-func (it *DBIterator) RefKey() []byte {
- return it.Iterator.Key().Data()
-}
-
-func (it *DBIterator) Key() []byte {
- return it.Iterator.Key().Bytes()
-}
-
-func (it *DBIterator) RefValue() []byte {
- v := it.Iterator.Value().Data()
- if (it.removeTsType == KVType || it.removeTsType == HashType) && len(v) >= tsLen {
- v = v[:len(v)-tsLen]
- }
- return v
-}
-
-func (it *DBIterator) Value() []byte {
- v := it.Iterator.Value().Bytes()
- if (it.removeTsType == KVType || it.removeTsType == HashType) && len(v) >= tsLen {
- v = v[:len(v)-tsLen]
- }
- return v
-}
-
-func (it *DBIterator) NoTimestamp(vt byte) {
- it.removeTsType = vt
-}
-
-func (it *DBIterator) Close() {
- if it.Iterator != nil {
- it.Iterator.Close()
- }
- if it.ro != nil {
- it.ro.Destroy()
- }
- if it.snap != nil {
- it.snap.Release()
- }
- if it.upperBound != nil {
- it.upperBound.Destroy()
- }
- if it.lowerBound != nil {
- it.lowerBound.Destroy()
- }
- it.db.RUnlock()
-}
-
-// note: all the iterator use the prefix iterator flag. Which means it may skip the keys for different table
-// prefix.
-func NewDBRangeLimitIterator(db *gorocksdb.DB, min []byte, max []byte, rtype uint8,
- offset int, count int, reverse bool) (*RangeLimitedIterator, error) {
- upperBound := max
- lowerBound := min
- if rtype&common.RangeROpen <= 0 && upperBound != nil {
- // range right not open, we need inclusive the max,
- // however upperBound is exclusive
- upperBound = append(upperBound, 0)
- }
-
- //dbLog.Infof("iterator %v : %v", lowerBound, upperBound)
- dbit, err := NewDBIterator(db, false, true, lowerBound, upperBound, false)
- if err != nil {
- return nil, err
- }
- if !reverse {
- return NewRangeLimitIterator(dbit, &Range{Min: min, Max: max, Type: rtype},
- &Limit{Offset: offset, Count: count}), nil
- } else {
- return NewRevRangeLimitIterator(dbit, &Range{Min: min, Max: max, Type: rtype},
- &Limit{Offset: offset, Count: count}), nil
- }
-}
-
-func NewSnapshotDBRangeLimitIterator(db *gorocksdb.DB, min []byte, max []byte, rtype uint8,
- offset int, count int, reverse bool) (*RangeLimitedIterator, error) {
- upperBound := max
- lowerBound := min
- if rtype&common.RangeROpen <= 0 && upperBound != nil {
- // range right not open, we need inclusive the max,
- // however upperBound is exclusive
- upperBound = append(upperBound, 0)
- }
- dbit, err := NewDBIterator(db, true, true, lowerBound, upperBound, false)
- if err != nil {
- return nil, err
- }
- if !reverse {
- return NewRangeLimitIterator(dbit, &Range{Min: min, Max: max, Type: rtype},
- &Limit{Offset: offset, Count: count}), nil
- } else {
- return NewRevRangeLimitIterator(dbit, &Range{Min: min, Max: max, Type: rtype},
- &Limit{Offset: offset, Count: count}), nil
- }
-}
-
-func NewDBRangeIterator(db *gorocksdb.DB, min []byte, max []byte, rtype uint8,
- reverse bool) (*RangeLimitedIterator, error) {
- upperBound := max
- lowerBound := min
- if rtype&common.RangeROpen <= 0 && upperBound != nil {
- // range right not open, we need inclusive the max,
- // however upperBound is exclusive
- upperBound = append(upperBound, 0)
- }
- dbit, err := NewDBIterator(db, false, true, lowerBound, upperBound, false)
- if err != nil {
- return nil, err
- }
- if !reverse {
- return NewRangeIterator(dbit, &Range{Min: min, Max: max, Type: rtype}), nil
- } else {
- return NewRevRangeIterator(dbit, &Range{Min: min, Max: max, Type: rtype}), nil
- }
-}
-
-func NewSnapshotDBRangeIterator(db *gorocksdb.DB, min []byte, max []byte, rtype uint8,
- reverse bool) (*RangeLimitedIterator, error) {
- upperBound := max
- lowerBound := min
- if rtype&common.RangeROpen <= 0 && upperBound != nil {
- // range right not open, we need inclusive the max,
- // however upperBound is exclusive
- upperBound = append(upperBound, 0)
- }
- dbit, err := NewDBIterator(db, true, true, lowerBound, upperBound, false)
- if err != nil {
- return nil, err
- }
- if !reverse {
- return NewRangeIterator(dbit, &Range{Min: min, Max: max, Type: rtype}), nil
- } else {
- return NewRevRangeIterator(dbit, &Range{Min: min, Max: max, Type: rtype}), nil
- }
-}
-
-type RangeLimitedIterator struct {
- Iterator
- l Limit
- r Range
- // maybe step should not auto increase, we need count for actually element
- step int
- reverse bool
-}
-
-func (it *RangeLimitedIterator) Valid() bool {
- if it.l.Offset < 0 {
- return false
- }
- if it.l.Count >= 0 && it.step >= it.l.Count {
- return false
- }
- if !it.Iterator.Valid() {
- return false
- }
-
- if !it.reverse {
- if it.r.Max != nil {
- r := bytes.Compare(it.Iterator.RefKey(), it.r.Max)
- if it.r.Type&common.RangeROpen > 0 {
- return !(r >= 0)
- } else {
- return !(r > 0)
- }
- }
- } else {
- if it.r.Min != nil {
- r := bytes.Compare(it.Iterator.RefKey(), it.r.Min)
- if it.r.Type&common.RangeLOpen > 0 {
- return !(r <= 0)
- } else {
- return !(r < 0)
- }
- }
- }
- return true
-}
-
-func (it *RangeLimitedIterator) Next() {
- it.step++
- if !it.reverse {
- it.Iterator.Next()
- } else {
- it.Iterator.Prev()
- }
-}
-
-func NewRangeLimitIterator(i Iterator, r *Range, l *Limit) *RangeLimitedIterator {
- return rangeLimitIterator(i, r, l, false)
-}
-func NewRevRangeLimitIterator(i Iterator, r *Range, l *Limit) *RangeLimitedIterator {
- return rangeLimitIterator(i, r, l, true)
-}
-func NewRangeIterator(i Iterator, r *Range) *RangeLimitedIterator {
- return rangeLimitIterator(i, r, &Limit{0, -1}, false)
-}
-func NewRevRangeIterator(i Iterator, r *Range) *RangeLimitedIterator {
- return rangeLimitIterator(i, r, &Limit{0, -1}, true)
-}
-func rangeLimitIterator(i Iterator, r *Range, l *Limit, reverse bool) *RangeLimitedIterator {
- it := &RangeLimitedIterator{
- Iterator: i,
- l: *l,
- r: *r,
- reverse: reverse,
- step: 0,
- }
- if l.Offset < 0 {
- return it
- }
- if !reverse {
- if r.Min == nil {
- it.Iterator.SeekToFirst()
- } else {
- it.Iterator.Seek(r.Min)
- if r.Type&common.RangeLOpen > 0 {
- if it.Iterator.Valid() && bytes.Equal(it.Iterator.RefKey(), r.Min) {
- it.Iterator.Next()
- }
- }
- }
- } else {
- if r.Max == nil {
- it.Iterator.SeekToLast()
- } else {
- it.Iterator.SeekForPrev(r.Max)
- if !it.Iterator.Valid() {
- it.Iterator.SeekToLast()
- if it.Iterator.Valid() && bytes.Compare(it.Iterator.RefKey(), r.Max) == 1 {
- dbLog.Infof("iterator seek to last key %v should not great than seek to max %v", it.Iterator.RefKey(), r.Max)
- }
- }
- if r.Type&common.RangeROpen > 0 {
- if it.Iterator.Valid() && bytes.Equal(it.Iterator.RefKey(), r.Max) {
- it.Iterator.Prev()
- }
- }
- }
- }
- for i := 0; i < l.Offset; i++ {
- if it.Iterator.Valid() {
- if !it.reverse {
- it.Iterator.Next()
- } else {
- it.Iterator.Prev()
- }
- }
- }
- return it
-}
diff --git a/rockredis/rockredis.go b/rockredis/rockredis.go
index 1288fddc..f9b688ff 100644
--- a/rockredis/rockredis.go
+++ b/rockredis/rockredis.go
@@ -1,6 +1,7 @@
package rockredis
import (
+ "bytes"
"errors"
"fmt"
"hash"
@@ -16,19 +17,28 @@ import (
"sync/atomic"
"time"
- "github.com/spaolacci/murmur3"
+ "github.com/twmb/murmur3"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/gorocksdb"
- "github.com/shirou/gopsutil/mem"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
+ "github.com/youzan/ZanRedisDB/metric"
)
const (
- MAX_CHECKPOINT_NUM = 10
- HLLCacheSize = 512
+ MaxCheckpointNum = 10
+ MaxRemoteCheckpointNum = 3
+ HLLReadCacheSize = 1024
+ HLLWriteCacheSize = 32
+ writeTmpSize = 1024 * 512
+ minExpiredPossible = 1500000000
)
-var dbLog = common.NewLevelLogger(common.LOG_INFO, common.NewDefaultLogger("db"))
+var (
+ lazyCleanExpired = time.Hour * 48
+ timeUpdateFreq = 10000
+)
+
+var dbLog = common.NewLevelLogger(common.LOG_INFO, common.NewLogger())
func SetLogLevel(level int32) {
dbLog.SetLevel(level)
@@ -45,170 +55,23 @@ func GetCheckpointDir(term uint64, index uint64) string {
var batchableCmds map[string]bool
-type RockOptions struct {
- VerifyReadChecksum bool `json:"verify_read_checksum"`
- BlockSize int `json:"block_size"`
- BlockCache int64 `json:"block_cache"`
- CacheIndexAndFilterBlocks bool `json:"cache_index_and_filter_blocks"`
- WriteBufferSize int `json:"write_buffer_size"`
- MaxWriteBufferNumber int `json:"max_write_buffer_number"`
- MinWriteBufferNumberToMerge int `json:"min_write_buffer_number_to_merge"`
- Level0FileNumCompactionTrigger int `json:"level0_file_num_compaction_trigger"`
- MaxBytesForLevelBase uint64 `json:"max_bytes_for_level_base"`
- TargetFileSizeBase uint64 `json:"target_file_size_base"`
- MaxBackgroundFlushes int `json:"max_background_flushes"`
- MaxBackgroundCompactions int `json:"max_background_compactions"`
- MinLevelToCompress int `json:"min_level_to_compress"`
- MaxMainifestFileSize uint64 `json:"max_mainifest_file_size"`
- RateBytesPerSec int64 `json:"rate_bytes_per_sec"`
- BackgroundHighThread int `json:"background_high_thread,omitempty"`
- BackgroundLowThread int `json:"background_low_thread,omitempty"`
- AdjustThreadPool bool `json:"adjust_thread_pool,omitempty"`
- UseSharedCache bool `json:"use_shared_cache,omitempty"`
- UseSharedRateLimiter bool `json:"use_shared_rate_limiter,omitempty"`
- DisableWAL bool `json:"disable_wal,omitempty"`
- DisableMergeCounter bool `json:"disable_merge_counter,omitempty"`
-}
-
-func FillDefaultOptions(opts *RockOptions) {
- // use large block to reduce index block size for hdd
- // if using ssd, should use the default value
- if opts.BlockSize <= 0 {
- // for hdd use 64KB and above
- // for ssd use 32KB and below
- opts.BlockSize = 1024 * 32
- }
- // should about 20% less than host RAM
- // http://smalldatum.blogspot.com/2016/09/tuning-rocksdb-block-cache.html
- if opts.BlockCache <= 0 {
- v, err := mem.VirtualMemory()
- if err != nil {
- opts.BlockCache = 1024 * 1024 * 128
- } else {
- opts.BlockCache = int64(v.Total / 100)
- if opts.UseSharedCache {
- opts.BlockCache *= 10
- } else {
- if opts.BlockCache < 1024*1024*64 {
- opts.BlockCache = 1024 * 1024 * 64
- } else if opts.BlockCache > 1024*1024*1024*8 {
- opts.BlockCache = 1024 * 1024 * 1024 * 8
- }
- }
- }
- }
- // keep level0_file_num_compaction_trigger * write_buffer_size * min_write_buffer_number_tomerge = max_bytes_for_level_base to minimize write amplification
- if opts.WriteBufferSize <= 0 {
- opts.WriteBufferSize = 1024 * 1024 * 64
- }
- if opts.MaxWriteBufferNumber <= 0 {
- opts.MaxWriteBufferNumber = 6
- }
- if opts.MinWriteBufferNumberToMerge <= 0 {
- opts.MinWriteBufferNumberToMerge = 2
- }
- if opts.Level0FileNumCompactionTrigger <= 0 {
- opts.Level0FileNumCompactionTrigger = 2
- }
- if opts.MaxBytesForLevelBase <= 0 {
- opts.MaxBytesForLevelBase = 1024 * 1024 * 256
- }
- if opts.TargetFileSizeBase <= 0 {
- opts.TargetFileSizeBase = 1024 * 1024 * 64
- }
- if opts.MaxBackgroundFlushes <= 0 {
- opts.MaxBackgroundFlushes = 2
- }
- if opts.MaxBackgroundCompactions <= 0 {
- opts.MaxBackgroundCompactions = 4
- }
- if opts.MinLevelToCompress <= 0 {
- opts.MinLevelToCompress = 3
- }
- if opts.MaxMainifestFileSize <= 0 {
- opts.MaxMainifestFileSize = 1024 * 1024 * 32
- }
- if opts.AdjustThreadPool {
- if opts.BackgroundHighThread <= 0 {
- opts.BackgroundHighThread = 2
- }
- if opts.BackgroundLowThread <= 0 {
- opts.BackgroundLowThread = 4
- }
- }
-}
-
-type SharedRockConfig struct {
- SharedCache *gorocksdb.Cache
- SharedEnv *gorocksdb.Env
- SharedRateLimiter *gorocksdb.RateLimiter
-}
-type RockConfig struct {
- DataDir string
- EnableTableCounter bool
+type RockRedisDBConfig struct {
+ engine.RockEngConfig
+ KeepBackup int
// this will ignore all update and non-exist delete
EstimateTableCounter bool
ExpirationPolicy common.ExpirationPolicy
- DefaultReadOpts *gorocksdb.ReadOptions
- DefaultWriteOpts *gorocksdb.WriteOptions
- SharedConfig *SharedRockConfig
- RockOptions
+ DataVersion common.DataVersionT
}
-func NewRockConfig() *RockConfig {
- c := &RockConfig{
- DefaultReadOpts: gorocksdb.NewDefaultReadOptions(),
- DefaultWriteOpts: gorocksdb.NewDefaultWriteOptions(),
- EnableTableCounter: true,
+func NewRockRedisDBConfig() *RockRedisDBConfig {
+ c := &RockRedisDBConfig{
EstimateTableCounter: false,
}
- c.DefaultReadOpts.SetVerifyChecksums(false)
- FillDefaultOptions(&c.RockOptions)
+ c.RockEngConfig = *engine.NewRockConfig()
return c
}
-func NewSharedRockConfig(opt RockOptions) *SharedRockConfig {
- rc := &SharedRockConfig{}
- if opt.UseSharedCache {
- if opt.BlockCache <= 0 {
- v, err := mem.VirtualMemory()
- if err != nil {
- opt.BlockCache = 1024 * 1024 * 128 * 10
- } else {
- opt.BlockCache = int64(v.Total / 10)
- }
- }
- rc.SharedCache = gorocksdb.NewLRUCache(opt.BlockCache)
- }
- if opt.AdjustThreadPool {
- rc.SharedEnv = gorocksdb.NewDefaultEnv()
- if opt.BackgroundHighThread <= 0 {
- opt.BackgroundHighThread = 3
- }
- if opt.BackgroundLowThread <= 0 {
- opt.BackgroundLowThread = 6
- }
- rc.SharedEnv.SetBackgroundThreads(opt.BackgroundLowThread)
- rc.SharedEnv.SetHighPriorityBackgroundThreads(opt.BackgroundHighThread)
- }
- if opt.UseSharedRateLimiter && opt.RateBytesPerSec > 0 {
- rc.SharedRateLimiter = gorocksdb.NewGenericRateLimiter(opt.RateBytesPerSec, 100*1000, 10)
- }
- return rc
-}
-
-func (src *SharedRockConfig) Destroy() {
- if src.SharedCache != nil {
- src.SharedCache.Destroy()
- }
- if src.SharedEnv != nil {
- src.SharedEnv.Destroy()
- }
- if src.SharedRateLimiter != nil {
- src.SharedRateLimiter.Destroy()
- }
-}
-
type CheckpointSortNames []string
func (self CheckpointSortNames) Len() int {
@@ -240,7 +103,32 @@ func (self CheckpointSortNames) Less(i, j int) bool {
return lterm < rterm
}
-func purgeOldCheckpoint(keepNum int, checkpointDir string) {
+func GetLatestCheckpoint(checkpointDir string, skipN int, matchFunc func(string) bool) string {
+ checkpointList, err := filepath.Glob(path.Join(checkpointDir, "*-*"))
+ if err != nil {
+ return ""
+ }
+ if len(checkpointList) <= skipN {
+ return ""
+ }
+
+ sortedNameList := CheckpointSortNames(checkpointList)
+ sort.Sort(sortedNameList)
+ startIndex := len(sortedNameList) - 1
+ for i := startIndex; i >= 0; i-- {
+ curDir := sortedNameList[i]
+ if matchFunc(curDir) {
+ if skipN > 0 {
+ skipN--
+ continue
+ }
+ return curDir
+ }
+ }
+ return ""
+}
+
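// Hypothetical usage of GetLatestCheckpoint above: pick the newest checkpoint
// directory that passes a caller-supplied check (here, that it still exists on
// disk) without skipping any newer candidates. The skipN and matchFunc values
// are illustrative only.
//
//	latest := GetLatestCheckpoint(r.GetBackupDir(), 0, func(dir string) bool {
//		_, serr := os.Stat(dir)
//		return serr == nil
//	})
//	if latest == "" {
//		// no usable checkpoint found
//	}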
+func purgeOldCheckpoint(keepNum int, checkpointDir string, latestSnapIndex uint64) {
defer func() {
if e := recover(); e != nil {
dbLog.Infof("purge old checkpoint failed: %v", e)
@@ -254,157 +142,186 @@ func purgeOldCheckpoint(keepNum int, checkpointDir string) {
sortedNameList := CheckpointSortNames(checkpointList)
sort.Sort(sortedNameList)
for i := 0; i < len(sortedNameList)-keepNum; i++ {
+ fn := path.Base(sortedNameList[i+keepNum])
+ subs := strings.Split(fn, "-")
+ if len(subs) != 2 {
+ continue
+ }
+ sindex, err := strconv.ParseUint(subs[1], 16, 64)
+ if err != nil {
+ dbLog.Infof("checkpoint name index invalid: %v, %v", subs, err.Error())
+ continue
+ }
+ if sindex >= latestSnapIndex {
+ break
+ }
os.RemoveAll(sortedNameList[i])
dbLog.Infof("clean checkpoint : %v", sortedNameList[i])
}
}
}
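
// Minimal standalone sketch of the purge guard added above. It assumes the
// checkpoint directory name format "<term>-<index>" with the index encoded in
// hex, matching the strconv.ParseUint(subs[1], 16, 64) call; any checkpoint at
// or beyond the latest raft snapshot index is kept. Requires "path", "strconv"
// and "strings".
func mayPurgeCheckpoint(dir string, latestSnapIndex uint64) bool {
	subs := strings.Split(path.Base(dir), "-")
	if len(subs) != 2 {
		return false
	}
	sindex, err := strconv.ParseUint(subs[1], 16, 64)
	if err != nil {
		return false
	}
	// never purge a checkpoint the latest raft snapshot may still reference
	return sindex < latestSnapIndex
}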
+type rockCompactFilter struct {
+ rdb *RockDB
+ checkedCnt int64
+ cachedTimeSec int64
+ metric.CompactFilterStats
+}
+
+func (cf *rockCompactFilter) Name() string {
+ return "rockredis.compactfilter"
+}
+
+func (cf *rockCompactFilter) Stats() metric.CompactFilterStats {
+ var s metric.CompactFilterStats
+ s.ExpiredCleanCnt = atomic.LoadInt64(&cf.ExpiredCleanCnt)
+ s.VersionCleanCnt = atomic.LoadInt64(&cf.VersionCleanCnt)
+ s.DelCleanCnt = atomic.LoadInt64(&cf.DelCleanCnt)
+ return s
+}
+
+func (cf *rockCompactFilter) lazyExpireCheck(h headerMetaValue, curCnt int64) bool {
+ if h.ExpireAt == 0 {
+ return false
+ }
+ if h.ExpireAt <= minExpiredPossible {
+ dbLog.Infof("db %s key %v has invalid small expired timestamp: %v", cf.rdb.GetDataDir(), h.UserData, h.ExpireAt)
+ return false
+ }
+	// avoid calling now() too often; cache the time for a while
+ ts := atomic.LoadInt64(&cf.cachedTimeSec)
+ if curCnt > int64(timeUpdateFreq) || ts <= 0 {
+ ts = time.Now().Unix()
+ atomic.StoreInt64(&cf.cachedTimeSec, ts)
+ atomic.StoreInt64(&cf.checkedCnt, 0)
+ }
+ if int64(h.ExpireAt)+lazyCleanExpired.Nanoseconds()/int64(time.Second) < ts {
+ //dbLog.Debugf("db %s key %v clean since expired timestamp: %v, %v", cf.rdb.GetDataDir(), h.UserData, h.ExpireAt, ts)
+ atomic.AddInt64(&cf.ExpiredCleanCnt, 1)
+ return true
+ }
+ return false
+}
+
+func (cf *rockCompactFilter) Filter(level int, key, value []byte) (bool, []byte) {
+ // dbLog.Debugf("db %v level %v compacting: %v, %s", cf.rdb.GetDataDir(), level, key, value)
+ // check key type
+ if len(key) < 1 {
+ return false, nil
+ }
+ newCnt := atomic.AddInt64(&cf.checkedCnt, 1)
+ switch key[0] {
+ case KVType, HSizeType, LMetaType, SSizeType, ZSizeType, BitmapMetaType:
+ var h headerMetaValue
+ _, err := h.decode(value)
+ if err != nil {
+ return false, nil
+ }
+ return cf.lazyExpireCheck(h, newCnt), nil
+ case HashType, ListType, SetType, ZSetType, ZScoreType, BitmapType:
+ dt, rawKey, ver, err := convertCollDBKeyToRawKey(key)
+ if err != nil {
+ return false, nil
+ }
+ if ver == 0 {
+ return false, nil
+ }
+		// the version is a timestamp in nanoseconds; apply the lazy-expire check
+ ts := atomic.LoadInt64(&cf.cachedTimeSec)
+ if int64(ver)+lazyCleanExpired.Nanoseconds() >= ts*int64(time.Second) {
+ return false, nil
+ }
+ metak, err := encodeMetaKey(dt, rawKey)
+ if err != nil {
+ return false, nil
+ }
+ // maybe cache the meta value if the collection has too many subkeys
+ metav, err := cf.rdb.GetBytesNoLock(metak)
+ if err != nil {
+ return false, nil
+ }
+ if metav == nil {
+			// collection meta not found, which means the whole collection has been deleted
+ //dbLog.Debugf("db %s key %v clean since meta not exist: %v, %v, %v", cf.rdb.GetDataDir(), key, metak, rawKey, ver)
+ atomic.AddInt64(&cf.DelCleanCnt, 1)
+ return true, nil
+ }
+ var h headerMetaValue
+ _, err = h.decode(metav)
+ if err != nil {
+ return false, nil
+ }
+ if h.ValueVersion == 0 {
+ // maybe no version?
+ return false, nil
+ }
+ // how to lazy clean for version mismatch?
+ if h.ValueVersion != ver {
+ //dbLog.Debugf("db %s key %v clean since mismatch version: %v, %v", cf.rdb.GetDataDir(), key, h, ver)
+ atomic.AddInt64(&cf.VersionCleanCnt, 1)
+ return true, nil
+ }
+ return cf.lazyExpireCheck(h, newCnt), nil
+ }
+ return false, nil
+}
+
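// Standalone sketch of the lazy-expire rule implemented by lazyExpireCheck
// above: a value with a TTL is only dropped during compaction once it has been
// expired for longer than lazyCleanExpired (48h here), so recently expired keys
// are still handled by the normal expiration path. The constant and function
// names below are illustrative only.
package main

import (
	"fmt"
	"time"
)

const lazyCleanWindow = 48 * time.Hour

// shouldCompactAway mirrors the check
// int64(h.ExpireAt)+lazyCleanExpired.Nanoseconds()/int64(time.Second) < now
// with the expire timestamp given in seconds.
func shouldCompactAway(expireAtSec int64, now time.Time) bool {
	if expireAtSec == 0 {
		return false // no TTL on this key
	}
	return expireAtSec+int64(lazyCleanWindow/time.Second) < now.Unix()
}

func main() {
	now := time.Now()
	fmt.Println(shouldCompactAway(now.Add(-49*time.Hour).Unix(), now)) // true: past the lazy window
	fmt.Println(shouldCompactAway(now.Add(-time.Hour).Unix(), now))    // false: keep for now
}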
type RockDB struct {
expiration
- cfg *RockConfig
- eng *gorocksdb.DB
- dbOpts *gorocksdb.Options
- defaultWriteOpts *gorocksdb.WriteOptions
- defaultReadOpts *gorocksdb.ReadOptions
- wb *gorocksdb.WriteBatch
- lruCache *gorocksdb.Cache
- rl *gorocksdb.RateLimiter
+ cfg *RockRedisDBConfig
+ rockEng engine.KVEngine
+ wb engine.WriteBatch
+ writeTmpBuf []byte
quit chan struct{}
wg sync.WaitGroup
backupC chan *BackupInfo
- engOpened int32
indexMgr *IndexMgr
isBatching int32
- checkpointDirLock sync.Mutex
+ checkpointDirLock sync.RWMutex
hasher64 hash.Hash64
hllCache *hllCache
stopping int32
+ engOpened int32
+ latestSnapIndex uint64
+ topLargeCollKeys *metric.CollSizeHeap
+ compactFilter *rockCompactFilter
}
-func OpenRockDB(cfg *RockConfig) (*RockDB, error) {
- if len(cfg.DataDir) == 0 {
- return nil, errors.New("config error")
- }
-
- if cfg.DisableWAL {
- cfg.DefaultWriteOpts.DisableWAL(true)
- }
- os.MkdirAll(cfg.DataDir, common.DIR_PERM)
- // options need be adjust due to using hdd or sdd, please reference
- // https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide
- bbto := gorocksdb.NewDefaultBlockBasedTableOptions()
- // use large block to reduce index block size for hdd
- // if using ssd, should use the default value
- bbto.SetBlockSize(cfg.BlockSize)
- // should about 20% less than host RAM
- // http://smalldatum.blogspot.com/2016/09/tuning-rocksdb-block-cache.html
- var lru *gorocksdb.Cache
- if cfg.RockOptions.UseSharedCache {
- if cfg.SharedConfig == nil || cfg.SharedConfig.SharedCache == nil {
- return nil, errors.New("missing shared cache instance")
- }
- bbto.SetBlockCache(cfg.SharedConfig.SharedCache)
- dbLog.Infof("use shared cache: %v", cfg.SharedConfig.SharedCache)
- } else {
- lru = gorocksdb.NewLRUCache(cfg.BlockCache)
- bbto.SetBlockCache(lru)
- }
- // cache index and filter blocks can save some memory,
- // if not cache, the index and filter will be pre-loaded in memory
- bbto.SetCacheIndexAndFilterBlocks(cfg.CacheIndexAndFilterBlocks)
- // /* filter should not block_based, use sst based to reduce cpu */
- filter := gorocksdb.NewBloomFilter(10, false)
- bbto.SetFilterPolicy(filter)
- opts := gorocksdb.NewDefaultOptions()
- // optimize filter for hit, use less memory since last level will has no bloom filter
- // opts.OptimizeFilterForHits(true)
- opts.SetBlockBasedTableFactory(bbto)
- if cfg.RockOptions.AdjustThreadPool {
- if cfg.SharedConfig == nil || cfg.SharedConfig.SharedEnv == nil {
- return nil, errors.New("missing shared env instance")
- }
- opts.SetEnv(cfg.SharedConfig.SharedEnv)
- dbLog.Infof("use shared env: %v", cfg.SharedConfig.SharedEnv)
- }
-
- var rl *gorocksdb.RateLimiter
- if cfg.RateBytesPerSec > 0 {
- if cfg.UseSharedRateLimiter {
- if cfg.SharedConfig == nil {
- return nil, errors.New("missing shared instance")
- }
- opts.SetRateLimiter(cfg.SharedConfig.SharedRateLimiter)
- dbLog.Infof("use shared rate limiter: %v", cfg.SharedConfig.SharedRateLimiter)
- } else {
- rl = gorocksdb.NewGenericRateLimiter(cfg.RateBytesPerSec, 100*1000, 10)
- opts.SetRateLimiter(rl)
- }
- }
-
- opts.SetCreateIfMissing(true)
- opts.SetMaxOpenFiles(-1)
- // keep level0_file_num_compaction_trigger * write_buffer_size * min_write_buffer_number_tomerge = max_bytes_for_level_base to minimize write amplification
- opts.SetWriteBufferSize(cfg.WriteBufferSize)
- opts.SetMaxWriteBufferNumber(cfg.MaxWriteBufferNumber)
- opts.SetMinWriteBufferNumberToMerge(cfg.MinWriteBufferNumberToMerge)
- opts.SetLevel0FileNumCompactionTrigger(cfg.Level0FileNumCompactionTrigger)
- opts.SetMaxBytesForLevelBase(cfg.MaxBytesForLevelBase)
- opts.SetTargetFileSizeBase(cfg.TargetFileSizeBase)
- opts.SetMaxBackgroundFlushes(cfg.MaxBackgroundFlushes)
- opts.SetMaxBackgroundCompactions(cfg.MaxBackgroundCompactions)
- opts.SetMinLevelToCompress(cfg.MinLevelToCompress)
- // we use table, so we use prefix seek feature
- opts.SetPrefixExtractor(gorocksdb.NewFixedPrefixTransform(3))
- opts.SetMemtablePrefixBloomSizeRatio(0.1)
- opts.EnableStatistics()
- opts.SetMaxLogFileSize(1024 * 1024 * 32)
- opts.SetLogFileTimeToRoll(3600 * 24 * 3)
- opts.SetMaxManifestFileSize(cfg.MaxMainifestFileSize)
- opts.SetMaxSuccessiveMerges(1000)
- // https://github.com/facebook/mysql-5.6/wiki/my.cnf-tuning
- // rate limiter need to reduce the compaction io
- if !cfg.DisableMergeCounter {
- if cfg.EnableTableCounter {
- opts.SetUint64AddMergeOperator()
- }
- } else {
- cfg.EnableTableCounter = false
+func OpenRockDB(cfg *RockRedisDBConfig) (*RockDB, error) {
+ eng, err := engine.NewKVEng(&cfg.RockEngConfig)
+ if err != nil {
+ return nil, err
}
db := &RockDB{
cfg: cfg,
- dbOpts: opts,
- lruCache: lru,
- rl: rl,
- defaultReadOpts: cfg.DefaultReadOpts,
- defaultWriteOpts: cfg.DefaultWriteOpts,
- wb: gorocksdb.NewWriteBatch(),
+ rockEng: eng,
+ writeTmpBuf: make([]byte, writeTmpSize),
backupC: make(chan *BackupInfo),
quit: make(chan struct{}),
hasher64: murmur3.New64(),
+ topLargeCollKeys: metric.NewCollSizeHeap(metric.DefaultHeapCapacity),
}
switch cfg.ExpirationPolicy {
- case common.ConsistencyDeletion:
- db.expiration = newConsistencyExpiration(db)
-
case common.LocalDeletion:
db.expiration = newLocalExpiration(db)
-
- //TODO
- //case common.PeriodicalRotation:
+ case common.WaitCompact:
+ db.expiration = newCompactExpiration(db)
+ // register the compact callback to lazy clean the expired data
+ db.RegisterCompactCallback()
default:
return nil, errors.New("unsupported ExpirationPolicy")
}
- err := db.reOpenEng()
+ err = db.reOpenEng()
if err != nil {
return nil, err
}
- os.MkdirAll(db.GetBackupDir(), common.DIR_PERM)
- dbLog.Infof("rocksdb opened: %v", db.GetDataDir())
+ if !cfg.ReadOnly {
+ os.MkdirAll(db.GetBackupDir(), common.DIR_PERM)
+ }
db.wg.Add(1)
go func() {
@@ -419,103 +336,119 @@ func GetBackupDir(base string) string {
return path.Join(base, "rocksdb_backup")
}
-func (r *RockDB) CheckExpiredData(buffer common.ExpiredDataBuffer, stop chan struct{}) error {
- if r.cfg.ExpirationPolicy != common.ConsistencyDeletion {
- return fmt.Errorf("can not check expired data at the expiration-policy:%d", r.cfg.ExpirationPolicy)
- }
- return r.expiration.check(buffer, stop)
+func GetBackupDirForRemote(base string) string {
+ return path.Join(base, "rocksdb_backup", "remote")
+}
+
+func (r *RockDB) SetLatestSnapIndex(i uint64) {
+ atomic.StoreUint64(&r.latestSnapIndex, i)
}
func (r *RockDB) GetBackupBase() string {
return r.cfg.DataDir
}
+func (r *RockDB) GetBackupDirForRemote() string {
+ return GetBackupDirForRemote(r.cfg.DataDir)
+}
+
func (r *RockDB) GetBackupDir() string {
return GetBackupDir(r.cfg.DataDir)
}
-func GetDataDirFromBase(base string) string {
- return path.Join(base, "rocksdb")
+func (r *RockDB) GetDataDir() string {
+ return r.rockEng.GetDataDir()
}
-func (r *RockDB) GetDataDir() string {
- return path.Join(r.cfg.DataDir, "rocksdb")
+func (r *RockDB) GetCompactFilterStats() metric.CompactFilterStats {
+ if r.compactFilter != nil {
+ return r.compactFilter.Stats()
+ }
+ return metric.CompactFilterStats{}
+}
+
+func (r *RockDB) RegisterCompactCallback() {
+ filter := &rockCompactFilter{
+ rdb: r,
+ }
+ r.compactFilter = filter
+ r.rockEng.SetCompactionFilter(filter)
}
func (r *RockDB) reOpenEng() error {
var err error
- hcache, err := newHLLCache(HLLCacheSize, r)
+ hcache, err := newHLLCache(HLLReadCacheSize, HLLWriteCacheSize, r)
if err != nil {
return err
}
r.hllCache = hcache
-
- r.eng, err = gorocksdb.OpenDb(r.dbOpts, r.GetDataDir())
r.indexMgr = NewIndexMgr()
+
+ err = r.rockEng.OpenEng()
if err != nil {
return err
}
+ r.wb = r.rockEng.DefaultWriteBatch()
+
err = r.indexMgr.LoadIndexes(r)
if err != nil {
- dbLog.Infof("rocksdb %v load index failed: %v", r.GetDataDir(), err)
- r.eng.Close()
+ dbLog.Warningf("rocksdb %v load index failed: %v", r.GetDataDir(), err)
+ r.rockEng.CloseEng()
return err
}
-
- r.expiration.Start()
+ if r.expiration != nil && !r.cfg.ReadOnly {
+ r.expiration.Start()
+ }
atomic.StoreInt32(&r.engOpened, 1)
- dbLog.Infof("rocksdb reopened: %v", r.GetDataDir())
return nil
}
-func (r *RockDB) getDBEng() *gorocksdb.DB {
- e := r.eng
- return e
-}
-
func (r *RockDB) getIndexer() *IndexMgr {
e := r.indexMgr
return e
}
-func (r *RockDB) CompactRange() {
- var rg gorocksdb.Range
- r.eng.CompactRange(rg)
+func (r *RockDB) SetMaxBackgroundOptions(maxCompact int, maxBackJobs int) error {
+ return r.rockEng.SetMaxBackgroundOptions(maxCompact, maxBackJobs)
}
-// [start, end)
-func (r *RockDB) CompactTableRange(table string) {
- dts := []byte{KVType, HashType, ListType, SetType, ZSetType}
- dtsMeta := []byte{KVType, HSizeType, LMetaType, SSizeType, ZSizeType}
- for i, dt := range dts {
- rgs, err := getTableDataRange(dt, []byte(table), nil, nil)
- if err != nil {
- dbLog.Infof("failed to build dt %v data range: %v", dt, err)
- continue
- }
- // compact data range
- dbLog.Infof("compacting dt %v data range: %v", dt, rgs)
- for _, rg := range rgs {
- r.eng.CompactRange(rg)
- }
- // compact meta range
- minKey, maxKey, err := getTableMetaRange(dtsMeta[i], []byte(table), nil, nil)
- var rg gorocksdb.Range
- rg.Start = minKey
- rg.Limit = maxKey
- dbLog.Infof("compacting dt %v meta range: %v, %v", dt, minKey, maxKey)
- r.eng.CompactRange(rg)
- }
+// interrupt the manual compaction to avoid stalling for too long?
+func (r *RockDB) DisableManualCompact(disable bool) {
+ r.rockEng.DisableManualCompact(disable)
+}
+
+func (r *RockDB) CompactAllRange() {
+ r.rockEng.CompactAllRange()
+}
+
+func (r *RockDB) CompactOldExpireData() {
+ now := time.Now().Unix()
+ minKey := expEncodeTimeKey(NoneType, nil, 0)
+ maxKey := expEncodeTimeKey(maxDataType, nil, now)
+ r.CompactRange(minKey, maxKey)
+}
+
+func (r *RockDB) CompactRange(minKey []byte, maxKey []byte) {
+ var rg engine.CRange
+ rg.Start = minKey
+ rg.Limit = maxKey
+ dbLog.Infof("compacting range: %v, %v", minKey, maxKey)
+ r.rockEng.CompactRange(rg)
}
func (r *RockDB) closeEng() {
- if r.eng != nil {
- if atomic.CompareAndSwapInt32(&r.engOpened, 1, 0) {
+ if atomic.CompareAndSwapInt32(&r.engOpened, 1, 0) {
+ if r.hllCache != nil {
r.hllCache.Flush()
+ }
+ if r.indexMgr != nil {
r.indexMgr.Close()
+ }
+ if r.expiration != nil {
r.expiration.Stop()
- r.eng.Close()
- dbLog.Infof("rocksdb engine closed: %v", r.GetDataDir())
+ }
+ if r.rockEng != nil {
+ r.rockEng.CloseEng()
}
}
}
@@ -531,36 +464,111 @@ func (r *RockDB) Close() {
r.expiration.Destroy()
r.expiration = nil
}
- if r.defaultReadOpts != nil {
- r.defaultReadOpts.Destroy()
- r.defaultReadOpts = nil
- }
- if r.defaultWriteOpts != nil {
- r.defaultWriteOpts.Destroy()
- }
- if r.wb != nil {
- r.wb.Destroy()
- }
- if r.dbOpts != nil {
- r.dbOpts.Destroy()
- r.dbOpts = nil
- }
- if r.lruCache != nil {
- r.lruCache.Destroy()
- r.lruCache = nil
- }
- if r.rl != nil {
- r.rl.Destroy()
- r.rl = nil
+ if r.rockEng != nil {
+ r.rockEng.CloseAll()
}
dbLog.Infof("rocksdb %v closed", r.cfg.DataDir)
}
+func (r *RockDB) GetInternalStatus() map[string]interface{} {
+ return r.rockEng.GetInternalStatus()
+}
+
+func (r *RockDB) GetInternalPropertyStatus(p string) string {
+ return r.rockEng.GetInternalPropertyStatus(p)
+}
+
func (r *RockDB) GetStatistics() string {
- return r.dbOpts.GetStatistics()
+ return r.rockEng.GetStatistics()
+}
+
+func (r *RockDB) GetTopLargeKeys() []metric.TopNInfo {
+ return r.topLargeCollKeys.TopKeys()
+}
+
+func (r *RockDB) GetBytesNoLock(key []byte) ([]byte, error) {
+ return r.rockEng.GetBytesNoLock(key)
+}
+
+func (r *RockDB) GetBytes(key []byte) ([]byte, error) {
+ return r.rockEng.GetBytes(key)
+}
+
+func (r *RockDB) MultiGetBytes(keyList [][]byte, values [][]byte, errs []error) {
+ r.rockEng.MultiGetBytes(keyList, values, errs)
+}
+
+func (r *RockDB) Exist(key []byte) (bool, error) {
+ return r.rockEng.Exist(key)
+}
+
+func (r *RockDB) ExistNoLock(key []byte) (bool, error) {
+ return r.rockEng.ExistNoLock(key)
+}
+
+// make sure to close all iterators before doing any writes on the engine, since open iterators may hold read locks on it
+func (r *RockDB) NewDBRangeIterator(min []byte, max []byte, rtype uint8,
+ reverse bool) (*engine.RangeLimitedIterator, error) {
+ opts := engine.IteratorOpts{
+ Reverse: reverse,
+ }
+ opts.Max = max
+ opts.Min = min
+ opts.Type = rtype
+ return engine.NewDBRangeIteratorWithOpts(r.rockEng, opts)
+}
+
+func (r *RockDB) NewDBRangeLimitIterator(min []byte, max []byte, rtype uint8,
+ offset int, count int, reverse bool) (*engine.RangeLimitedIterator, error) {
+ opts := engine.IteratorOpts{
+ Reverse: reverse,
+ }
+ opts.Max = max
+ opts.Min = min
+ opts.Type = rtype
+ opts.Offset = offset
+ opts.Count = count
+ return engine.NewDBRangeLimitIteratorWithOpts(r.rockEng, opts)
+}
+
+func (r *RockDB) NewDBRangeIteratorWithOpts(opts engine.IteratorOpts) (*engine.RangeLimitedIterator, error) {
+ return engine.NewDBRangeIteratorWithOpts(r.rockEng, opts)
}
-func getTableDataRange(dt byte, table []byte, start, end []byte) ([]gorocksdb.Range, error) {
+func (r *RockDB) NewDBRangeLimitIteratorWithOpts(opts engine.IteratorOpts) (*engine.RangeLimitedIterator, error) {
+ return engine.NewDBRangeLimitIteratorWithOpts(r.rockEng, opts)
+}
+
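// Hypothetical usage of the iterator wrappers above. It assumes
// engine.RangeLimitedIterator keeps the Valid/Next/Key/Close methods of the
// rockredis iterator removed in this change; minKey, maxKey and rtype are
// placeholders.
//
//	it, err := r.NewDBRangeIterator(minKey, maxKey, rtype, false)
//	if err != nil {
//		return err
//	}
//	for ; it.Valid(); it.Next() {
//		keys = append(keys, it.Key())
//	}
//	// per the comment above, close the iterator before doing any writes on the engine
//	it.Close()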
+// [start, end)
+func (r *RockDB) CompactTableRange(table string) {
+ dts := []byte{KVType, HashType, ListType, SetType, ZSetType}
+ dtsMeta := []byte{KVType, HSizeType, LMetaType, SSizeType, ZSizeType}
+ for i, dt := range dts {
+ rgs, err := getTableDataRange(dt, []byte(table), nil, nil)
+ if err != nil {
+ dbLog.Infof("failed to build dt %v data range: %v", dt, err)
+ continue
+ }
+ // compact data range
+ dbLog.Infof("compacting dt %v data range: %v", dt, rgs)
+ for _, rg := range rgs {
+ r.rockEng.CompactRange(rg)
+ }
+ // compact meta range
+ minKey, maxKey, err := getTableMetaRange(dtsMeta[i], []byte(table), nil, nil)
+ if err != nil {
+ dbLog.Infof("failed to get table %v data range: %v", table, err)
+ continue
+ }
+ var rg engine.CRange
+ rg.Start = minKey
+ rg.Limit = maxKey
+ dbLog.Infof("compacting dt %v meta range: %v, %v", dt, minKey, maxKey)
+ r.rockEng.CompactRange(rg)
+ }
+}
+
+func getTableDataRange(dt byte, table []byte, start, end []byte) ([]engine.CRange, error) {
minKey, err := encodeFullScanMinKey(dt, table, start, nil)
if err != nil {
dbLog.Infof("failed to build dt %v range: %v", dt, err)
@@ -576,8 +584,8 @@ func getTableDataRange(dt byte, table []byte, start, end []byte) ([]gorocksdb.Ra
dbLog.Infof("failed to build dt %v range: %v", dt, err)
return nil, err
}
- rgs := make([]gorocksdb.Range, 0, 2)
- rgs = append(rgs, gorocksdb.Range{Start: minKey, Limit: maxKey})
+ rgs := make([]engine.CRange, 0, 2)
+ rgs = append(rgs, engine.CRange{Start: minKey, Limit: maxKey})
if dt == ZSetType {
// zset has key-score-member data except the key-member data
zminKey := zEncodeStartKey(table, start)
@@ -587,7 +595,7 @@ func getTableDataRange(dt byte, table []byte, start, end []byte) ([]gorocksdb.Ra
} else {
zmaxKey = zEncodeStopKey(table, end)
}
- rgs = append(rgs, gorocksdb.Range{Start: zminKey, Limit: zmaxKey})
+ rgs = append(rgs, engine.CRange{Start: zminKey, Limit: zmaxKey})
}
dbLog.Debugf("table dt %v data range: %v", dt, rgs)
return rgs, nil
@@ -625,7 +633,7 @@ func (r *RockDB) DeleteTableRange(dryrun bool, table string, start []byte, end [
if tidx != nil {
return errors.New("drop table with any index is not supported currently")
}
- wb := gorocksdb.NewWriteBatch()
+ wb := r.rockEng.NewWriteBatch()
defer wb.Destroy()
// kv, hash, set, list, zset
dts := []byte{KVType, HashType, ListType, SetType, ZSetType}
@@ -650,6 +658,7 @@ func (r *RockDB) DeleteTableRange(dryrun bool, table string, start []byte, end [
continue
}
for _, rg := range rgs {
+ r.rockEng.DeleteFilesInRange(rg)
wb.DeleteRange(rg.Start, rg.Limit)
}
wb.DeleteRange(minMetaKey, maxMetaKey)
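
// Hedged reading of the ordering above (based on general RocksDB semantics,
// not stated in the patch): DeleteFilesInRange immediately drops SST files that
// lie wholly inside the range, which is cheap and frees space right away, while
// the batched DeleteRange tombstones whatever partial data remains at the edges.
for _, rg := range rgs {
	r.rockEng.DeleteFilesInRange(rg)   // unlink fully covered SST files
	wb.DeleteRange(rg.Start, rg.Limit) // tombstone the leftovers
}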
@@ -661,7 +670,7 @@ func (r *RockDB) DeleteTableRange(dryrun bool, table string, start []byte, end [
if dryrun {
return nil
}
- err := r.eng.Write(r.defaultWriteOpts, wb)
+ err := r.rockEng.Write(wb)
if err != nil {
dbLog.Infof("failed to delete table %v range: %v", table, err)
}
@@ -694,7 +703,7 @@ func (r *RockDB) GetTablesSizes(tables []string) []int64 {
func (r *RockDB) GetTableSizeInRange(table string, start []byte, end []byte) int64 {
dts := []byte{KVType, HashType, ListType, SetType, ZSetType}
dtsMeta := []byte{KVType, HSizeType, LMetaType, SSizeType, ZSizeType}
- rgs := make([]gorocksdb.Range, 0, len(dts))
+ rgs := make([]engine.CRange, 0, len(dts))
for i, dt := range dts {
// data range
drgs, err := getTableDataRange(dt, []byte(table), start, end)
@@ -709,12 +718,12 @@ func (r *RockDB) GetTableSizeInRange(table string, start []byte, end []byte) int
dbLog.Infof("failed to build dt %v meta range: %v", dt, err)
continue
}
- var rgMeta gorocksdb.Range
+ var rgMeta engine.CRange
rgMeta.Start = minMetaKey
rgMeta.Limit = maxMetaKey
rgs = append(rgs, rgMeta)
}
- sList := r.eng.GetApproximateSizes(rgs, true)
+ sList := r.rockEng.GetApproximateSizes(rgs, true)
dbLog.Debugf("range %v sizes: %v", rgs, sList)
total := uint64(0)
for _, ss := range sList {
@@ -723,21 +732,19 @@ func (r *RockDB) GetTableSizeInRange(table string, start []byte, end []byte) int
return int64(total)
}
+func (r *RockDB) GetApproximateTotalNum() int64 {
+ return int64(r.rockEng.GetApproximateTotalKeyNum())
+}
+
// [start, end)
func (r *RockDB) GetTableApproximateNumInRange(table string, start []byte, end []byte) int64 {
- numStr := r.eng.GetProperty("rocksdb.estimate-num-keys")
- num, err := strconv.Atoi(numStr)
- if err != nil {
- dbLog.Infof("total keys num error: %v, %v", numStr, err)
- return 0
- }
+ num := r.rockEng.GetApproximateTotalKeyNum()
if num <= 0 {
- dbLog.Debugf("total keys num zero: %v", numStr)
return 0
}
dts := []byte{KVType, HashType, ListType, SetType, ZSetType}
dtsMeta := []byte{KVType, HSizeType, LMetaType, SSizeType, ZSizeType}
- rgs := make([]gorocksdb.Range, 0, len(dts))
+ rgs := make([]engine.CRange, 0, len(dts))
for i, dt := range dts {
// meta range
minMetaKey, maxMetaKey, err := getTableMetaRange(dtsMeta[i], []byte(table), start, end)
@@ -745,20 +752,20 @@ func (r *RockDB) GetTableApproximateNumInRange(table string, start []byte, end [
dbLog.Infof("failed to build dt %v meta range: %v", dt, err)
continue
}
- var rgMeta gorocksdb.Range
+ var rgMeta engine.CRange
rgMeta.Start = minMetaKey
rgMeta.Limit = maxMetaKey
rgs = append(rgs, rgMeta)
}
- filteredRgs := make([]gorocksdb.Range, 0, len(dts))
- sList := r.eng.GetApproximateSizes(rgs, true)
+ filteredRgs := make([]engine.CRange, 0, len(dts))
+ sList := r.rockEng.GetApproximateSizes(rgs, true)
for i, s := range sList {
if s > 0 {
filteredRgs = append(filteredRgs, rgs[i])
}
}
- keyNum := int64(r.eng.GetApproximateKeyNum(filteredRgs))
- dbLog.Debugf("total db key num: %v, table key num %v, %v", num, keyNum, sList)
+ keyNum := int64(r.rockEng.GetApproximateKeyNum(filteredRgs))
+ dbLog.Debugf("total db key num: %v, table %s key num %v, %v", num, table, keyNum, sList)
// use GetApproximateSizes and estimate-keys-num in property
// refer: https://github.com/facebook/mysql-5.6/commit/4ca34d2498e8d16ede73a7955d1ab101a91f102f
// range records = estimate-keys-num * GetApproximateSizes(range) / GetApproximateSizes (total)
@@ -766,30 +773,6 @@ func (r *RockDB) GetTableApproximateNumInRange(table string, start []byte, end [
return int64(keyNum)
}
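
// Worked example of the estimate described in the comment above (numbers are
// illustrative): if the whole DB reports estimate-num-keys = 1,000,000 and its
// total approximate size is 10 GB, a table range whose approximate sizes sum to
// 1 GB is estimated at about 1,000,000 * (1 GB / 10 GB) = 100,000 records.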
-func (r *RockDB) GetInternalStatus() map[string]interface{} {
- status := make(map[string]interface{})
- bbt := r.dbOpts.GetBlockBasedTableFactory()
- if bbt != nil {
- bc := bbt.GetBlockCache()
- if bc != nil {
- status["block-cache-usage"] = bc.GetUsage()
- status["block-cache-pinned-usage"] = bc.GetPinnedUsage()
- }
- }
-
- memStr := r.eng.GetProperty("rocksdb.estimate-table-readers-mem")
- status["estimate-table-readers-mem"] = memStr
- memStr = r.eng.GetProperty("rocksdb.cur-size-all-mem-tables")
- status["cur-size-all-mem-tables"] = memStr
- memStr = r.eng.GetProperty("rocksdb.cur-size-active-mem-table")
- status["cur-size-active-mem-tables"] = memStr
- return status
-}
-
-func (r *RockDB) GetInternalPropertyStatus(p string) string {
- return r.eng.GetProperty(p)
-}
-
type BackupInfo struct {
backupDir string
started chan struct{}
@@ -836,7 +819,7 @@ func (r *RockDB) backupLoop() {
defer close(rsp.done)
dbLog.Infof("begin backup to:%v \n", rsp.backupDir)
start := time.Now()
- ck, err := gorocksdb.NewCheckpoint(r.eng)
+ ck, err := r.rockEng.NewCheckpoint(false)
if err != nil {
dbLog.Infof("init checkpoint failed: %v", err)
rsp.err = err
@@ -850,16 +833,7 @@ func (r *RockDB) backupLoop() {
os.RemoveAll(rsp.backupDir)
}
rsp.rsp = []byte(rsp.backupDir)
- r.eng.RLock()
- if r.eng.IsOpened() {
- time.AfterFunc(time.Millisecond*10, func() {
- close(rsp.started)
- })
- err = ck.Save(rsp.backupDir, math.MaxUint64)
- } else {
- err = errors.New("db engine closed")
- }
- r.eng.RUnlock()
+ err = ck.Save(rsp.backupDir, rsp.started)
r.checkpointDirLock.Unlock()
if err != nil {
dbLog.Infof("save checkpoint failed: %v", err)
@@ -868,11 +842,17 @@ func (r *RockDB) backupLoop() {
}
cost := time.Now().Sub(start)
dbLog.Infof("backup done (cost %v), check point to: %v\n", cost.String(), rsp.backupDir)
- // purge some old checkpoint
- r.checkpointDirLock.Lock()
- purgeOldCheckpoint(MAX_CHECKPOINT_NUM, r.GetBackupDir())
- r.checkpointDirLock.Unlock()
}()
+ // purge some old checkpoint
+ r.checkpointDirLock.Lock()
+ keepNum := MaxCheckpointNum
+ if r.cfg.KeepBackup > 0 {
+ keepNum = r.cfg.KeepBackup
+ }
+	// avoid purging the checkpoint referenced by the latest raft snapshot
+ purgeOldCheckpoint(keepNum, r.GetBackupDir(), atomic.LoadUint64(&r.latestSnapIndex))
+ purgeOldCheckpoint(MaxRemoteCheckpointNum, r.GetBackupDirForRemote(), math.MaxUint64-1)
+ r.checkpointDirLock.Unlock()
case <-r.quit:
return
}
@@ -893,7 +873,12 @@ func (r *RockDB) Backup(term uint64, index uint64) *BackupInfo {
}
func (r *RockDB) IsLocalBackupOK(term uint64, index uint64) (bool, error) {
- backupDir := r.GetBackupDir()
+ r.checkpointDirLock.RLock()
+ defer r.checkpointDirLock.RUnlock()
+ return r.isBackupOKInPath(r.GetBackupDir(), term, index)
+}
+
+func (r *RockDB) isBackupOKInPath(backupDir string, term uint64, index uint64) (bool, error) {
checkpointDir := GetCheckpointDir(term, index)
fullPath := path.Join(backupDir, checkpointDir)
_, err := os.Stat(fullPath)
@@ -901,18 +886,16 @@ func (r *RockDB) IsLocalBackupOK(term uint64, index uint64) (bool, error) {
dbLog.Infof("checkpoint not exist: %v", fullPath)
return false, err
}
+ if r.rockEng == nil {
+ return false, errDBClosed
+ }
dbLog.Infof("begin check local checkpoint : %v", fullPath)
defer dbLog.Infof("check local checkpoint : %v done", fullPath)
- r.checkpointDirLock.Lock()
- defer r.checkpointDirLock.Unlock()
- ro := *r.dbOpts
- ro.SetCreateIfMissing(false)
- db, err := gorocksdb.OpenDbForReadOnly(&ro, fullPath, false)
+ err = r.rockEng.CheckDBEngForRead(fullPath)
if err != nil {
dbLog.Infof("checkpoint open failed: %v", err)
return false, err
}
- db.Close()
return true, nil
}
@@ -939,34 +922,112 @@ func copyFile(src, dst string, override bool) error {
return err
}
defer in.Close()
+	// remove dst first to avoid overwriting hard-linked file content, which would also affect the originally linked file
+ err = os.Remove(dst)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ }
out, err := os.Create(dst)
if err != nil {
return err
}
+ defer func() {
+ cerr := out.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
_, err = io.Copy(out, in)
if err != nil {
- out.Close()
return err
}
err = out.Sync()
+ return err
+}
+
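// Illustrative sketch (not part of the patch) of why copyFile removes dst
// before creating it: if dst is a hard link sharing an inode with another file,
// truncating dst in place would also clobber that file, whereas os.Remove only
// unlinks the dst name and leaves the linked original untouched.
func prepareDst(dst string) (*os.File, error) {
	if err := os.Remove(dst); err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	return os.Create(dst) // backed by a fresh inode, no longer shared
}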
+func (r *RockDB) RestoreFromRemoteBackup(term uint64, index uint64) error {
+	// check whether a backup with the same term-index already exists locally;
+	// if not, we can simply rename the remote snap to this name,
+	// otherwise we need to handle the rename carefully
+ checkpointDir := GetCheckpointDir(term, index)
+ remotePath := path.Join(r.GetBackupDirForRemote(), checkpointDir)
+ _, err := os.Stat(remotePath)
if err != nil {
- out.Close()
+ dbLog.Infof("apply remote snap failed since backup data error: %v", err)
return err
}
- return out.Close()
+ err = r.restoreFromPath(r.GetBackupDirForRemote(), term, index)
+ return err
}
func (r *RockDB) Restore(term uint64, index uint64) error {
- // write meta (snap term and index) and check the meta data in the backup
backupDir := r.GetBackupDir()
- hasBackup, _ := r.IsLocalBackupOK(term, index)
+ return r.restoreFromPath(backupDir, term, index)
+}
+func isSameSSTFile(f1 string, f2 string) error {
+ stat1, err1 := os.Stat(f1)
+ stat2, err2 := os.Stat(f2)
+ if err1 != nil || err2 != nil {
+ return fmt.Errorf("sst files not match err: %v, %v", err1, err2)
+ }
+ if stat1.Size() != stat2.Size() {
+ return fmt.Errorf("sst files mismatch size: %v, %v", stat1, stat2)
+ }
+ // if both paths already refer to the same file (hard link), skip reading the data
+ if os.SameFile(stat1, stat2) {
+ return nil
+ }
+ // the sst metadata is stored in the file footer;
+ // reading the last 256KB is enough to cover it
+ rbytes := int64(256 * 1024)
+ roffset := stat1.Size() - rbytes
+ if roffset < 0 {
+ roffset = 0
+ rbytes = stat1.Size()
+ }
+ fs1, err1 := os.Open(f1)
+ if err1 != nil {
+ return fmt.Errorf("open sst file for compare err: %v", err1)
+ }
+ defer fs1.Close()
+ fs2, err2 := os.Open(f2)
+ if err2 != nil {
+ return fmt.Errorf("open sst file for compare err: %v", err2)
+ }
+ defer fs2.Close()
+ b1 := make([]byte, rbytes)
+ n1, err1 := fs1.ReadAt(b1, roffset)
+ if err1 != nil {
+ if err1 != io.EOF {
+ return fmt.Errorf("read file err: %v", err1)
+ }
+ }
+ b2 := make([]byte, rbytes)
+ n2, err2 := fs2.ReadAt(b2, roffset)
+ if err2 != nil {
+ if err2 != io.EOF {
+ return fmt.Errorf("read file err: %v", err2)
+ }
+ }
+ if n2 != n1 {
+ return fmt.Errorf("sst file footer not match")
+ }
+ // TODO: maybe add more checks on the header and middle of the file
+ if bytes.Equal(b1[:n1], b2[:n2]) {
+ return nil
+ }
+ return fmt.Errorf("sst file footer not match")
+}
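A standalone sketch of the footer-window arithmetic used above: only the last 256KB of each SST is compared, and the window is clamped so that small files are read in full.

// footerWindow computes the offset and length of the footer region that
// isSameSSTFile compares; the printed values are worked examples.
package main

import "fmt"

func footerWindow(fileSize int64) (offset, length int64) {
	length = int64(256 * 1024)
	offset = fileSize - length
	if offset < 0 {
		offset = 0
		length = fileSize
	}
	return offset, length
}

func main() {
	fmt.Println(footerWindow(100 * 1024))       // 0 102400: small files compared in full
	fmt.Println(footerWindow(10 * 1024 * 1024)) // 10223616 262144: only the last 256KB is read
}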
+
+func (r *RockDB) restoreFromPath(backupDir string, term uint64, index uint64) error {
+ // write meta (snap term and index) and check the meta data in the backup
+ r.checkpointDirLock.RLock()
+ defer r.checkpointDirLock.RUnlock()
+ hasBackup, _ := r.isBackupOKInPath(backupDir, term, index)
if !hasBackup {
return errors.New("no backup for restore")
}
checkpointDir := GetCheckpointDir(term, index)
start := time.Now()
- dbLog.Infof("begin restore from checkpoint: %v\n", checkpointDir)
+ dbLog.Infof("begin restore from checkpoint: %v-%v\n", backupDir, checkpointDir)
r.closeEng()
select {
case <-r.quit:
@@ -1000,19 +1061,15 @@ func (r *RockDB) Restore(term uint64, index uint64) error {
if strings.HasPrefix(shortName, "LOG") {
continue
}
+
if strings.HasSuffix(shortName, ".sst") {
if fullName, ok := ckSstNameMap[shortName]; ok {
- stat1, err1 := os.Stat(fullName)
- stat2, err2 := os.Stat(fn)
- if err1 == nil && err2 == nil {
- if stat1.Size() == stat2.Size() {
- dbLog.Infof("keeping sst file: %v", fn)
- continue
- } else {
- dbLog.Infof("no keeping sst file %v for mismatch size: %v, %v", fn, stat1, stat2)
- }
+ err = isSameSSTFile(fullName, fn)
+ if err == nil {
+ dbLog.Infof("keeping sst file: %v", fn)
+ continue
} else {
- dbLog.Infof("no keeping sst file %v for err: %v, %v", fn, err1, err2)
+ dbLog.Infof("no keeping sst file %v for not same: %v", fn, err)
}
}
}
@@ -1025,7 +1082,12 @@ func (r *RockDB) Restore(term uint64, index uint64) error {
continue
}
dst := path.Join(r.GetDataDir(), path.Base(fn))
- err := copyFile(fn, dst, false)
+ var err error
+ if strings.HasSuffix(fn, ".sst") {
+ err = common.CopyFileForHardLink(fn, dst)
+ } else {
+ err = common.CopyFile(fn, dst, true)
+ }
if err != nil {
dbLog.Infof("copy %v to %v failed: %v", fn, dst, err)
return err
@@ -1038,16 +1100,17 @@ func (r *RockDB) Restore(term uint64, index uint64) error {
dbLog.Infof("restore done, cost: %v\n", time.Now().Sub(start))
if err != nil {
dbLog.Infof("reopen the restored db failed: %v\n", err)
+ } else {
+ keepNum := MaxCheckpointNum
+ if r.cfg.KeepBackup > 0 {
+ keepNum = r.cfg.KeepBackup
+ }
+ purgeOldCheckpoint(keepNum, r.GetBackupDir(), atomic.LoadUint64(&r.latestSnapIndex))
+ purgeOldCheckpoint(MaxRemoteCheckpointNum, r.GetBackupDirForRemote(), math.MaxUint64-1)
}
return err
}
-func (r *RockDB) ClearBackup(term uint64, index uint64) error {
- backupDir := r.GetBackupDir()
- checkpointDir := GetCheckpointDir(term, index)
- return os.RemoveAll(path.Join(backupDir, checkpointDir))
-}
-
func (r *RockDB) GetIndexSchema(table string) (*common.IndexSchema, error) {
return r.indexMgr.GetIndexSchemaInfo(r, table)
}
@@ -1078,63 +1141,54 @@ func (r *RockDB) UpdateHsetIndexState(table string, hindex *common.HsetIndexSche
func (r *RockDB) BeginBatchWrite() error {
if atomic.CompareAndSwapInt32(&r.isBatching, 0, 1) {
- r.wb.Clear()
return nil
}
return errors.New("another batching is waiting")
}
-func (r *RockDB) MaybeClearBatch() {
- if atomic.LoadInt32(&r.isBatching) == 1 {
- return
- }
- r.wb.Clear()
-}
-
func (r *RockDB) MaybeCommitBatch() error {
if atomic.LoadInt32(&r.isBatching) == 1 {
return nil
}
- return r.eng.Write(r.defaultWriteOpts, r.wb)
+ err := r.rockEng.Write(r.wb)
+ r.wb.Clear()
+ return err
}
func (r *RockDB) CommitBatchWrite() error {
- err := r.eng.Write(r.defaultWriteOpts, r.wb)
+ err := r.rockEng.Write(r.wb)
if err != nil {
dbLog.Infof("commit write error: %v", err)
}
+ r.wb.Clear()
atomic.StoreInt32(&r.isBatching, 0)
return err
}
-func IsBatchableWrite(cmd string) bool {
- _, ok := batchableCmds[cmd]
- return ok
-}
-
-func SetPerfLevel(level int) {
- if level <= 0 || level > 4 {
- DisablePerfLevel()
- return
- }
- gorocksdb.SetPerfLevel(gorocksdb.PerfLevel(level))
+func (r *RockDB) AbortBatch() {
+ r.wb.Clear()
+ atomic.StoreInt32(&r.isBatching, 0)
}
-func IsPerfEnabledLevel(lv int) bool {
- if lv <= 0 || lv > 4 {
+func IsNeedAbortError(err error) bool {
+ // errors that never touched the write batch do not need an abort,
+ // since the buffered writes in the batch are unaffected
+ if err == errTooMuchBatchSize {
return false
}
- return lv != gorocksdb.PerfDisable
+ return true
}
-func DisablePerfLevel() {
- gorocksdb.SetPerfLevel(gorocksdb.PerfDisable)
+func IsBatchableWrite(cmd string) bool {
+ _, ok := batchableCmds[cmd]
+ return ok
}
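A rough sketch of how a command handler is expected to drive this batching API; the wrapper name and the apply callback are assumptions for illustration, while the methods are the ones introduced above:

// applyBatchedWrite is an illustrative wrapper (not part of this patch).
func applyBatchedWrite(db *RockDB, apply func() error) error {
	if err := db.BeginBatchWrite(); err != nil {
		return err // another batch is still in flight
	}
	if err := apply(); err != nil {
		if IsNeedAbortError(err) {
			db.AbortBatch() // drop the buffered writes
		}
		return err
	}
	return db.CommitBatchWrite()
}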
func init() {
batchableCmds = make(map[string]bool)
// command need response value (not just error or ok) can not be batched
// batched command may cause the table count not-exactly.
+ // command handlers should use MaybeCommitBatch, and AbortBatch when a write fails
batchableCmds["set"] = true
batchableCmds["setex"] = true
batchableCmds["del"] = true
diff --git a/rockredis/rockredis_test.go b/rockredis/rockredis_test.go
index 0f39cb94..666f110b 100644
--- a/rockredis/rockredis_test.go
+++ b/rockredis/rockredis_test.go
@@ -1,21 +1,55 @@
package rockredis
import (
+ "flag"
"fmt"
"io/ioutil"
"os"
+ "path"
+ "path/filepath"
"strconv"
"strings"
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
"github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
+ "github.com/youzan/ZanRedisDB/slow"
)
+const (
+ testEngineType = "rocksdb"
+)
+
+type testLogger struct {
+ t *testing.T
+}
+
+func newTestLogger(t *testing.T) *testLogger {
+ return &testLogger{t: t}
+}
+
+func (l *testLogger) Output(maxdepth int, s string) error {
+ l.t.Logf("%v:%v", time.Now().UnixNano(), s)
+ return nil
+}
+
+func (l *testLogger) OutputErr(maxdepth int, s string) error {
+ l.t.Logf("%v:%v", time.Now().UnixNano(), s)
+ return nil
+}
+
+func (l *testLogger) OutputWarning(maxdepth int, s string) error {
+ l.t.Logf("%v:%v", time.Now().UnixNano(), s)
+ return nil
+}
+
func getTestDBNoTableCounter(t *testing.T) *RockDB {
- cfg := NewRockConfig()
+ cfg := NewRockRedisDBConfig()
cfg.EnableTableCounter = false
+ cfg.EnablePartitionedIndexFilter = true
+ cfg.EngineType = testEngineType
var err error
cfg.DataDir, err = ioutil.TempDir("", fmt.Sprintf("rockredis-test-%d", time.Now().UnixNano()))
assert.Nil(t, err)
@@ -24,24 +58,31 @@ func getTestDBNoTableCounter(t *testing.T) *RockDB {
return testDB
}
-func getTestDBWithDir(t *testing.T, dataDir string) *RockDB {
- cfg := NewRockConfig()
+func getTestDBForBench() *RockDB {
+ cfg := NewRockRedisDBConfig()
cfg.EnableTableCounter = true
- cfg.DataDir = dataDir
+ cfg.EnablePartitionedIndexFilter = true
+ cfg.EngineType = testEngineType
+ var err error
+ cfg.DataDir, err = ioutil.TempDir("", fmt.Sprintf("rockredis-test-%d", time.Now().UnixNano()))
+ if err != nil {
+ panic(err)
+ }
testDB, err := OpenRockDB(cfg)
- assert.Nil(t, err)
- if testing.Verbose() {
- SetLogLevel(int32(4))
+ if err != nil {
+ panic(err)
}
return testDB
}
-func getTestDB(t *testing.T) *RockDB {
- cfg := NewRockConfig()
+func getTestDBWithDirType(t *testing.T, dataDir string, tp string) *RockDB {
+ cfg := NewRockRedisDBConfig()
cfg.EnableTableCounter = true
- var err error
- cfg.DataDir, err = ioutil.TempDir("", fmt.Sprintf("rockredis-test-%d", time.Now().UnixNano()))
- assert.Nil(t, err)
+ cfg.WriteBufferSize = 1000
+ cfg.MaxWriteBufferNumber = 1
+ cfg.EnablePartitionedIndexFilter = true
+ cfg.EngineType = tp
+ cfg.DataDir = dataDir
testDB, err := OpenRockDB(cfg)
assert.Nil(t, err)
if testing.Verbose() {
@@ -50,12 +91,119 @@ func getTestDB(t *testing.T) *RockDB {
return testDB
}
+func getTestDBWithDir(t *testing.T, dataDir string) *RockDB {
+ return getTestDBWithDirType(t, dataDir, testEngineType)
+}
+
+func getTestDB(t *testing.T) *RockDB {
+ dataDir, err := ioutil.TempDir("", fmt.Sprintf("rockredis-test-%d", time.Now().UnixNano()))
+ assert.Nil(t, err)
+ return getTestDBWithDirType(t, dataDir, testEngineType)
+}
+
+func TestMain(m *testing.M) {
+ SetLogger(int32(common.LOG_INFO), nil)
+ engine.SetLogger(int32(common.LOG_INFO), nil)
+ slow.SetLogger(int32(common.LOG_INFO), nil)
+ flag.Parse()
+ if testing.Verbose() {
+ SetLogger(int32(common.LOG_DEBUG), common.NewLogger())
+ SetLogLevel(int32(common.LOG_DETAIL))
+ engine.SetLogLevel(int32(common.LOG_DETAIL))
+ }
+ lazyCleanExpired = time.Second * 3
+ timeUpdateFreq = 1
+ ret := m.Run()
+ os.Exit(ret)
+}
+
func TestDB(t *testing.T) {
db := getTestDB(t)
defer os.RemoveAll(db.cfg.DataDir)
defer db.Close()
}
+func TestDBCompact(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key := []byte("test:test_kv_key")
+ value := []byte("value")
+ err := db.KVSet(0, key, value)
+ assert.Nil(t, err)
+ for i := 0; i < 100; i++ {
+ err := db.KVSet(0, []byte(string(key)+strconv.Itoa(i)), value)
+ assert.Nil(t, err)
+ }
+
+ v, err := db.KVGet(key)
+ assert.Nil(t, err)
+ assert.Equal(t, string(value), string(v))
+
+ db.CompactAllRange()
+
+ for i := 0; i < 50; i++ {
+ db.DelKeys([]byte(string(key) + strconv.Itoa(i)))
+ }
+
+ db.CompactAllRange()
+
+ v, err = db.KVGet(key)
+ assert.Nil(t, err)
+ assert.Equal(t, string(value), string(v))
+ err = db.SetMaxBackgroundOptions(10, 0)
+ assert.Nil(t, err)
+ err = db.SetMaxBackgroundOptions(0, 10)
+ assert.Nil(t, err)
+ err = db.SetMaxBackgroundOptions(10, 10)
+ assert.Nil(t, err)
+}
+
+func TestIsSameSST(t *testing.T) {
+ d1, err := ioutil.TempDir("", fmt.Sprintf("rockredis-test-%d", time.Now().UnixNano()))
+ assert.Nil(t, err)
+ defer os.RemoveAll(d1)
+ // small file, large file
+ f1small := path.Join(d1, "f1small")
+ f2small := path.Join(d1, "f2small")
+ f3small := path.Join(d1, "f3small")
+ err = ioutil.WriteFile(f1small, []byte("aaa"), 0655)
+ assert.Nil(t, err)
+ err = ioutil.WriteFile(f2small, []byte("aaa"), 0655)
+ assert.Nil(t, err)
+ err = ioutil.WriteFile(f3small, []byte("aab"), 0655)
+ assert.Nil(t, err)
+ assert.Nil(t, isSameSSTFile(f1small, f2small))
+ assert.NotNil(t, isSameSSTFile(f2small, f3small))
+ assert.NotNil(t, isSameSSTFile(f1small, f3small))
+ fileData := make([]byte, 1024*256*2)
+ for i := 0; i < len(fileData); i++ {
+ fileData[i] = 'a'
+ }
+
+ f1large := path.Join(d1, "f1large")
+ f2large := path.Join(d1, "f2large")
+ f3large := path.Join(d1, "f3large")
+ f4large := path.Join(d1, "f4large")
+ err = ioutil.WriteFile(f1large, fileData, 0655)
+ assert.Nil(t, err)
+ err = ioutil.WriteFile(f2large, fileData, 0655)
+ assert.Nil(t, err)
+ fileData[0] = 'b'
+ err = ioutil.WriteFile(f3large, fileData, 0655)
+ assert.Nil(t, err)
+ fileData[1024*256+1] = 'b'
+ err = ioutil.WriteFile(f4large, fileData, 0655)
+ assert.Nil(t, err)
+ assert.Nil(t, isSameSSTFile(f1large, f2large))
+ assert.Nil(t, isSameSSTFile(f1large, f3large))
+ assert.NotNil(t, isSameSSTFile(f1large, f4large))
+ assert.Nil(t, isSameSSTFile(f2large, f3large))
+ assert.NotNil(t, isSameSSTFile(f2large, f4large))
+ assert.NotNil(t, isSameSSTFile(f3large, f4large))
+ assert.NotNil(t, isSameSSTFile(f1small, f1large))
+ assert.NotNil(t, isSameSSTFile(f3small, f3large))
+}
+
func TestRockDB(t *testing.T) {
db := getTestDB(t)
defer os.RemoveAll(db.cfg.DataDir)
@@ -106,38 +254,30 @@ func TestRockDB(t *testing.T) {
assert.Equal(t, 2, len(vlist))
}
+func TestRockDBRevScanTableForHash(t *testing.T) {
+ testRockDBScanTableForHash(t, true)
+}
+
func TestRockDBScanTableForHash(t *testing.T) {
+ testRockDBScanTableForHash(t, false)
+}
+
+func testRockDBScanTableForHash(t *testing.T, reverse bool) {
db := getTestDB(t)
defer os.RemoveAll(db.cfg.DataDir)
defer db.Close()
total := 500
- keyList1 := make([][]byte, 0, total*2)
- keyList2 := make([][]byte, 0, total*2)
- for i := 0; i < total; i++ {
- keyList1 = append(keyList1, []byte("test:test_hash_scan_key"+strconv.Itoa(i)))
- keyList2 = append(keyList2, []byte("test2:test2_hash_scan_key"+strconv.Itoa(i)))
- }
- for i := 0; i < total; i++ {
- keyList1 = append(keyList1, []byte("test:test_hash_scan_key_longlonglonglonglonglong"+strconv.Itoa(i)))
- keyList2 = append(keyList2, []byte("test2:test2_hash_scan_key_longlonglonglonglonglong"+strconv.Itoa(i)))
- }
- for _, key := range keyList1 {
- _, err := db.HSet(0, false, key, []byte("a"), key)
+ keyList1, keyList2 := fillScanKeysForType(t, "hash", total, func(key []byte, prefix string) {
+ _, err := db.HSet(0, false, key, []byte(prefix+":a"), key)
assert.Nil(t, err)
- _, err = db.HSet(0, false, key, []byte("b"), key)
+ _, err = db.HSet(0, false, key, []byte(prefix+":b"), key)
assert.Nil(t, err)
- }
- for _, key := range keyList2 {
- _, err := db.HSet(0, false, key, []byte("a"), key)
- assert.Nil(t, err)
- _, err = db.HSet(0, false, key, []byte("b"), key)
- assert.Nil(t, err)
- }
+ })
minKey := encodeDataTableStart(HashType, []byte("test"))
maxKey := encodeDataTableEnd(HashType, []byte("test"))
- it, err := db.buildScanIterator(minKey, maxKey)
+ it, err := db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
it.NoTimestamp(HashType)
func() {
@@ -147,7 +287,7 @@ func TestRockDBScanTableForHash(t *testing.T) {
table, k, f, err := hDecodeHashKey(it.Key())
assert.Nil(t, err)
assert.Equal(t, "test", string(table))
- if string(f) != "a" && string(f) != "b" {
+ if string(f) != "test:a" && string(f) != "test:b" {
t.Fatal("scan field mismatch: " + string(f))
}
assert.Equal(t, string(table)+":"+string(k), string(it.Value()))
@@ -158,7 +298,7 @@ func TestRockDBScanTableForHash(t *testing.T) {
minKey = encodeDataTableStart(HashType, []byte("test2"))
maxKey = encodeDataTableEnd(HashType, []byte("test2"))
- it, err = db.buildScanIterator(minKey, maxKey)
+ it, err = db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
it.NoTimestamp(HashType)
func() {
@@ -168,7 +308,7 @@ func TestRockDBScanTableForHash(t *testing.T) {
table, k, f, err := hDecodeHashKey(it.Key())
assert.Nil(t, err)
assert.Equal(t, "test2", string(table))
- if string(f) != "a" && string(f) != "b" {
+ if string(f) != "test2:a" && string(f) != "test2:b" {
t.Fatal("scan field mismatch: " + string(f))
}
assert.Equal(t, string(table)+":"+string(k), string(it.Value()))
@@ -189,7 +329,7 @@ func TestRockDBScanTableForHash(t *testing.T) {
minKey = encodeDataTableStart(HashType, []byte("test"))
maxKey = encodeDataTableEnd(HashType, []byte("test"))
- it, err = db.buildScanIterator(minKey, maxKey)
+ it, err = db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
it.NoTimestamp(HashType)
func() {
@@ -199,7 +339,7 @@ func TestRockDBScanTableForHash(t *testing.T) {
table, k, f, err := hDecodeHashKey(it.Key())
assert.Nil(t, err)
assert.Equal(t, "test", string(table))
- if string(f) != "a" && string(f) != "b" {
+ if string(f) != "test:a" && string(f) != "test:b" {
t.Fatal("scan field mismatch: " + string(f))
}
assert.Equal(t, string(table)+":"+string(k), string(it.Value()))
@@ -210,7 +350,7 @@ func TestRockDBScanTableForHash(t *testing.T) {
minKey = encodeDataTableStart(HashType, []byte("test2"))
maxKey = encodeDataTableEnd(HashType, []byte("test2"))
- it, err = db.buildScanIterator(minKey, maxKey)
+ it, err = db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
it.NoTimestamp(HashType)
func() {
@@ -220,7 +360,7 @@ func TestRockDBScanTableForHash(t *testing.T) {
table, k, f, err := hDecodeHashKey(it.Key())
assert.Nil(t, err)
assert.Equal(t, "test2", string(table))
- if string(f) != "a" && string(f) != "b" {
+ if string(f) != "test2:a" && string(f) != "test2:b" {
t.Fatal("scan field mismatch: " + string(f))
}
assert.Equal(t, string(table)+":"+string(k), string(it.Value()))
@@ -237,36 +377,34 @@ func TestRockDBScanTableForHash(t *testing.T) {
t.Logf("test2 key number: %v, usage: %v", keyNum, diskUsage)
}
+func TestRockDBRevScanTableForList(t *testing.T) {
+ testRockDBScanTableForList(t, true)
+}
+
func TestRockDBScanTableForList(t *testing.T) {
+ testRockDBScanTableForList(t, false)
+}
+
+func testRockDBScanTableForList(t *testing.T, reverse bool) {
db := getTestDB(t)
defer os.RemoveAll(db.cfg.DataDir)
defer db.Close()
- keyList1 := make([][]byte, 0)
- keyList2 := make([][]byte, 0)
totalCnt := 50
- for i := 0; i < totalCnt; i++ {
- keyList1 = append(keyList1, []byte("test:test_list_scan_key"+strconv.Itoa(i)))
- keyList2 = append(keyList2, []byte("test2:test2_list_scan_key"+strconv.Itoa(i)))
- }
- for i := 0; i < totalCnt; i++ {
- keyList1 = append(keyList1, []byte("test:test_list_scan_key_longlonglonglonglonglong"+strconv.Itoa(i)))
- keyList2 = append(keyList2, []byte("test2:test2_list_scan_key_longlonglonglonglonglong"+strconv.Itoa(i)))
- }
- for _, key := range keyList1 {
+ keyList1, keyList2 := fillScanKeysForType(t, "list", totalCnt, func(key []byte, prefix string) {
_, err := db.LPush(0, key, key, key)
assert.Nil(t, err)
- }
- for _, key := range keyList2 {
- _, err := db.LPush(0, key, key, key)
- assert.Nil(t, err)
- }
+ })
minKey := encodeDataTableStart(ListType, []byte("test"))
maxKey := encodeDataTableEnd(ListType, []byte("test"))
- it, err := db.buildScanIterator(minKey, maxKey)
+ t.Logf("min: %v, max %v\n", minKey, maxKey)
+ it, err := db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
func() {
+ if err != nil {
+ return
+ }
defer it.Close()
cnt := 0
for ; it.Valid(); it.Next() {
@@ -284,9 +422,12 @@ func TestRockDBScanTableForList(t *testing.T) {
minKey = encodeDataTableStart(ListType, []byte("test2"))
maxKey = encodeDataTableEnd(ListType, []byte("test2"))
- it, err = db.buildScanIterator(minKey, maxKey)
+ it, err = db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
func() {
+ if err != nil {
+ return
+ }
defer it.Close()
cnt := 0
for ; it.Valid(); it.Next() {
@@ -315,7 +456,7 @@ func TestRockDBScanTableForList(t *testing.T) {
minKey = encodeDataTableStart(ListType, []byte("test"))
maxKey = encodeDataTableEnd(ListType, []byte("test"))
- it, err = db.buildScanIterator(minKey, maxKey)
+ it, err = db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
func() {
defer it.Close()
@@ -335,9 +476,12 @@ func TestRockDBScanTableForList(t *testing.T) {
minKey = encodeDataTableStart(ListType, []byte("test2"))
maxKey = encodeDataTableEnd(ListType, []byte("test2"))
- it, err = db.buildScanIterator(minKey, maxKey)
+ it, err = db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
func() {
+ if err != nil {
+ return
+ }
defer it.Close()
cnt := 0
for ; it.Valid(); it.Next() {
@@ -355,34 +499,28 @@ func TestRockDBScanTableForList(t *testing.T) {
}()
}
+func TestRockDBRevScanTableForSet(t *testing.T) {
+ testRockDBScanTableForSet(t, true)
+}
+
func TestRockDBScanTableForSet(t *testing.T) {
+ testRockDBScanTableForSet(t, false)
+}
+
+func testRockDBScanTableForSet(t *testing.T, reverse bool) {
db := getTestDB(t)
defer os.RemoveAll(db.cfg.DataDir)
defer db.Close()
- keyList1 := make([][]byte, 0)
- keyList2 := make([][]byte, 0)
totalCnt := 50
- for i := 0; i < totalCnt; i++ {
- keyList1 = append(keyList1, []byte("test:test_set_scan_key"+strconv.Itoa(i)))
- keyList2 = append(keyList2, []byte("test2:test2_set_scan_key"+strconv.Itoa(i)))
- }
- for i := 0; i < totalCnt; i++ {
- keyList1 = append(keyList1, []byte("test:test_set_scan_key_longlonglonglonglonglong"+strconv.Itoa(i)))
- keyList2 = append(keyList2, []byte("test2:test2_set_scan_key_longlonglonglonglonglong"+strconv.Itoa(i)))
- }
- for _, key := range keyList1 {
- _, err := db.SAdd(0, key, []byte("test:a"), []byte("test:b"))
- assert.Nil(t, err)
- }
- for _, key := range keyList2 {
- _, err := db.SAdd(0, key, []byte("test2:a"), []byte("test2:b"))
+ keyList1, keyList2 := fillScanKeysForType(t, "set", totalCnt, func(key []byte, prefix string) {
+ _, err := db.SAdd(0, key, []byte(prefix+":a"), []byte(prefix+":b"))
assert.Nil(t, err)
- }
+ })
minKey := encodeDataTableStart(SetType, []byte("test"))
maxKey := encodeDataTableEnd(SetType, []byte("test"))
- it, err := db.buildScanIterator(minKey, maxKey)
+ it, err := db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
func() {
defer it.Close()
@@ -404,7 +542,7 @@ func TestRockDBScanTableForSet(t *testing.T) {
minKey = encodeDataTableStart(SetType, []byte("test2"))
maxKey = encodeDataTableEnd(SetType, []byte("test2"))
- it, err = db.buildScanIterator(minKey, maxKey)
+ it, err = db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
func() {
defer it.Close()
@@ -436,7 +574,7 @@ func TestRockDBScanTableForSet(t *testing.T) {
minKey = encodeDataTableStart(SetType, []byte("test"))
maxKey = encodeDataTableEnd(SetType, []byte("test"))
- it, err = db.buildScanIterator(minKey, maxKey)
+ it, err = db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
func() {
defer it.Close()
@@ -458,7 +596,7 @@ func TestRockDBScanTableForSet(t *testing.T) {
minKey = encodeDataTableStart(SetType, []byte("test2"))
maxKey = encodeDataTableEnd(SetType, []byte("test2"))
- it, err = db.buildScanIterator(minKey, maxKey)
+ it, err = db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
func() {
defer it.Close()
@@ -486,37 +624,31 @@ func TestRockDBScanTableForSet(t *testing.T) {
t.Logf("test2 key number: %v, usage: %v", keyNum, diskUsage)
}
+func TestRockDBRevScanTableForZSet(t *testing.T) {
+ testRockDBScanTableForZSet(t, true)
+}
+
func TestRockDBScanTableForZSet(t *testing.T) {
+ testRockDBScanTableForZSet(t, false)
+}
+
+func testRockDBScanTableForZSet(t *testing.T, reverse bool) {
db := getTestDB(t)
defer os.RemoveAll(db.cfg.DataDir)
defer db.Close()
- keyList1 := make([][]byte, 0)
- keyList2 := make([][]byte, 0)
totalCnt := 50
- for i := 0; i < totalCnt; i++ {
- keyList1 = append(keyList1, []byte("test:test_zset_scan_key"+strconv.Itoa(i)))
- keyList2 = append(keyList2, []byte("test2:test2_zset_scan_key"+strconv.Itoa(i)))
- }
- for i := 0; i < totalCnt; i++ {
- keyList1 = append(keyList1, []byte("test:test_zset_scan_key_longlonglonglonglonglong"+strconv.Itoa(i)))
- keyList2 = append(keyList2, []byte("test2:test2_zset_scan_key_longlonglonglonglonglong"+strconv.Itoa(i)))
- }
- for _, key := range keyList1 {
- _, err := db.ZAdd(0, key, common.ScorePair{1, []byte("test:a")},
- common.ScorePair{2, []byte("test:b")})
- assert.Nil(t, err)
- }
- for _, key := range keyList2 {
- _, err := db.ZAdd(0, key, common.ScorePair{1, []byte("test2:a")},
- common.ScorePair{2, []byte("test2:b")})
+
+ keyList1, keyList2 := fillScanKeysForType(t, "zset", totalCnt, func(key []byte, prefix string) {
+ _, err := db.ZAdd(0, key, common.ScorePair{1, []byte(prefix + ":a")},
+ common.ScorePair{2, []byte(prefix + ":b")})
assert.Nil(t, err)
- }
+ })
minKey := encodeDataTableStart(ZScoreType, []byte("test"))
maxKey := encodeDataTableEnd(ZScoreType, []byte("test"))
t.Logf("scan test : %v, %v", minKey, maxKey)
- it, err := db.buildScanIterator(minKey, maxKey)
+ it, err := db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
func() {
defer it.Close()
@@ -542,7 +674,7 @@ func TestRockDBScanTableForZSet(t *testing.T) {
minKey = encodeDataTableStart(ZScoreType, []byte("test2"))
maxKey = encodeDataTableEnd(ZScoreType, []byte("test2"))
- it, err = db.buildScanIterator(minKey, maxKey)
+ it, err = db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
func() {
defer it.Close()
@@ -578,7 +710,7 @@ func TestRockDBScanTableForZSet(t *testing.T) {
minKey = encodeDataTableStart(ZScoreType, []byte("test"))
maxKey = encodeDataTableEnd(ZScoreType, []byte("test"))
- it, err = db.buildScanIterator(minKey, maxKey)
+ it, err = db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
func() {
defer it.Close()
@@ -604,7 +736,7 @@ func TestRockDBScanTableForZSet(t *testing.T) {
minKey = encodeDataTableStart(ZScoreType, []byte("test2"))
maxKey = encodeDataTableEnd(ZScoreType, []byte("test2"))
- it, err = db.buildScanIterator(minKey, maxKey)
+ it, err = db.buildScanIterator(minKey, maxKey, reverse)
assert.Nil(t, err)
func() {
defer it.Close()
@@ -634,3 +766,190 @@ func TestRockDBScanTableForZSet(t *testing.T) {
diskUsage = db.GetTableSizeInRange("test2", nil, nil)
t.Logf("test2 key number: %v, usage: %v", keyNum, diskUsage)
}
+
+func Test_purgeOldCheckpoint(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", fmt.Sprintf("sm-test-%d", time.Now().UnixNano()))
+ assert.Nil(t, err)
+ defer os.RemoveAll(tmpDir)
+ t.Logf("dir:%v\n", tmpDir)
+ term := uint64(0x011a)
+ index := uint64(0x0c000)
+ cntIdx := 25
+
+ type args struct {
+ keepNum int
+ checkpointDir string
+ latestSnapIndex uint64
+ }
+ tests := []struct {
+ name string
+ args args
+ }{
+ {"keep0_1", args{0, "keep0_1dir", index + 1}},
+ {"keep0_2", args{0, "keep0_2dir", index + 2}},
+ {"keep0_10", args{0, "keep0_10dir", index + 10}},
+ {"keep0_max", args{0, "keep0_maxdir", index + uint64(cntIdx)}},
+ {"keep1_1", args{1, "keep1_1dir", index + 1}},
+ {"keep1_2", args{1, "keep1_2dir", index + 2}},
+ {"keep1_10", args{1, "keep1_10dir", index + 10}},
+ {"keep1_max", args{1, "keep1_maxdir", index + uint64(cntIdx)}},
+ {"keep10_1", args{10, "keep10_1dir", index + 1}},
+ {"keep10_2", args{10, "keep10_2dir", index + 2}},
+ {"keep10_10", args{10, "keep10_10dir", index + 10}},
+ {"keep10_max", args{10, "keep10_maxdir", index + uint64(cntIdx)}},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ checkDir := path.Join(tmpDir, tt.args.checkpointDir)
+ fns := make([]string, 0, cntIdx)
+ fnIndexes := make([]uint64, 0, cntIdx)
+ for j := 0; j < cntIdx; j++ {
+ idx := index + uint64(j)
+ p := path.Join(checkDir, fmt.Sprintf("%04x-%05x", term, idx))
+ err := os.MkdirAll(p, 0755)
+ assert.Nil(t, err)
+ fns = append(fns, p)
+ fnIndexes = append(fnIndexes, idx)
+ }
+ purgeOldCheckpoint(tt.args.keepNum, checkDir, tt.args.latestSnapIndex)
+ for i, fn := range fns {
+ _, err := os.Stat(fn)
+ t.Logf("checking file: %v, %v", fn, err)
+ if int64(fnIndexes[i]) >= int64(tt.args.latestSnapIndex)-int64(tt.args.keepNum) {
+ assert.Nil(t, err)
+ continue
+ }
+ assert.True(t, os.IsNotExist(err), "should not keep")
+ }
+ })
+ }
+}
+
+func TestRockDBRecovery(t *testing.T) {
+ // restoring should overwrite newer local data with the data from the backup
+ dataDir, err := ioutil.TempDir("", fmt.Sprintf("rockredis-test-%d", time.Now().UnixNano()))
+ assert.Nil(t, err)
+ db := getTestDBWithDirType(t, dataDir, "rocksdb")
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ t.Log(db.cfg.DataDir)
+ key := []byte("test:test_kv_recovery")
+ value := []byte("value")
+ err = db.KVSet(0, key, value)
+ assert.Nil(t, err)
+ wcnt := 50000
+ for i := 0; i < wcnt; i++ {
+ expectedV := []byte(string(key) + strconv.Itoa(i))
+ err := db.KVSet(0, expectedV, expectedV)
+ assert.Nil(t, err)
+ }
+ db.CompactAllRange()
+
+ v, err := db.KVGet(key)
+ assert.Nil(t, err)
+ assert.Equal(t, string(value), string(v))
+ bi := db.Backup(1, 1)
+ _, err = bi.GetResult()
+ assert.Nil(t, err)
+ checkpointDir := GetCheckpointDir(1, 1)
+ fullBackupPath := path.Join(db.GetBackupDir(), checkpointDir)
+ // copy the files to break the hard links with the local data, so recovery is actually exercised
+ backupfiles, _ := filepath.Glob(path.Join(fullBackupPath, "*"))
+ var backupSSTfiles []string
+ for _, f := range backupfiles {
+ fi, _ := os.Stat(f)
+ ofi, _ := os.Stat(path.Join(db.GetDataDir(), path.Base(f)))
+ if strings.HasSuffix(f, ".sst") {
+ assert.True(t, os.SameFile(fi, ofi))
+ dst := f + ".tmp"
+ common.CopyFile(f, dst, true)
+ os.Remove(f)
+ os.Rename(dst, f)
+ fi, _ = os.Stat(f)
+ assert.False(t, os.SameFile(fi, ofi))
+ backupSSTfiles = append(backupSSTfiles, f)
+ } else {
+ assert.False(t, os.SameFile(fi, ofi))
+ assert.Nil(t, isSameSSTFile(f, path.Join(db.GetDataDir(), path.Base(f))))
+ }
+ }
+
+ ok, err := db.IsLocalBackupOK(1, 1)
+ assert.Nil(t, err)
+ assert.True(t, ok)
+
+ // write new local data to verify that restore overwrites it
+ err = db.KVSet(0, key, []byte("changed"))
+ assert.Nil(t, err)
+
+ v, err = db.KVGet(key)
+ assert.Nil(t, err)
+ assert.Equal(t, "changed", string(v))
+
+ err = db.restoreFromPath(db.GetBackupDir(), 1, 1)
+ assert.Nil(t, err)
+ localfiles, _ := filepath.Glob(path.Join(db.GetDataDir(), "*.sst"))
+ assert.Equal(t, len(localfiles), len(backupSSTfiles))
+ assert.True(t, len(localfiles) >= 1, localfiles)
+ for _, f := range backupfiles {
+ if strings.HasPrefix(f, "LOG") {
+ continue
+ }
+ fi, err := os.Stat(f)
+ assert.Nil(t, err)
+ lfi, err := os.Stat(path.Join(db.GetDataDir(), path.Base(f)))
+ assert.Nil(t, err)
+ if strings.HasSuffix(f, "sst") {
+ assert.True(t, os.SameFile(fi, lfi))
+ } else {
+ assert.False(t, os.SameFile(fi, lfi))
+ assert.Nil(t, isSameSSTFile(f, path.Join(db.GetDataDir(), path.Base(f))))
+ }
+ }
+
+ v, err = db.KVGet(key)
+ assert.Nil(t, err)
+ assert.Equal(t, string(value), string(v))
+ for i := 0; i < wcnt; i++ {
+ expectedV := []byte(string(key) + strconv.Itoa(i))
+ v, err := db.KVGet(expectedV)
+ assert.Nil(t, err)
+ assert.Equal(t, string(expectedV), string(v))
+ }
+
+ err = db.KVSet(0, key, []byte("changed"))
+ assert.Nil(t, err)
+
+ err = db.restoreFromPath(db.GetBackupDir(), 1, 1)
+ assert.Nil(t, err)
+
+ assert.Equal(t, len(localfiles), len(backupSSTfiles))
+ assert.True(t, len(localfiles) >= 1, localfiles)
+ for _, f := range backupfiles {
+ if strings.HasPrefix(f, "LOG") {
+ continue
+ }
+ fi, err := os.Stat(f)
+ assert.Nil(t, err)
+ lfi, err := os.Stat(path.Join(db.GetDataDir(), path.Base(f)))
+ assert.Nil(t, err)
+ if strings.HasSuffix(f, "sst") {
+ assert.True(t, os.SameFile(fi, lfi))
+ } else {
+ assert.False(t, os.SameFile(fi, lfi))
+ assert.Nil(t, isSameSSTFile(f, path.Join(db.GetDataDir(), path.Base(f))))
+ }
+ }
+
+ v, err = db.KVGet(key)
+ assert.Nil(t, err)
+ assert.Equal(t, string(value), string(v))
+ for i := 0; i < wcnt; i++ {
+ expectedV := []byte(string(key) + strconv.Itoa(i))
+ v, err := db.KVGet(expectedV)
+ assert.Nil(t, err)
+ assert.Equal(t, string(expectedV), string(v))
+ }
+}
diff --git a/rockredis/scan.go b/rockredis/scan.go
index e0e1345d..891110a6 100644
--- a/rockredis/scan.go
+++ b/rockredis/scan.go
@@ -2,28 +2,30 @@ package rockredis
import (
"errors"
+ "time"
- "github.com/absolute8511/ZanRedisDB/common"
"github.com/gobwas/glob"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
)
var errDataType = errors.New("error data type")
var errMetaKey = errors.New("error meta key")
-func (db *RockDB) Scan(dataType common.DataType, cursor []byte, count int, match string) ([][]byte, error) {
+func (db *RockDB) Scan(dataType common.DataType, cursor []byte, count int, match string, reverse bool) ([][]byte, error) {
storeDataType, err := getDataStoreType(dataType)
if err != nil {
return nil, err
}
- return db.scanGeneric(storeDataType, cursor, count, match)
+ return db.scanGeneric(storeDataType, cursor, count, match, reverse)
}
-func (db *RockDB) ScanWithBuffer(dataType common.DataType, cursor []byte, count int, match string, buffer [][]byte) ([][]byte, error) {
+func (db *RockDB) ScanWithBuffer(dataType common.DataType, cursor []byte, count int, match string, buffer [][]byte, reverse bool) ([][]byte, error) {
storeDataType, err := getDataStoreType(dataType)
if err != nil {
return nil, err
}
- return db.scanGenericUseBuffer(storeDataType, cursor, count, match, buffer)
+ return db.scanGenericUseBuffer(storeDataType, cursor, count, match, buffer, reverse)
}
func getDataStoreType(dataType common.DataType) (byte, error) {
@@ -65,6 +67,7 @@ func getCommonDataType(dataType byte) (common.DataType, error) {
}
return commonDataType, nil
}
+
func buildMatchRegexp(match string) (glob.Glob, error) {
var err error
var r glob.Glob
@@ -78,17 +81,27 @@ func buildMatchRegexp(match string) (glob.Glob, error) {
return r, nil
}
-func (db *RockDB) buildScanIterator(minKey []byte, maxKey []byte) (*RangeLimitedIterator, error) {
+func (db *RockDB) buildScanIterator(minKey []byte, maxKey []byte, reverse bool) (*engine.RangeLimitedIterator, error) {
tp := common.RangeOpen
- return NewDBRangeIterator(db.eng, minKey, maxKey, tp, false)
+ return db.NewDBRangeIterator(minKey, maxKey, tp, reverse)
}
-func buildScanKeyRange(storeDataType byte, key []byte) (minKey []byte, maxKey []byte, err error) {
- if minKey, err = encodeScanMinKey(storeDataType, key); err != nil {
- return
- }
- if maxKey, err = encodeScanMaxKey(storeDataType, nil); err != nil {
- return
+func buildScanKeyRange(storeDataType byte, key []byte, reverse bool) (minKey []byte, maxKey []byte, err error) {
+ if reverse {
+ // for a reverse scan, use the current cursor key as the end (upper bound) of the range
+ if maxKey, err = encodeScanKey(storeDataType, key); err != nil {
+ return
+ }
+ if minKey, err = encodeScanMinKey(storeDataType, nil); err != nil {
+ return
+ }
+ } else {
+ if minKey, err = encodeScanMinKey(storeDataType, key); err != nil {
+ return
+ }
+ if maxKey, err = encodeScanMaxKey(storeDataType, nil); err != nil {
+ return
+ }
}
return
}
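A minimal usage sketch of the reverse cursor semantics, consistent with the scan_test.go cases added below: a forward scan walks upward from the cursor in ascending order, while a reverse scan returns keys strictly below the cursor in descending order (helper name is hypothetical, assuming an open *RockDB):

// scanBothWays contrasts forward and reverse scans over the same cursor.
func scanBothWays(db *RockDB) {
	// forward: keys from the cursor upward, ascending
	fwd, _ := db.Scan(common.KV, []byte("test:"), 100, "", false)
	// reverse: keys strictly before the cursor, descending
	rev, _ := db.Scan(common.KV, []byte("test:"), 100, "", true)
	dbLog.Infof("forward scanned %d keys, reverse scanned %d keys", len(fwd), len(rev))
}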
@@ -120,20 +133,7 @@ func encodeScanKeyTableEnd(storeDataType byte, key []byte) ([]byte, error) {
}
func encodeScanKey(storeDataType byte, key []byte) ([]byte, error) {
- switch storeDataType {
- case KVType:
- return encodeKVKey(key), nil
- case LMetaType:
- return lEncodeMetaKey(key), nil
- case HSizeType:
- return hEncodeSizeKey(key), nil
- case ZSizeType:
- return zEncodeSizeKey(key), nil
- case SSizeType:
- return sEncodeSizeKey(key), nil
- default:
- return nil, errDataType
- }
+ return encodeMetaKey(storeDataType, key)
}
func decodeScanKey(storeDataType byte, ek []byte) (key []byte, err error) {
@@ -166,23 +166,24 @@ func checkScanCount(count int) int {
// note: this scan will not stop while cross table, it will scan begin from key until count or no more in db.
func (db *RockDB) scanGenericUseBuffer(storeDataType byte, key []byte, count int,
- match string, inputBuffer [][]byte) ([][]byte, error) {
+ match string, inputBuffer [][]byte, reverse bool) ([][]byte, error) {
r, err := buildMatchRegexp(match)
if err != nil {
return nil, err
}
- minKey, maxKey, err := buildScanKeyRange(storeDataType, key)
+ minKey, maxKey, err := buildScanKeyRange(storeDataType, key, reverse)
if err != nil {
return nil, err
}
dbLog.Debugf("scan range: %v, %v", minKey, maxKey)
count = checkScanCount(count)
- it, err := db.buildScanIterator(minKey, maxKey)
+ it, err := db.buildScanIterator(minKey, maxKey, reverse)
if err != nil {
return nil, err
}
+ defer it.Close()
var v [][]byte
if inputBuffer != nil {
@@ -201,38 +202,47 @@ func (db *RockDB) scanGenericUseBuffer(storeDataType byte, key []byte, count int
i++
}
}
- it.Close()
return v, nil
}
func (db *RockDB) scanGeneric(storeDataType byte, key []byte, count int,
- match string) ([][]byte, error) {
+ match string, reverse bool) ([][]byte, error) {
- return db.scanGenericUseBuffer(storeDataType, key, count, match, nil)
+ return db.scanGenericUseBuffer(storeDataType, key, count, match, nil, reverse)
}
// for special data scan
-func buildSpecificDataScanKeyRange(storeDataType byte, key []byte, cursor []byte) (minKey []byte, maxKey []byte, err error) {
- if minKey, err = encodeSpecificDataScanMinKey(storeDataType, key, cursor); err != nil {
- return
- }
- if maxKey, err = encodeSpecificDataScanMaxKey(storeDataType, key, nil); err != nil {
- return
+func buildSpecificDataScanKeyRange(storeDataType byte, table []byte, key []byte, cursor []byte, reverse bool) (minKey []byte, maxKey []byte, err error) {
+ if reverse {
+ // for a reverse scan, use the current cursor as the end (upper bound) of the member range
+ if maxKey, err = encodeSpecificDataScanKey(storeDataType, table, key, cursor); err != nil {
+ return
+ }
+ if minKey, err = encodeSpecificDataScanMinKey(storeDataType, table, key, nil); err != nil {
+ return
+ }
+ } else {
+ if minKey, err = encodeSpecificDataScanMinKey(storeDataType, table, key, cursor); err != nil {
+ return
+ }
+ if maxKey, err = encodeSpecificDataScanMaxKey(storeDataType, table, key, nil); err != nil {
+ return
+ }
}
return
}
-func encodeSpecificDataScanMinKey(storeDataType byte, key []byte, cursor []byte) ([]byte, error) {
- return encodeSpecificDataScanKey(storeDataType, key, cursor)
+func encodeSpecificDataScanMinKey(storeDataType byte, table []byte, key []byte, cursor []byte) ([]byte, error) {
+ return encodeSpecificDataScanKey(storeDataType, table, key, cursor)
}
-func encodeSpecificDataScanMaxKey(storeDataType byte, key []byte, cursor []byte) ([]byte, error) {
+func encodeSpecificDataScanMaxKey(storeDataType byte, table []byte, key []byte, cursor []byte) ([]byte, error) {
if len(cursor) > 0 {
- return encodeSpecificDataScanKey(storeDataType, key, cursor)
+ return encodeSpecificDataScanKey(storeDataType, table, key, cursor)
}
- k, err := encodeSpecificDataScanKey(storeDataType, key, nil)
+ k, err := encodeSpecificDataScanKey(storeDataType, table, key, nil)
if err != nil {
return nil, err
}
@@ -241,11 +251,7 @@ func encodeSpecificDataScanMaxKey(storeDataType byte, key []byte, cursor []byte)
return k, nil
}
-func encodeSpecificDataScanKey(storeDataType byte, key []byte, cursor []byte) ([]byte, error) {
- table, rk, err := extractTableFromRedisKey(key)
- if err != nil {
- return nil, err
- }
+func encodeSpecificDataScanKey(storeDataType byte, table []byte, rk []byte, cursor []byte) ([]byte, error) {
switch storeDataType {
case HashType:
@@ -260,19 +266,20 @@ func encodeSpecificDataScanKey(storeDataType byte, key []byte, cursor []byte) ([
}
func (db *RockDB) buildSpecificDataScanIterator(storeDataType byte,
- key []byte, cursor []byte,
- count int) (*RangeLimitedIterator, error) {
+ table []byte, key []byte, cursor []byte,
+ count int, reverse bool) (*engine.RangeLimitedIterator, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
- minKey, maxKey, err := buildSpecificDataScanKeyRange(storeDataType, key, cursor)
+ minKey, maxKey, err := buildSpecificDataScanKeyRange(storeDataType, table, key, cursor, reverse)
if err != nil {
return nil, err
}
- it, err := db.buildScanIterator(minKey, maxKey)
+ dbLog.Debugf("scan data %v range: %v, %v, %v", storeDataType, minKey, maxKey, reverse)
+ it, err := db.buildScanIterator(minKey, maxKey, reverse)
if err != nil {
return nil, err
@@ -280,16 +287,24 @@ func (db *RockDB) buildSpecificDataScanIterator(storeDataType byte,
return it, nil
}
-func (db *RockDB) hScanGeneric(key []byte, cursor []byte, count int, match string) ([]common.KVRecord, error) {
+func (db *RockDB) hScanGeneric(key []byte, cursor []byte, count int, match string, reverse bool) ([]common.KVRecord, error) {
count = checkScanCount(count)
r, err := buildMatchRegexp(match)
if err != nil {
return nil, err
}
-
v := make([]common.KVRecord, 0, count)
- it, err := db.buildSpecificDataScanIterator(HashType, key, cursor, count)
+ tn := time.Now().UnixNano()
+ keyInfo, err := db.GetCollVersionKey(tn, HashType, key, true)
+ if err != nil {
+ return nil, err
+ }
+ if keyInfo.IsNotExistOrExpired() {
+ return v, nil
+ }
+
+ it, err := db.buildSpecificDataScanIterator(HashType, keyInfo.Table, keyInfo.VerKey, cursor, count, reverse)
if err != nil {
return nil, err
}
@@ -309,19 +324,29 @@ func (db *RockDB) hScanGeneric(key []byte, cursor []byte, count int, match strin
return v, nil
}
-func (db *RockDB) HScan(key []byte, cursor []byte, count int, match string) ([]common.KVRecord, error) {
- return db.hScanGeneric(key, cursor, count, match)
+func (db *RockDB) HScan(key []byte, cursor []byte, count int, match string, reverse bool) ([]common.KVRecord, error) {
+ return db.hScanGeneric(key, cursor, count, match, reverse)
}
-func (db *RockDB) sScanGeneric(key []byte, cursor []byte, count int, match string) ([][]byte, error) {
+func (db *RockDB) sScanGeneric(key []byte, cursor []byte, count int, match string, reverse bool) ([][]byte, error) {
count = checkScanCount(count)
r, err := buildMatchRegexp(match)
if err != nil {
return nil, err
}
+
+ // TODO: use pool for large alloc
v := make([][]byte, 0, count)
+ tn := time.Now().UnixNano()
+ keyInfo, err := db.GetCollVersionKey(tn, SetType, key, true)
+ if err != nil {
+ return nil, err
+ }
+ if keyInfo.IsNotExistOrExpired() {
+ return v, nil
+ }
- it, err := db.buildSpecificDataScanIterator(SetType, key, cursor, count)
+ it, err := db.buildSpecificDataScanIterator(SetType, keyInfo.Table, keyInfo.VerKey, cursor, count, reverse)
if err != nil {
return nil, err
}
@@ -341,11 +366,11 @@ func (db *RockDB) sScanGeneric(key []byte, cursor []byte, count int, match strin
return v, nil
}
-func (db *RockDB) SScan(key []byte, cursor []byte, count int, match string) ([][]byte, error) {
- return db.sScanGeneric(key, cursor, count, match)
+func (db *RockDB) SScan(key []byte, cursor []byte, count int, match string, reverse bool) ([][]byte, error) {
+ return db.sScanGeneric(key, cursor, count, match, reverse)
}
-func (db *RockDB) zScanGeneric(key []byte, cursor []byte, count int, match string) ([]common.ScorePair, error) {
+func (db *RockDB) zScanGeneric(key []byte, cursor []byte, count int, match string, reverse bool) ([]common.ScorePair, error) {
count = checkScanCount(count)
r, err := buildMatchRegexp(match)
@@ -353,9 +378,18 @@ func (db *RockDB) zScanGeneric(key []byte, cursor []byte, count int, match strin
return nil, err
}
+ tn := time.Now().UnixNano()
+ keyInfo, err := db.GetCollVersionKey(tn, ZSetType, key, true)
+ if err != nil {
+ return nil, err
+ }
+ // TODO: use pool for large alloc
v := make([]common.ScorePair, 0, count)
+ if keyInfo.IsNotExistOrExpired() {
+ return v, nil
+ }
- it, err := db.buildSpecificDataScanIterator(ZSetType, key, cursor, count)
+ it, err := db.buildSpecificDataScanIterator(ZSetType, keyInfo.Table, keyInfo.VerKey, cursor, count, reverse)
if err != nil {
return nil, err
}
@@ -369,7 +403,7 @@ func (db *RockDB) zScanGeneric(key []byte, cursor []byte, count int, match strin
continue
}
- score, err := Float64(it.Value(), nil)
+ score, err := Float64(it.RefValue(), nil)
if err != nil {
return nil, err
}
@@ -380,6 +414,6 @@ func (db *RockDB) zScanGeneric(key []byte, cursor []byte, count int, match strin
return v, nil
}
-func (db *RockDB) ZScan(key []byte, cursor []byte, count int, match string) ([]common.ScorePair, error) {
- return db.zScanGeneric(key, cursor, count, match)
+func (db *RockDB) ZScan(key []byte, cursor []byte, count int, match string, reverse bool) ([]common.ScorePair, error) {
+ return db.zScanGeneric(key, cursor, count, match, reverse)
}
diff --git a/rockredis/scan_test.go b/rockredis/scan_test.go
new file mode 100644
index 00000000..ee3ec104
--- /dev/null
+++ b/rockredis/scan_test.go
@@ -0,0 +1,317 @@
+package rockredis
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
+)
+
+func fillScanKeysForType(t *testing.T, dt string, total int, insertFunc func([]byte, string)) ([][]byte, [][]byte) {
+ keyList1 := make([][]byte, 0, total*2)
+ keyList2 := make([][]byte, 0, total*2)
+ for i := 0; i < total; i++ {
+ k1 := fmt.Sprintf("test:test_%s_scan_key_%05d", dt, i)
+ k2 := fmt.Sprintf("test2:test2_%s_scan_key_%05d", dt, i)
+ keyList1 = append(keyList1, []byte(k1))
+ keyList2 = append(keyList2, []byte(k2))
+ }
+ for i := 0; i < total; i++ {
+ k1 := fmt.Sprintf("test:test_%s_scan_key_longlonglonglonglonglong_%05d", dt, i)
+ k2 := fmt.Sprintf("test2:test2_%s_scan_key_longlonglonglonglonglong_%05d", dt, i)
+ keyList1 = append(keyList1, []byte(k1))
+ keyList2 = append(keyList2, []byte(k2))
+ }
+ for _, key := range keyList1 {
+ insertFunc(key, "test")
+ }
+ for _, key := range keyList2 {
+ insertFunc(key, "test2")
+ }
+ return keyList1, keyList2
+}
+
+func runAndCheckScan(t *testing.T, db *RockDB, dataType common.DataType, total int, keyList1 [][]byte, keyList2 [][]byte) {
+ allKeys := make([][]byte, 0, total*2*2)
+ allKeys = append(allKeys, keyList2...)
+ allKeys = append(allKeys, keyList1...)
+ allRevKeys := make([][]byte, 0, total*2*2)
+ for i := len(allKeys) - 1; i >= 0; i-- {
+ allRevKeys = append(allRevKeys, allKeys[i])
+ }
+ revKeyList2 := make([][]byte, 0, total*2)
+ for i := len(keyList2) - 1; i >= 0; i-- {
+ revKeyList2 = append(revKeyList2, keyList2[i])
+ }
+ type args struct {
+ dataType common.DataType
+ cursor []byte
+ count int
+ match string
+ reverse bool
+ }
+ tests := []struct {
+ name string
+ args args
+ want [][]byte
+ wantErr bool
+ }{
+ {"scan_test", args{dataType, []byte("test:"), total * 4, "", false}, keyList1, false},
+ {"scan_test2", args{dataType, []byte("test2:"), total * 4, "", false}, allKeys, false},
+ {"scan_test_limit", args{dataType, []byte("test:"), total, "", false}, keyList1[:total], false},
+ {"revscan_test", args{dataType, []byte("test:"), total * 4, "", true}, revKeyList2, false},
+ {"revscan_test2", args{dataType, []byte("test2:"), total * 4, "", true}, make([][]byte, 0), false},
+ {"revscan_test_all", args{dataType, keyList1[len(keyList1)-1], total * 4, "", true}, allRevKeys[1:], false},
+ {"revscan_test_all_limit", args{dataType, keyList1[len(keyList1)-1], total, "", true}, allRevKeys[1 : total+1], false},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := db.Scan(tt.args.dataType, tt.args.cursor, tt.args.count, tt.args.match, tt.args.reverse)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("RockDB.Scan() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if len(got) != len(tt.want) {
+ t.Errorf("RockDB.Scan() length = %v, want %v", len(got), len(tt.want))
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("RockDB.Scan() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestRockDB_ScanKV(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ total := 100
+ keyList1, keyList2 := fillScanKeysForType(t, "kv", total, func(key []byte, prefix string) {
+ err := db.KVSet(0, key, key)
+ assert.Nil(t, err)
+ })
+
+ runAndCheckScan(t, db, common.KV, total, keyList1, keyList2)
+}
+
+func TestRockDB_ScanHash(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ total := 100
+ keyList1, keyList2 := fillScanKeysForType(t, "hash", total, func(key []byte, prefix string) {
+ _, err := db.HSet(0, false, key, []byte(prefix+":a"), key)
+ assert.Nil(t, err)
+ _, err = db.HSet(0, false, key, []byte(prefix+":b"), key)
+ assert.Nil(t, err)
+ })
+
+ runAndCheckScan(t, db, common.HASH, total, keyList1, keyList2)
+}
+
+func TestRockDB_ScanList(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ total := 100
+ keyList1, keyList2 := fillScanKeysForType(t, "list", total, func(key []byte, prefix string) {
+ _, err := db.LPush(0, key, key, key)
+ assert.Nil(t, err)
+ })
+
+ runAndCheckScan(t, db, common.LIST, total, keyList1, keyList2)
+}
+
+func TestRockDB_ScanSet(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ total := 100
+ keyList1, keyList2 := fillScanKeysForType(t, "set", total, func(key []byte, prefix string) {
+ _, err := db.SAdd(0, key, []byte(prefix+":a"), []byte(prefix+":b"))
+ assert.Nil(t, err)
+ })
+
+ runAndCheckScan(t, db, common.SET, total, keyList1, keyList2)
+}
+
+func TestRockDB_ScanZSet(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ total := 100
+ keyList1, keyList2 := fillScanKeysForType(t, "zset", total, func(key []byte, prefix string) {
+ _, err := db.ZAdd(0, key, common.ScorePair{1, []byte(prefix + ":a")},
+ common.ScorePair{2, []byte(prefix + ":b")})
+ assert.Nil(t, err)
+ })
+
+ runAndCheckScan(t, db, common.ZSET, total, keyList1, keyList2)
+}
+
+func TestRockDB_HashScan(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ total := 50
+ dt := "hash"
+ fieldList1 := make([][]byte, 0, total*2)
+ fieldList2 := make([][]byte, 0, total*2)
+ for i := 0; i < total; i++ {
+ k1 := fmt.Sprintf("test:%v:%5d", dt, i)
+ k2 := fmt.Sprintf("test2:%v:%5d", dt, i)
+ fieldList1 = append(fieldList1, []byte(k1))
+ fieldList2 = append(fieldList2, []byte(k2))
+ }
+ for _, f := range fieldList1 {
+ db.HSet(0, false, []byte("test:test"), f, f)
+ }
+ for _, f := range fieldList2 {
+ db.HSet(0, false, []byte("test:test2"), f, f)
+ }
+ fvs, err := db.HScan([]byte("test:test"), []byte(""), total*4, "", false)
+ assert.Nil(t, err)
+ assert.Equal(t, len(fieldList1), len(fvs))
+ for i, fv := range fvs {
+ assert.Equal(t, fv.Key, fieldList1[i])
+ assert.Equal(t, fv.Key, fv.Value)
+ }
+ fvs, err = db.HScan([]byte("test:test"), []byte(""), total*4, "", true)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(fvs))
+ fvs, err = db.HScan([]byte("test:test"), fieldList1[len(fieldList1)-1], total*4, "", true)
+ assert.Nil(t, err)
+ assert.Equal(t, len(fieldList1)-1, len(fvs))
+ for i, fv := range fvs {
+ assert.Equal(t, fv.Key, fieldList1[len(fieldList1)-2-i])
+ assert.Equal(t, fv.Key, fv.Value)
+ }
+ fvs, err = db.HScan([]byte("test:test2"), []byte(""), total*4, "", false)
+ assert.Nil(t, err)
+ assert.Equal(t, len(fieldList2), len(fvs))
+ for i, fv := range fvs {
+ assert.Equal(t, fv.Key, fieldList2[i])
+ assert.Equal(t, fv.Key, fv.Value)
+ }
+ fvs, err = db.HScan([]byte("test:test2"), []byte(""), total*4, "", true)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(fvs))
+ fvs, err = db.HScan([]byte("test:test2"), fieldList2[len(fieldList2)-1], total*4, "", true)
+ assert.Nil(t, err)
+ assert.Equal(t, len(fieldList2)-1, len(fvs))
+}
+
+func TestRockDB_SetScan(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ total := 50
+ dt := "set"
+ fieldList1 := make([][]byte, 0, total*2)
+ fieldList2 := make([][]byte, 0, total*2)
+ for i := 0; i < total; i++ {
+ k1 := fmt.Sprintf("test:%v:%5d", dt, i)
+ k2 := fmt.Sprintf("test2:%v:%5d", dt, i)
+ fieldList1 = append(fieldList1, []byte(k1))
+ fieldList2 = append(fieldList2, []byte(k2))
+ }
+ for _, f := range fieldList1 {
+ _, err := db.SAdd(0, []byte("test:test"), f)
+ assert.Nil(t, err)
+ }
+ for _, f := range fieldList2 {
+ _, err := db.SAdd(0, []byte("test:test2"), f)
+ assert.Nil(t, err)
+ }
+ mems, err := db.SScan([]byte("test:test"), []byte(""), total*4, "", false)
+ assert.Nil(t, err)
+ assert.Equal(t, len(fieldList1), len(mems))
+ assert.Equal(t, fieldList1, mems)
+
+ mems, err = db.SScan([]byte("test:test"), []byte(""), total*4, "", true)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(mems))
+ mems, err = db.SScan([]byte("test:test"), fieldList1[len(fieldList1)-1], total*4, "", true)
+ assert.Nil(t, err)
+ assert.Equal(t, len(fieldList1)-1, len(mems))
+ for i, v := range mems {
+ assert.Equal(t, v, fieldList1[len(fieldList1)-2-i])
+ }
+ mems, err = db.SScan([]byte("test:test2"), []byte(""), total*4, "", false)
+ assert.Nil(t, err)
+ assert.Equal(t, len(fieldList2), len(mems))
+ for i, v := range mems {
+ assert.Equal(t, v, fieldList2[i])
+ }
+ mems, err = db.SScan([]byte("test:test2"), []byte(""), total*4, "", true)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(mems))
+ mems, err = db.SScan([]byte("test:test2"), fieldList2[len(fieldList2)-1], total*4, "", true)
+ assert.Nil(t, err)
+ assert.Equal(t, len(fieldList2)-1, len(mems))
+}
+
+func TestRockDB_ZsetScan(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ total := 50
+ dt := "zset"
+ fieldList1 := make([][]byte, 0, total*2)
+ fieldList2 := make([][]byte, 0, total*2)
+ for i := 0; i < total; i++ {
+ k1 := fmt.Sprintf("test:%v:%5d", dt, i)
+ k2 := fmt.Sprintf("test2:%v:%5d", dt, i)
+ fieldList1 = append(fieldList1, []byte(k1))
+ fieldList2 = append(fieldList2, []byte(k2))
+ }
+ for _, f := range fieldList1 {
+ db.ZAdd(0, []byte("test:test"), common.ScorePair{1, f})
+ }
+ for _, f := range fieldList2 {
+ db.ZAdd(0, []byte("test:test2"),
+ common.ScorePair{2, f})
+ }
+ fvs, err := db.ZScan([]byte("test:test"), []byte(""), total*4, "", false)
+ assert.Nil(t, err)
+ assert.Equal(t, len(fieldList1), len(fvs))
+ for i, fv := range fvs {
+ assert.Equal(t, fv.Member, fieldList1[i])
+ assert.Equal(t, fv.Score, float64(1))
+ }
+ fvs, err = db.ZScan([]byte("test:test"), []byte(""), total*4, "", true)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(fvs))
+ fvs, err = db.ZScan([]byte("test:test"), fieldList1[len(fieldList1)-1], total*4, "", true)
+ assert.Nil(t, err)
+ assert.Equal(t, len(fieldList1)-1, len(fvs))
+ for i, fv := range fvs {
+ assert.Equal(t, fv.Member, fieldList1[len(fieldList1)-2-i])
+ assert.Equal(t, fv.Score, float64(1))
+ }
+ fvs, err = db.ZScan([]byte("test:test2"), []byte(""), total*4, "", false)
+ assert.Nil(t, err)
+ assert.Equal(t, len(fieldList2), len(fvs))
+ for i, fv := range fvs {
+ assert.Equal(t, fv.Member, fieldList2[i])
+ assert.Equal(t, fv.Score, float64(2))
+ }
+ fvs, err = db.ZScan([]byte("test:test2"), []byte(""), total*4, "", true)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(fvs))
+ fvs, err = db.ZScan([]byte("test:test2"), fieldList2[len(fieldList2)-1], total*4, "", true)
+ assert.Nil(t, err)
+ assert.Equal(t, len(fieldList2)-1, len(fvs))
+}
diff --git a/rockredis/t_bitmap.go b/rockredis/t_bitmap.go
new file mode 100644
index 00000000..d0392853
--- /dev/null
+++ b/rockredis/t_bitmap.go
@@ -0,0 +1,421 @@
+package rockredis
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ math "math"
+ "time"
+
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
+)
+
+const (
+ bitmapSegBits = 1024 * 8
+ bitmapSegBytes = 1024
+ MaxBitOffsetV2 = math.MaxUint32 - 1
+)
+
+var (
+ errBitmapKey = errors.New("invalid bitmap key")
+ errBitmapMetaKey = errors.New("invalid bitmap meta key")
+ errBitmapSize = errors.New("invalid bitmap size")
+)
+
+func convertRedisKeyToDBBitmapKey(key []byte, index int64) ([]byte, error) {
+ table, rk, err := extractTableFromRedisKey(key)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := checkKeySize(rk); err != nil {
+ return nil, err
+ }
+ return encodeBitmapKey(table, rk, index)
+}
+
+func bitEncodeMetaKey(key []byte) []byte {
+ buf := make([]byte, len(key)+1+len(metaPrefix))
+ pos := 0
+ buf[pos] = BitmapMetaType
+
+ pos++
+ copy(buf[pos:], metaPrefix)
+ pos += len(metaPrefix)
+ copy(buf[pos:], key)
+ return buf
+}
+
+func bitDecodeMetaKey(ek []byte) ([]byte, error) {
+ pos := 0
+ if pos+1+len(metaPrefix) > len(ek) || ek[pos] != BitmapMetaType {
+ return nil, errBitmapMetaKey
+ }
+ pos++
+ pos += len(metaPrefix)
+ return ek[pos:], nil
+}
+
+func encodeBitmapKey(table []byte, key []byte, index int64) ([]byte, error) {
+ buf := make([]byte, getDataTablePrefixBufLen(BitmapType, table))
+ pos := encodeDataTablePrefixToBuf(buf, BitmapType, table)
+ var err error
+ buf, err = EncodeMemCmpKey(buf[:pos], key, colStartSep, index)
+ return buf, err
+}
+
+func decodeBitmapKey(ek []byte) ([]byte, []byte, int64, error) {
+ table, pos, err := decodeDataTablePrefixFromBuf(ek, BitmapType)
+ if err != nil {
+ return nil, nil, 0, err
+ }
+
+ rets, err := Decode(ek[pos:], 3)
+ if err != nil {
+ return nil, nil, 0, err
+ }
+ rk, _ := rets[0].([]byte)
+ index, _ := rets[2].(int64)
+ return table, rk, index, nil
+}
+
+func encodeBitmapStartKey(table []byte, key []byte, index int64) ([]byte, error) {
+ return encodeBitmapKey(table, key, index)
+}
+
+func encodeBitmapStopKey(table []byte, key []byte) ([]byte, error) {
+ buf := make([]byte, getDataTablePrefixBufLen(BitmapType, table))
+ pos := encodeDataTablePrefixToBuf(buf, BitmapType, table)
+ var err error
+ buf, err = EncodeMemCmpKey(buf[:pos], key, colStartSep+1, 0)
+ return buf, err
+}
+
+func (db *RockDB) bitSetToNew(ts int64, wb engine.WriteBatch, bmSize int64, key []byte, offset int64, on int) (int64, error) {
+ keyInfo, err := db.prepareCollKeyForWrite(ts, BitmapType, key, nil)
+ if err != nil {
+ return 0, err
+ }
+ oldh := keyInfo.OldHeader
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
+
+ index := (offset / bitmapSegBits) * bitmapSegBytes
+ bmk, err := encodeBitmapKey(table, rk, index)
+ if err != nil {
+ return 0, err
+ }
+ bmv, err := db.GetBytesNoLock(bmk)
+ if err != nil {
+ return 0, err
+ }
+ byteOffset := int((offset / 8) % bitmapSegBytes)
+ if byteOffset >= len(bmv) {
+ expandSize := len(bmv)
+ if byteOffset >= 2*len(bmv) {
+ expandSize = byteOffset - len(bmv) + 1
+ }
+ bmv = append(bmv, make([]byte, expandSize)...)
+ if int64(len(bmv))+index > bmSize {
+ bmSize = int64(len(bmv)) + index
+ }
+ }
+ bit := 7 - uint8(uint32(offset)&0x7)
+ byteVal := bmv[byteOffset]
+ oldBit := byteVal & (1 << bit)
+ byteVal &= ^(1 << bit)
+ byteVal |= (uint8(on&0x1) << bit)
+ bmv[byteOffset] = byteVal
+ wb.Put(bmk, bmv)
+ db.updateBitmapMeta(ts, wb, oldh, key, bmSize)
+ var ret int64
+ if oldBit > 0 {
+ ret = 1
+ }
+ return ret, err
+}
+
+// BitSetV2 sets the bitmap data using the new segmented format below, where each
+// segment holds at most bitmapSegBytes bytes:
+// key:0    -> 0(first bit) 0 0 0 0 0 0 0 (last bit) | (second byte, 8 bits) | ... | (last byte, 8 bits)
+// key:1024 -> same layout as key:0
+// key:2048 -> same layout as key:0
+// ...
+// key:512KB ->
+// ...
+// key:512MB ->
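+//
+// As a worked example of this mapping (the numbers are illustrative only):
+// offset 8200 lands in the segment starting at index
+// (8200/bitmapSegBits)*bitmapSegBytes = 1024, at byte (8200/8)%bitmapSegBytes = 1
+// within that segment, and in bit 7-(8200&0x7) = 7, the most significant bit of that byte.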
+func (db *RockDB) BitSetV2(ts int64, key []byte, offset int64, on int) (int64, error) {
+ if (on & ^1) != 0 {
+ return 0, fmt.Errorf("bit should be 0 or 1, got %d", on)
+ }
+ if offset > MaxBitOffsetV2 || offset < 0 {
+ return 0, ErrBitOverflow
+ }
+ if err := checkKeySize(key); err != nil {
+ return 0, err
+ }
+
+ wb := db.wb
+ // if the new v2 format does not exist yet, merge the old data into it first.
+ // if the new v2 format already exists, the old data has been merged before, so the old format can be ignored.
+ // old data is simply split into 1KB segments.
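+ // (for example, a 2.5KB old value becomes three segments at indexes 0, 1024
+ // and 2048, holding 1024, 1024 and 512 bytes respectively)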
+ _, bmSize, _, ok, err := db.getBitmapMeta(ts, key, false)
+ if err != nil {
+ return 0, err
+ }
+ if !ok {
+ // convert old data to new
+ table, oldkey, err := convertRedisKeyToDBKVKey(key)
+ if err != nil {
+ return 0, err
+ }
+ var v []byte
+ if v, err = db.GetBytesNoLock(oldkey); err != nil {
+ return 0, err
+ }
+ if v == nil {
+ db.IncrTableKeyCount(table, 1, wb)
+ } else if len(v) >= tsLen {
+ v = v[:len(v)-tsLen]
+ table, rk, _ := extractTableFromRedisKey(key)
+ for i := 0; i < len(v); i += bitmapSegBytes {
+ index := int64(i)
+ bmk, err := encodeBitmapKey(table, rk, index)
+ if err != nil {
+ return 0, err
+ }
+ var segv []byte
+ if len(v) <= i+bitmapSegBytes {
+ segv = v[i:]
+ } else {
+ segv = v[i : i+bitmapSegBytes]
+ }
+ bmSize += int64(len(segv))
+ wb.Put(bmk, segv)
+ }
+ if int64(len(v)) != bmSize {
+ panic(fmt.Errorf("bitmap size mismatch: %v, %v ", v, bmSize))
+ }
+ wb.Delete(oldkey)
+ // we need to flush the write batch before we modify the new bit
+ err = db.MaybeCommitBatch()
+ if err != nil {
+ return 0, err
+ }
+ }
+ }
+ oldBit, err := db.bitSetToNew(ts, wb, bmSize, key, offset, on)
+ if err != nil {
+ return 0, err
+ }
+ err = db.MaybeCommitBatch()
+ return oldBit, err
+}
+
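+// updateBitmapMeta stores the bitmap user meta as 16 bytes: an 8-byte big-endian
+// bitmap size followed by an 8-byte big-endian write timestamp (decoded again in
+// getBitmapMeta below and returned by BitGetVer as the version).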
+func (db *RockDB) updateBitmapMeta(ts int64, wb engine.WriteBatch, oldh *headerMetaValue, key []byte, bmSize int64) error {
+ metaKey := bitEncodeMetaKey(key)
+ buf := make([]byte, 16)
+ binary.BigEndian.PutUint64(buf[:8], uint64(bmSize))
+ binary.BigEndian.PutUint64(buf[8:], uint64(ts))
+ oldh.UserData = buf
+ nv := oldh.encodeWithData()
+ wb.Put(metaKey, nv)
+ return nil
+}
+
+func (db *RockDB) getBitmapMeta(tn int64, key []byte, lock bool) (*headerMetaValue, int64, int64, bool, error) {
+ oldh, expired, err := db.collHeaderMeta(tn, BitmapType, key, lock)
+ if err != nil {
+ return oldh, 0, 0, false, err
+ }
+ meta := oldh.UserData
+ if len(meta) == 0 {
+ return oldh, 0, 0, false, nil
+ }
+ if len(meta) < 16 {
+ return oldh, 0, 0, false, errors.New("invalid bitmap meta value")
+ }
+ s, err := Int64(meta[:8], nil)
+ if err != nil {
+ return oldh, s, 0, !expired, err
+ }
+ timestamp, err := Int64(meta[8:16], err)
+ return oldh, s, timestamp, !expired, err
+}
+
+func (db *RockDB) BitGetVer(key []byte) (int64, error) {
+ _, _, ts, ok, err := db.getBitmapMeta(time.Now().UnixNano(), key, true)
+ if err != nil {
+ return 0, err
+ }
+ if !ok && ts == 0 {
+ return db.KVGetVer(key)
+ }
+ return ts, err
+}
+
+func (db *RockDB) BitGetV2(key []byte, offset int64) (int64, error) {
+ // read the new v2 format first; if it does not exist, fall back to the old version
+ tn := time.Now().UnixNano()
+ oldh, _, _, ok, err := db.getBitmapMeta(tn, key, true)
+ if err != nil {
+ return 0, err
+ }
+ if !ok {
+ return db.bitGetOld(key, offset)
+ }
+ table, rk, err := extractTableFromRedisKey(key)
+ if err != nil {
+ return 0, err
+ }
+ rk = db.expiration.encodeToVersionKey(BitmapType, oldh, rk)
+ index := (offset / bitmapSegBits) * bitmapSegBytes
+ bitk, err := encodeBitmapKey(table, rk, index)
+ if err != nil {
+ return 0, err
+ }
+ v, err := db.GetBytes(bitk)
+ if err != nil {
+ return 0, err
+ }
+ if v == nil {
+ return 0, nil
+ }
+ byteOffset := uint32(offset/8) % bitmapSegBytes
+ if byteOffset >= uint32(len(v)) {
+ return 0, nil
+ }
+ byteVal := v[byteOffset]
+ bit := 7 - uint8(uint32(offset)&0x7)
+ if (byteVal & (1 << bit)) > 0 {
+ return 1, nil
+ }
+ return 0, nil
+}
+
+func (db *RockDB) BitCountV2(key []byte, start, end int64) (int64, error) {
+ // read the new v2 format first; if it does not exist, fall back to the old version
+ tn := time.Now().UnixNano()
+ oldh, bmSize, _, ok, err := db.getBitmapMeta(tn, key, true)
+ if err != nil {
+ return 0, err
+ }
+ if !ok {
+ return db.bitCountOld(key, start, end)
+ }
+
+ start, end = getRange(start, end, bmSize)
+ if start > end {
+ return 0, nil
+ }
+ table, rk, err := extractTableFromRedisKey(key)
+ if err != nil {
+ return 0, err
+ }
+ rk = db.expiration.encodeToVersionKey(BitmapType, oldh, rk)
+
+ total := int64(0)
+ startI := start / bitmapSegBytes
+ stopI := end / bitmapSegBytes
+
+ iterStart, _ := encodeBitmapStartKey(table, rk, int64(startI)*bitmapSegBytes)
+ iterStop, _ := encodeBitmapStopKey(table, rk)
+ it, err := db.NewDBRangeIterator(iterStart, iterStop, common.RangeROpen, false)
+ if err != nil {
+ return 0, err
+ }
+ defer it.Close()
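+ // only the first and last segments overlapping [start, end] need partial
+ // counting; every full segment in between is popcounted whole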
+ for ; it.Valid(); it.Next() {
+ rawk := it.RefKey()
+ _, _, index, err := decodeBitmapKey(rawk)
+ if err != nil {
+ return 0, err
+ }
+ bmv := it.RefValue()
+ if bmv == nil {
+ continue
+ }
+ byteStart := 0
+ byteEnd := len(bmv)
+ if index == int64(startI)*bitmapSegBytes {
+ byteStart = int(start) % bitmapSegBytes
+ }
+ if index == int64(stopI)*bitmapSegBytes {
+ byteEnd = int(end)%bitmapSegBytes + 1
+ if byteEnd > len(bmv) {
+ byteEnd = len(bmv)
+ }
+ }
+ total += popcountBytes(bmv[byteStart:byteEnd])
+ }
+ return total, nil
+}
+
+func (db *RockDB) BitClear(ts int64, key []byte) (int64, error) {
+ oldh, isExpired, err := db.collHeaderMeta(ts, BitmapType, key, false)
+ if err != nil {
+ return 0, err
+ }
+ meta := oldh.UserData
+ bmSize := int64(0)
+ if len(meta) >= 8 {
+ bmSize, _ = Int64(meta[:8], nil)
+ }
+ wb := db.wb
+ table, rk, err := extractTableFromRedisKey(key)
+ if err != nil {
+ return 0, err
+ }
+ // nothing to delete if the key is already expired or empty
+ if isExpired || bmSize == 0 {
+ return 0, nil
+ }
+ metaKey := bitEncodeMetaKey(key)
+ wb.Delete(metaKey)
+ if bmSize > 0 {
+ db.IncrTableKeyCount(table, -1, wb)
+ }
+ if db.cfg.ExpirationPolicy == common.WaitCompact {
+ // for the compact TTL policy, deleting the meta is enough
+ } else {
+ rk = db.expiration.encodeToVersionKey(BitmapType, oldh, rk)
+ iterStart, _ := encodeBitmapStartKey(table, rk, 0)
+ iterStop, _ := encodeBitmapStopKey(table, rk)
+ if bmSize/bitmapSegBytes > RangeDeleteNum {
+ wb.DeleteRange(iterStart, iterStop)
+ } else {
+ it, err := db.NewDBRangeIterator(iterStart, iterStop, common.RangeROpen, false)
+ if err != nil {
+ return 0, err
+ }
+ for ; it.Valid(); it.Next() {
+ rawk := it.RefKey()
+ wb.Delete(rawk)
+ }
+ it.Close()
+ }
+
+ db.delExpire(BitmapType, key, nil, false, wb)
+ }
+ err = db.MaybeCommitBatch()
+ return 1, err
+}
+
+func (db *RockDB) BitKeyExist(key []byte) (int64, error) {
+ n, err := db.collKeyExists(BitmapType, key)
+ if err != nil {
+ return 0, err
+ }
+ if n == 0 {
+ return db.KVExists(key)
+ }
+ return 1, nil
+}
+
+func (db *RockDB) BitExpire(ts int64, key []byte, ttlSec int64) (int64, error) {
+ return db.collExpire(ts, BitmapType, key, ttlSec)
+}
+
+func (db *RockDB) BitPersist(ts int64, key []byte) (int64, error) {
+ return db.collPersist(ts, BitmapType, key)
+}
diff --git a/rockredis/t_bitmap_test.go b/rockredis/t_bitmap_test.go
new file mode 100644
index 00000000..a580f81e
--- /dev/null
+++ b/rockredis/t_bitmap_test.go
@@ -0,0 +1,587 @@
+package rockredis
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestBitmapV2(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ bitsForOne := make(map[int]bool)
+ key := []byte("test:testdb_kv_bitv2")
+ tn := time.Now().UnixNano()
+ n, err := db.BitSetV2(tn, key, 5, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ bitsForOne[5] = true
+
+ n, err = db.BitGetV2(key, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.BitGetV2(key, 5)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.BitGetVer(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(tn), n)
+
+ n, err = db.BitGetV2(key, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.BitKeyExist(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ tn = time.Now().UnixNano()
+ n, err = db.BitSetV2(tn, key, 5, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ delete(bitsForOne, 5)
+
+ _, err = db.BitSetV2(tn, key, -5, 0)
+ assert.NotNil(t, err)
+
+ n, err = db.BitGetVer(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(tn), n)
+
+ for i := 0; i < bitmapSegBits*3; i++ {
+ n, err = db.BitGetV2(key, int64(i))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ }
+
+ // insert one bit at start and end of each segment
+ bitsForOne[0] = true
+ bitsForOne[bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBytes] = true
+ bitsForOne[bitmapSegBytes+1] = true
+ bitsForOne[bitmapSegBytes*2-1] = true
+ bitsForOne[bitmapSegBytes*2] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes+1] = true
+
+ bitsForOne[bitmapSegBits-1] = true
+ bitsForOne[bitmapSegBits] = true
+ bitsForOne[bitmapSegBits+1] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes+1] = true
+ bitsForOne[bitmapSegBits*2-bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits*2-bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits*2-1] = true
+ bitsForOne[bitmapSegBits*2] = true
+ bitsForOne[bitmapSegBits*2+1] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes+1] = true
+
+ for bpos := range bitsForOne {
+ n, err = db.BitSetV2(0, key, int64(bpos), 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.BitGetV2(key, int64(bpos))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ }
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(bitsForOne)), n)
+
+ for i := 0; i < bitmapSegBits*3; i++ {
+ n, err = db.BitGetV2(key, int64(i))
+ assert.Nil(t, err)
+ if _, ok := bitsForOne[i]; ok {
+ assert.Equal(t, int64(1), n)
+ } else {
+ assert.Equal(t, int64(0), n)
+ }
+ }
+
+ _, err = db.BitSetV2(0, key, MaxBitOffsetV2+1, 0)
+ assert.NotNil(t, err)
+ n, err = db.BitSetV2(0, key, MaxBitOffsetV2, 1)
+ assert.Nil(t, err)
+ n, err = db.BitGetV2(key, MaxBitOffsetV2)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(bitsForOne)+1), n)
+ n, err = db.BitKeyExist(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+}
+
+func TestBitmapV2Clear(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ bitsForOne := make(map[int]bool)
+ key := []byte("test:testdb_kv_bitv2_clear")
+ n, err := db.BitSetV2(0, key, 5, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ bitsForOne[5] = true
+
+ n, err = db.BitKeyExist(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ // insert one bit at start and end of each segment
+ bitsForOne[0] = true
+ bitsForOne[bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBytes] = true
+ bitsForOne[bitmapSegBytes+1] = true
+ bitsForOne[bitmapSegBytes*2-1] = true
+ bitsForOne[bitmapSegBytes*2] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes+1] = true
+
+ bitsForOne[bitmapSegBits-1] = true
+ bitsForOne[bitmapSegBits] = true
+ bitsForOne[bitmapSegBits+1] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes+1] = true
+ bitsForOne[bitmapSegBits*2-bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits*2-bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits*2-1] = true
+ bitsForOne[bitmapSegBits*2] = true
+ bitsForOne[bitmapSegBits*2+1] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes+1] = true
+
+ for bpos := range bitsForOne {
+ n, err = db.BitSetV2(0, key, int64(bpos), 1)
+ assert.Nil(t, err)
+ if bpos == 5 {
+ assert.Equal(t, int64(1), n)
+ } else {
+ assert.Equal(t, int64(0), n)
+ }
+ n, err = db.BitGetV2(key, int64(bpos))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ }
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(bitsForOne)), n)
+
+ db.BitClear(0, key)
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.BitKeyExist(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.BitSetV2(0, key, 5, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ for i := 6; i < bitmapSegBits*3; i++ {
+ n, err = db.BitGetV2(key, int64(i))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ }
+
+ n, err = db.BitGetV2(key, 5)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.BitKeyExist(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+}
+
+func TestDBBitClearInCompactTTL(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key := []byte("test:testdb_bit_clear_compact_a")
+
+ ts := time.Now().UnixNano()
+ db.BitSetV2(ts, key, 1, 1)
+
+ n, err := db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ ts = time.Now().UnixNano()
+ n, err = db.BitClear(ts, key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.BitGetV2(key, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.BitKeyExist(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ // renew
+ ts = time.Now().UnixNano()
+ db.BitSetV2(ts, key, 2, 1)
+
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.BitGetV2(key, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.BitGetV2(key, 2)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.BitKeyExist(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+}
+
+func TestBitmapV2FromOld(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key := []byte("test:testdb_kv_bit_convert")
+ tn := time.Now().UnixNano()
+ n, err := db.BitSetOld(tn, key, 5, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.BitGetV2(key, int64(5))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.bitCountOld(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.BitGetVer(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(tn), n)
+
+ n, err = db.BitKeyExist(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.BitSetV2(tn, key, 6, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+ n, err = db.BitGetV2(key, int64(6))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.BitGetV2(key, int64(5))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+ // old data should be deleted
+ n, err = db.bitGetOld(key, int64(5))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.BitGetVer(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(tn), n)
+
+ n, err = db.BitKeyExist(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ key = []byte("test:testdb_kv_bit_convert2")
+
+ n, err = db.BitKeyExist(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ bitsForOne := make(map[int]bool)
+ // insert one bit at start and end of each segment
+ bitsForOne[0] = true
+ bitsForOne[bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBytes] = true
+ bitsForOne[bitmapSegBytes+1] = true
+ bitsForOne[bitmapSegBytes*2-1] = true
+ bitsForOne[bitmapSegBytes*2] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes+1] = true
+
+ bitsForOne[bitmapSegBits-1] = true
+ bitsForOne[bitmapSegBits] = true
+ bitsForOne[bitmapSegBits+1] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes+1] = true
+ bitsForOne[bitmapSegBits*2-bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits*2-bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits*2-1] = true
+ bitsForOne[bitmapSegBits*2] = true
+ bitsForOne[bitmapSegBits*2+1] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes+1] = true
+
+ for bpos := range bitsForOne {
+ n, err = db.BitSetOld(tn, key, int64(bpos), 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.bitGetOld(key, int64(bpos))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ // new v2 should read old
+ n, err = db.BitGetV2(key, int64(bpos))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ }
+
+ for i := 10; i < bitmapSegBits*3; i++ {
+ n, err = db.bitGetOld(key, int64(i))
+ assert.Nil(t, err)
+ if _, ok := bitsForOne[i]; ok {
+ assert.Equal(t, int64(1), n)
+ } else {
+ assert.Equal(t, int64(0), n)
+ }
+ }
+ n, err = db.bitCountOld(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(bitsForOne)), n)
+ // new v2 should read old
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(bitsForOne)), n)
+
+ n, err = db.BitKeyExist(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.BitSetV2(tn, key, bitmapSegBits, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(bitsForOne)), n)
+
+ n, err = db.BitKeyExist(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ // test bitmap convert across two segments
+ n, err = db.BitCountV2(key, bitmapSegBytes-1, bitmapSegBytes*2+1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(11), n)
+
+ n, err = db.BitGetVer(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(tn), n)
+ for i := 10; i < bitmapSegBits*3; i++ {
+ n, err = db.BitGetV2(key, int64(i))
+ assert.Nil(t, err)
+ if _, ok := bitsForOne[i]; ok {
+ assert.Equal(t, int64(1), n)
+ } else {
+ assert.Equal(t, int64(0), n)
+ }
+ }
+ key = []byte("test:testdb_kv_bit_convert3")
+ err = db.KVSet(tn, key, []byte("foobar"))
+ assert.Nil(t, err)
+ n, err = db.BitKeyExist(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(26), n)
+
+ n, err = db.BitCountV2(key, 0, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(4), n)
+
+ n, err = db.BitCountV2(key, 1, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(6), n)
+
+ n, err = db.BitGetVer(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(tn), n)
+
+ n, err = db.BitGetV2(key, 0)
+ assert.Nil(t, err)
+
+ // convert to new
+ n, err = db.BitSetV2(tn, key, 0, int(n))
+ assert.Nil(t, err)
+
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(26), n)
+
+ n, err = db.BitCountV2(key, 0, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(4), n)
+
+ n, err = db.BitCountV2(key, 1, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(6), n)
+
+ n, err = db.BitKeyExist(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+}
+
+func BenchmarkBitCountV2(b *testing.B) {
+ db := getTestDBForBench()
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key := []byte("test:testdb_kv_bit_benchcount")
+ db.BitSetV2(0, key, MaxBitOffsetV2, 1)
+ for i := 0; i < b.N; i++ {
+ n, _ := db.BitCountV2(key, 0, -1)
+ if n != int64(1) {
+ panic("count error")
+ }
+ }
+}
+
+func BenchmarkBitSetV2(b *testing.B) {
+ db := getTestDBForBench()
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ b.StopTimer()
+ key := []byte("test:testdb_kv_bit_benchset")
+ bitsForOne := make(map[int]bool)
+ // insert one bit at start and end of each segment
+ bitsForOne[0] = true
+ bitsForOne[bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBytes] = true
+ bitsForOne[bitmapSegBytes+1] = true
+ bitsForOne[bitmapSegBytes*2-1] = true
+ bitsForOne[bitmapSegBytes*2] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes+1] = true
+
+ bitsForOne[bitmapSegBits-1] = true
+ bitsForOne[bitmapSegBits] = true
+ bitsForOne[bitmapSegBits+1] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes+1] = true
+ bitsForOne[bitmapSegBits*2-bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits*2-bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits*2-1] = true
+ bitsForOne[bitmapSegBits*2] = true
+ bitsForOne[bitmapSegBits*2+1] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes+1] = true
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ // test set 0 to 1
+ for bpos := range bitsForOne {
+ db.BitSetV2(0, key, int64(bpos), 1)
+ }
+ // test set 1 to 1
+ for bpos := range bitsForOne {
+ db.BitSetV2(0, key, int64(bpos), 1)
+ }
+ // test set 1 to 0
+ for bpos := range bitsForOne {
+ db.BitSetV2(0, key, int64(bpos), 0)
+ }
+ // test set 0 to 0
+ for bpos := range bitsForOne {
+ db.BitSetV2(0, key, int64(bpos), 0)
+ }
+ }
+ b.StopTimer()
+}
+
+func BenchmarkBitGetV2(b *testing.B) {
+ db := getTestDBForBench()
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key := []byte("test:testdb_kv_bit_benchget")
+ bitsForOne := make(map[int]bool)
+ // insert one bit at start and end of each segment
+ bitsForOne[0] = true
+ bitsForOne[bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBytes] = true
+ bitsForOne[bitmapSegBytes+1] = true
+ bitsForOne[bitmapSegBytes*2-1] = true
+ bitsForOne[bitmapSegBytes*2] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes+1] = true
+
+ bitsForOne[bitmapSegBits-1] = true
+ bitsForOne[bitmapSegBits] = true
+ bitsForOne[bitmapSegBits+1] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes+1] = true
+ bitsForOne[bitmapSegBits*2-bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits*2-bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits*2-1] = true
+ bitsForOne[bitmapSegBits*2] = true
+ bitsForOne[bitmapSegBits*2+1] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes+1] = true
+ for bpos := range bitsForOne {
+ db.BitSetV2(0, key, int64(bpos), 1)
+ }
+
+ for i := 0; i < b.N; i++ {
+ // test get 1
+ for bpos := range bitsForOne {
+ db.BitGetV2(key, int64(bpos))
+ }
+ // test getting bits that are not set
+ for i := 0; i < 10; i++ {
+ db.BitGetV2(key, int64(i))
+ }
+ for i := MaxBitOffsetV2 - bitmapSegBits; i < MaxBitOffsetV2-bitmapSegBits+10; i++ {
+ db.BitGetV2(key, int64(i))
+ }
+ }
+}
diff --git a/rockredis/t_collections.go b/rockredis/t_collections.go
new file mode 100644
index 00000000..8cda3673
--- /dev/null
+++ b/rockredis/t_collections.go
@@ -0,0 +1,276 @@
+package rockredis
+
+import (
+ "encoding/binary"
+ "errors"
+ "time"
+
+ "github.com/youzan/ZanRedisDB/common"
+)
+
+const (
+ collStartSep byte = ':'
+ collStopSep byte = collStartSep + 1
+ // collections longer than this are reported to the length-distribution metric
+ collectionLengthForMetric int64 = 128
+)
+
+var (
+ errCollKey = errors.New("invalid collection key")
+ errCollTypeMismatch = errors.New("decoded collection type mismatch")
+)
+
+// note: the subkey encoding differs for list/bitmap/zscore
+func encodeCollSubKey(dt byte, table []byte, key []byte, subkey []byte) []byte {
+ if dt != HashType && dt != SetType && dt != ZSetType {
+ panic(errDataType)
+ }
+ buf := make([]byte, getDataTablePrefixBufLen(dt, table)+len(key)+len(subkey)+1+2)
+
+ pos := encodeDataTablePrefixToBuf(buf, dt, table)
+
+ binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
+ pos += 2
+
+ copy(buf[pos:], key)
+ pos += len(key)
+
+ buf[pos] = collStartSep
+ pos++
+ copy(buf[pos:], subkey)
+
+ return buf
+}
+
+// note: the subkey encoding differs for list/bitmap/zscore
+func decodeCollSubKey(dbk []byte) (byte, []byte, []byte, []byte, error) {
+ if len(dbk) < 1 {
+ return 0, nil, nil, nil, errDataType
+ }
+ dt := dbk[0]
+ if dt != HashType && dt != SetType && dt != ZSetType {
+ return dt, nil, nil, nil, errDataType
+ }
+ table, pos, err := decodeDataTablePrefixFromBuf(dbk, dt)
+ if err != nil {
+ return dt, nil, nil, nil, err
+ }
+
+ if pos+2 > len(dbk) {
+ return dt, nil, nil, nil, errCollKey
+ }
+
+ keyLen := int(binary.BigEndian.Uint16(dbk[pos:]))
+ pos += 2
+
+ if keyLen+pos >= len(dbk) {
+ return dt, table, nil, nil, errCollKey
+ }
+
+ key := dbk[pos : pos+keyLen]
+ pos += keyLen
+
+ if dbk[pos] != collStartSep {
+ return dt, table, nil, nil, errCollKey
+ }
+ pos++
+ subkey := dbk[pos:]
+ return dt, table, key, subkey, nil
+}
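+
+// A minimal round-trip sketch of the two helpers above (the table/key/field
+// values are made up for illustration):
+//   dbk := encodeCollSubKey(HashType, []byte("tbl"), []byte("k1"), []byte("f1"))
+//   dt, table, key, sub, err := decodeCollSubKey(dbk)
+//   // err == nil, dt == HashType, table == "tbl", key == "k1", sub == "f1"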
+
+// decode the versioned collection key (used under the wait-compact policy) and convert it to a raw redis key
+func convertCollDBKeyToRawKey(dbk []byte) (byte, []byte, int64, error) {
+ if len(dbk) < 1 {
+ return 0, nil, 0, errDataType
+ }
+ dt := dbk[0]
+ var table []byte
+ var verk []byte
+ var err error
+ switch dt {
+ case HashType, SetType, ZSetType:
+ _, table, verk, _, err = decodeCollSubKey(dbk)
+ case ListType:
+ table, verk, _, err = lDecodeListKey(dbk)
+ case ZScoreType:
+ table, verk, _, _, err = zDecodeScoreKey(dbk)
+ case BitmapType:
+ table, verk, _, err = decodeBitmapKey(dbk)
+ default:
+ return 0, nil, 0, errDataType
+ }
+ if err != nil {
+ return dt, nil, 0, err
+ }
+ rk, ver, err := decodeVerKey(verk)
+ if err != nil {
+ return dt, nil, 0, err
+ }
+ rawKey := packRedisKey(table, rk)
+ return dt, rawKey, ver, nil
+}
+
+type collVerKeyInfo struct {
+ OldHeader *headerMetaValue
+ Expired bool
+ Table []byte
+ VerKey []byte
+ RangeStart []byte
+ RangeEnd []byte
+}
+
+// decoded meta, kept compatible with the old format
+func (info collVerKeyInfo) MetaData() []byte {
+ return info.OldHeader.UserData
+}
+
+func (info collVerKeyInfo) IsNotExistOrExpired() bool {
+ return info.Expired || info.MetaData() == nil
+}
+
+func checkCollKFSize(key []byte, field []byte) error {
+ return common.CheckKeySubKey(key, field)
+}
+
+func encodeMetaKey(dt byte, key []byte) ([]byte, error) {
+ switch dt {
+ case KVType:
+ key = encodeKVKey(key)
+ case HashType, HSizeType:
+ key = hEncodeSizeKey(key)
+ case SetType, SSizeType:
+ key = sEncodeSizeKey(key)
+ case BitmapType, BitmapMetaType:
+ key = bitEncodeMetaKey(key)
+ case ListType, LMetaType:
+ key = lEncodeMetaKey(key)
+ case ZSetType, ZSizeType, ZScoreType:
+ key = zEncodeSizeKey(key)
+ default:
+ return nil, errDataType
+ }
+ return key, nil
+}
+
+func (db *RockDB) getCollVerKeyForRange(ts int64, dt byte, key []byte, useLock bool) (collVerKeyInfo, error) {
+ info, err := db.GetCollVersionKey(ts, dt, key, useLock)
+ if err != nil {
+ return info, err
+ }
+ switch dt {
+ case SetType:
+ info.RangeStart = sEncodeStartKey(info.Table, info.VerKey)
+ info.RangeEnd = sEncodeStopKey(info.Table, info.VerKey)
+ case HashType:
+ info.RangeStart = hEncodeStartKey(info.Table, info.VerKey)
+ info.RangeEnd = hEncodeStopKey(info.Table, info.VerKey)
+ case ZSetType:
+ info.RangeStart = zEncodeStartKey(info.Table, info.VerKey)
+ info.RangeEnd = zEncodeStopKey(info.Table, info.VerKey)
+ default:
+ }
+ return info, nil
+}
+
+// if called from the raft write loop, the lock should be avoided (useLock=false).
+func (db *RockDB) GetCollVersionKey(ts int64, dt byte, key []byte, useLock bool) (collVerKeyInfo, error) {
+ var keyInfo collVerKeyInfo
+ table, rk, err := extractTableFromRedisKey(key)
+ if err != nil {
+ return keyInfo, err
+ }
+ if len(table) == 0 {
+ return keyInfo, errTableName
+ }
+ keyInfo.Table = table
+ if err := checkKeySize(rk); err != nil {
+ return keyInfo, err
+ }
+ keyInfo.OldHeader, keyInfo.Expired, err = db.collHeaderMeta(ts, dt, key, useLock)
+ if err != nil {
+ return keyInfo, err
+ }
+ keyInfo.VerKey = db.expiration.encodeToVersionKey(dt, keyInfo.OldHeader, rk)
+ return keyInfo, nil
+}
+
+// note: this may use the db write batch
+func (db *RockDB) prepareCollKeyForWrite(ts int64, dt byte, key []byte, field []byte) (collVerKeyInfo, error) {
+ var keyInfo collVerKeyInfo
+ table, rk, err := extractTableFromRedisKey(key)
+ if err != nil {
+ return keyInfo, err
+ }
+ keyInfo.Table = table
+ if err := checkCollKFSize(rk, field); err != nil {
+ return keyInfo, err
+ }
+
+ keyInfo.OldHeader, keyInfo.Expired, err = db.collHeaderMeta(ts, dt, key, false)
+ if err != nil {
+ return keyInfo, err
+ }
+
+ if keyInfo.IsNotExistOrExpired() {
+ // since renewing an expired key may change the header meta in the old header under some
+ // expire policies, the renewed meta data should also be returned for those policies
+ db.expiration.renewOnExpired(ts, dt, key, keyInfo.OldHeader)
+ }
+ keyInfo.VerKey = db.expiration.encodeToVersionKey(dt, keyInfo.OldHeader, rk)
+ return keyInfo, nil
+}
+
+// TODO: we may not need to read the meta when only the expired flag is needed and the
+// local_deletion policy is used, since the TTL is not available under local_deletion
+// (this would save one db get)
+func (db *RockDB) collHeaderMeta(ts int64, dt byte, key []byte, useLock bool) (*headerMetaValue, bool, error) {
+ var sizeKey []byte
+ sizeKey, err := encodeMetaKey(dt, key)
+ if err != nil {
+ return nil, false, err
+ }
+ var v []byte
+ if useLock {
+ v, err = db.GetBytes(sizeKey)
+ } else {
+ v, err = db.GetBytesNoLock(sizeKey)
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ h, err := db.expiration.decodeRawValue(dt, v)
+ if err != nil {
+ return h, false, err
+ }
+ isExpired, err := db.expiration.isExpired(ts, dt, key, v, useLock)
+ return h, isExpired, err
+}
+
+func (db *RockDB) collKeyExists(dt byte, key []byte) (int64, error) {
+ h, expired, err := db.collHeaderMeta(time.Now().UnixNano(), dt, key, true)
+ if err != nil {
+ return 0, err
+ }
+ if expired || h.UserData == nil {
+ return 0, nil
+ }
+ return 1, nil
+}
+
+func (db *RockDB) collExpire(ts int64, dt byte, key []byte, duration int64) (int64, error) {
+ oldh, expired, err := db.collHeaderMeta(ts, dt, key, false)
+ if err != nil || expired || oldh.UserData == nil {
+ return 0, err
+ }
+
+ rawV := db.expiration.encodeToRawValue(dt, oldh)
+ return db.ExpireAt(dt, key, rawV, duration+ts/int64(time.Second))
+}
+
+func (db *RockDB) collPersist(ts int64, dt byte, key []byte) (int64, error) {
+ oldh, expired, err := db.collHeaderMeta(ts, dt, key, false)
+ if err != nil || expired || oldh.UserData == nil {
+ return 0, err
+ }
+
+ rawV := db.expiration.encodeToRawValue(dt, oldh)
+ return db.ExpireAt(dt, key, rawV, 0)
+}
diff --git a/rockredis/t_hash.go b/rockredis/t_hash.go
index d1a3094a..56a4684e 100644
--- a/rockredis/t_hash.go
+++ b/rockredis/t_hash.go
@@ -2,46 +2,21 @@ package rockredis
import (
"bytes"
- "encoding/binary"
"errors"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/gorocksdb"
+ ps "github.com/prometheus/client_golang/prometheus"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
+ "github.com/youzan/ZanRedisDB/metric"
+ "github.com/youzan/ZanRedisDB/slow"
)
var (
- errHashKey = errors.New("invalid hash key")
- errHSizeKey = errors.New("invalid hash size key")
- errHashFieldSize = errors.New("invalid hash field size")
+ errHashKey = errors.New("invalid hash key")
+ errHSizeKey = errors.New("invalid hash size key")
)
-const (
- hashStartSep byte = ':'
-)
-
-func convertRedisKeyToDBHKey(key []byte, field []byte) ([]byte, error) {
- table, rk, err := extractTableFromRedisKey(key)
- if err != nil {
- return nil, err
- }
-
- if err := checkHashKFSize(rk, field); err != nil {
- return nil, err
- }
- key = hEncodeHashKey(table, key[len(table)+1:], field)
- return key, nil
-}
-
-func checkHashKFSize(key []byte, field []byte) error {
- if len(key) > MaxKeySize || len(key) == 0 {
- return errKeySize
- } else if len(field) > MaxHashFieldSize {
- return errHashFieldSize
- }
- return nil
-}
-
func hEncodeSizeKey(key []byte) []byte {
buf := make([]byte, len(key)+1+len(metaPrefix))
@@ -68,48 +43,17 @@ func hDecodeSizeKey(ek []byte) ([]byte, error) {
}
func hEncodeHashKey(table []byte, key []byte, field []byte) []byte {
- buf := make([]byte, getDataTablePrefixBufLen(HashType, table)+len(key)+len(field)+1+2)
-
- pos := encodeDataTablePrefixToBuf(buf, HashType, table)
-
- binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
- pos += 2
-
- copy(buf[pos:], key)
- pos += len(key)
-
- buf[pos] = hashStartSep
- pos++
- copy(buf[pos:], field)
- return buf
+ return encodeCollSubKey(HashType, table, key, field)
}
func hDecodeHashKey(ek []byte) ([]byte, []byte, []byte, error) {
- table, pos, err := decodeDataTablePrefixFromBuf(ek, HashType)
+ dt, table, key, field, err := decodeCollSubKey(ek)
if err != nil {
return nil, nil, nil, err
}
-
- if pos+2 > len(ek) {
- return nil, nil, nil, errHashKey
+ if dt != HashType {
+ return table, key, field, errCollTypeMismatch
}
-
- keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
- pos += 2
-
- if keyLen+pos > len(ek) {
- return nil, nil, nil, errHashKey
- }
-
- key := ek[pos : pos+keyLen]
- pos += keyLen
-
- if ek[pos] != hashStartSep {
- return nil, nil, nil, errHashKey
- }
-
- pos++
- field := ek[pos:]
return table, key, field, nil
}
@@ -125,33 +69,40 @@ func hEncodeStopKey(table []byte, key []byte) []byte {
// return if we create the new field or override it
func (db *RockDB) hSetField(ts int64, checkNX bool, hkey []byte, field []byte, value []byte,
- wb *gorocksdb.WriteBatch, hindex *HsetIndex) (int64, error) {
- table, rk, err := extractTableFromRedisKey(hkey)
-
+ wb engine.WriteBatch, hindex *HsetIndex) (int64, error) {
+ created := int64(1)
+ keyInfo, err := db.prepareHashKeyForWrite(ts, hkey, field)
if err != nil {
return 0, err
}
- if err := checkHashKFSize(rk, field); err != nil {
- return 0, err
- }
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
ek := hEncodeHashKey(table, rk, field)
- created := int64(1)
tsBuf := PutInt64(ts)
value = append(value, tsBuf...)
var oldV []byte
- if oldV, _ = db.eng.GetBytesNoLock(db.defaultReadOpts, ek); oldV != nil {
+ if oldV, _ = db.GetBytesNoLock(ek); oldV != nil {
created = 0
if checkNX || bytes.Equal(oldV, value) {
return created, nil
}
} else {
- if n, err := db.hIncrSize(hkey, 1, wb); err != nil {
+ newNum, err := db.hIncrSize(hkey, keyInfo.OldHeader, 1, wb)
+ if err != nil {
return 0, err
- } else if n == 1 {
+ } else if newNum == 1 && !keyInfo.Expired {
db.IncrTableKeyCount(table, 1, wb)
}
+ db.topLargeCollKeys.Update(hkey, int(newNum))
+ slow.LogLargeCollection(int(newNum), slow.NewSlowLogInfo(string(table), string(hkey), "hash"))
+ if newNum > collectionLengthForMetric {
+ metric.CollectionLenDist.With(ps.Labels{
+ "table": string(table),
+ }).Observe(float64(newNum))
+ }
}
+
wb.Put(ek, value)
if hindex != nil {
@@ -171,33 +122,38 @@ func (db *RockDB) HLen(hkey []byte) (int64, error) {
if err := checkKeySize(hkey); err != nil {
return 0, err
}
- sizeKey := hEncodeSizeKey(hkey)
- v, err := db.eng.GetBytes(db.defaultReadOpts, sizeKey)
- return Int64(v, err)
+ tn := time.Now().UnixNano()
+ oldh, expired, err := db.hHeaderMeta(tn, hkey, true)
+ if err != nil {
+ return 0, err
+ }
+ if expired {
+ return 0, nil
+ }
+ return Int64(oldh.UserData, err)
}
-func (db *RockDB) hIncrSize(hkey []byte, delta int64, wb *gorocksdb.WriteBatch) (int64, error) {
+func (db *RockDB) hIncrSize(hkey []byte, oldh *headerMetaValue, delta int64, wb engine.WriteBatch) (int64, error) {
sk := hEncodeSizeKey(hkey)
-
- var err error
- var size int64
- if size, err = Int64(db.eng.GetBytesNoLock(db.defaultReadOpts, sk)); err != nil {
+ metaV := oldh.UserData
+ size, err := Int64(metaV, nil)
+ if err != nil {
return 0, err
+ }
+ size += delta
+ if size <= 0 {
+ size = 0
+ wb.Delete(sk)
} else {
- size += delta
- if size <= 0 {
- size = 0
- wb.Delete(sk)
- } else {
- wb.Put(sk, PutInt64(size))
- }
+ oldh.UserData = PutInt64(size)
+ nv := oldh.encodeWithData()
+ wb.Put(sk, nv)
}
return size, nil
}
-func (db *RockDB) HSet(ts int64, checkNX bool, key []byte, field []byte, value []byte) (int64, error) {
- s := time.Now()
- if err := checkValueSize(value); err != nil {
+func (db *RockDB) HSet(ts int64, checkNX bool, key []byte, field []byte, ovalue []byte) (int64, error) {
+ if err := checkValueSize(ovalue); err != nil {
return 0, err
}
table, _, err := extractTableFromRedisKey(key)
@@ -213,55 +169,58 @@ func (db *RockDB) HSet(ts int64, checkNX bool, key []byte, field []byte, value [
defer tableIndexes.Unlock()
hindex = tableIndexes.GetHIndexNoLock(string(field))
}
- db.wb.Clear()
+ var value []byte
+ if len(ovalue) > len(db.writeTmpBuf) {
+ value = make([]byte, len(ovalue))
+ } else {
+ value = db.writeTmpBuf[:len(ovalue)]
+ }
+ copy(value, ovalue)
created, err := db.hSetField(ts, checkNX, key, field, value, db.wb, hindex)
if err != nil {
return 0, err
}
- c1 := time.Since(s)
- err = db.eng.Write(db.defaultWriteOpts, db.wb)
- c2 := time.Since(s)
- if c2 > time.Second/3 {
- dbLog.Infof("key %v slow write cost: %v, %v", string(key), c1, c2)
- }
+ err = db.MaybeCommitBatch()
return created, err
}
func (db *RockDB) HMset(ts int64, key []byte, args ...common.KVRecord) error {
- s := time.Now()
- if len(args) >= MAX_BATCH_NUM {
+ if len(args) > MAX_BATCH_NUM {
return errTooMuchBatchSize
}
if len(args) == 0 {
return nil
}
- table, rk, err := extractTableFromRedisKey(key)
+
+ // get old header for this hash key
+ keyInfo, err := db.prepareHashKeyForWrite(ts, key, nil)
if err != nil {
return err
}
+ table := keyInfo.Table
+ verKey := keyInfo.VerKey
+
tableIndexes := db.indexMgr.GetTableIndexes(string(table))
if tableIndexes != nil {
tableIndexes.Lock()
defer tableIndexes.Unlock()
}
- db.wb.Clear()
- c1 := time.Since(s)
var num int64
var value []byte
tsBuf := PutInt64(ts)
for i := 0; i < len(args); i++ {
- if err = checkHashKFSize(rk, args[i].Key); err != nil {
+ if err = checkCollKFSize(verKey, args[i].Key); err != nil {
return err
} else if err = checkValueSize(args[i].Value); err != nil {
return err
}
- ek := hEncodeHashKey(table, rk, args[i].Key)
+ ek := hEncodeHashKey(table, verKey, args[i].Key)
var oldV []byte
- if oldV, err = db.eng.GetBytesNoLock(db.defaultReadOpts, ek); err != nil {
+ if oldV, err = db.GetBytesNoLock(ek); err != nil {
return err
} else if oldV == nil {
num++
@@ -283,69 +242,150 @@ func (db *RockDB) HMset(ts int64, key []byte, args ...common.KVRecord) error {
}
}
}
- c2 := time.Since(s)
- if newNum, err := db.hIncrSize(key, num, db.wb); err != nil {
+ newNum, err := db.hIncrSize(key, keyInfo.OldHeader, num, db.wb)
+ if err != nil {
return err
- } else if newNum > 0 && newNum == num {
+ } else if newNum > 0 && newNum == num && !keyInfo.Expired {
db.IncrTableKeyCount(table, 1, db.wb)
}
- c3 := time.Since(s)
-
- err = db.eng.Write(db.defaultWriteOpts, db.wb)
- c4 := time.Since(s)
- if c4 > time.Second/3 {
- dbLog.Infof("key %v slow write cost: %v, %v, %v, %v", string(key), c1, c2, c3, c4)
+ db.topLargeCollKeys.Update(key, int(newNum))
+ slow.LogLargeCollection(int(newNum), slow.NewSlowLogInfo(string(table), string(key), "hash"))
+ if newNum > collectionLengthForMetric {
+ metric.CollectionLenDist.With(ps.Labels{
+ "table": string(table),
+ }).Observe(float64(newNum))
}
+
+ err = db.MaybeCommitBatch()
return err
}
-func (db *RockDB) HGetVer(key []byte, field []byte) (int64, error) {
- if err := checkHashKFSize(key, field); err != nil {
- return 0, err
+func (db *RockDB) hGetRawFieldValue(ts int64, key []byte, field []byte, checkExpired bool, useLock bool) ([]byte, error) {
+ if err := checkCollKFSize(key, field); err != nil {
+ return nil, err
+ }
+ keyInfo, err := db.GetCollVersionKey(ts, HashType, key, useLock)
+ if err != nil {
+ return nil, err
+ }
+ if checkExpired && keyInfo.IsNotExistOrExpired() {
+ return nil, nil
}
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
+ ek := hEncodeHashKey(table, rk, field)
+
+ if useLock {
+ return db.GetBytes(ek)
+ } else {
+ return db.GetBytesNoLock(ek)
+ }
+}
- dbKey, err := convertRedisKeyToDBHKey(key, field)
+func (db *RockDB) hExistRawField(ts int64, key []byte, field []byte, checkExpired bool, useLock bool) (bool, error) {
+ if err := checkCollKFSize(key, field); err != nil {
+ return false, err
+ }
+ keyInfo, err := db.GetCollVersionKey(ts, HashType, key, useLock)
if err != nil {
- return 0, err
+ return false, err
+ }
+ if checkExpired && keyInfo.IsNotExistOrExpired() {
+ return false, nil
+ }
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
+ ek := hEncodeHashKey(table, rk, field)
+
+ if useLock {
+ v, err := db.Exist(ek)
+ return v, err
+ } else {
+ v, err := db.ExistNoLock(ek)
+ return v, err
}
+}
+
+func (db *RockDB) HGetVer(key []byte, field []byte) (int64, error) {
+ v, err := db.hGetRawFieldValue(0, key, field, false, true)
var ts uint64
- v, err := db.eng.GetBytes(db.defaultReadOpts, dbKey)
if len(v) >= tsLen {
ts, err = Uint64(v[len(v)-tsLen:], err)
}
return int64(ts), err
}
-func (db *RockDB) HGet(key []byte, field []byte) ([]byte, error) {
- if err := checkHashKFSize(key, field); err != nil {
- return nil, err
+func (db *RockDB) HGetWithOp(key []byte, field []byte, op func([]byte) error) error {
+ tn := time.Now().UnixNano()
+ if err := checkCollKFSize(key, field); err != nil {
+ return err
}
+ keyInfo, err := db.GetCollVersionKey(tn, HashType, key, true)
+ if err != nil {
+ return err
+ }
+ if keyInfo.IsNotExistOrExpired() {
+ // we must call the callback if no error is returned
+ return op(nil)
+ }
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
+ ek := hEncodeHashKey(table, rk, field)
- dbKey, err := convertRedisKeyToDBHKey(key, field)
+ return db.rockEng.GetValueWithOp(ek, func(v []byte) error {
+ if len(v) >= tsLen {
+ v = v[:len(v)-tsLen]
+ }
+ return op(v)
+ })
+}
+
+func (db *RockDB) HGetExpired(key []byte, field []byte) ([]byte, error) {
+ return db.hgetWithFlag(key, field, true)
+}
+
+func (db *RockDB) HGet(key []byte, field []byte) ([]byte, error) {
+ return db.hgetWithFlag(key, field, false)
+}
+
+func (db *RockDB) hgetWithFlag(key []byte, field []byte, getExpired bool) ([]byte, error) {
+ tn := time.Now().UnixNano()
+ v, err := db.hGetRawFieldValue(tn, key, field, !getExpired, true)
if err != nil {
return nil, err
}
- v, err := db.eng.GetBytes(db.defaultReadOpts, dbKey)
+ if v == nil {
+ return nil, nil
+ }
if len(v) >= tsLen {
v = v[:len(v)-tsLen]
}
- return v, err
+ return v, nil
}
-func (db *RockDB) HKeyExists(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
+func (db *RockDB) HExist(key []byte, field []byte) (bool, error) {
+ tn := time.Now().UnixNano()
+ vok, err := db.hExistRawField(tn, key, field, true, true)
+ return vok, err
+}
+
+func (db *RockDB) HMgetExpired(key []byte, args ...[]byte) ([][]byte, error) {
+ if len(args) > MAX_BATCH_NUM {
+ return nil, errTooMuchBatchSize
}
- sk := hEncodeSizeKey(key)
- v, err := db.eng.GetBytes(db.defaultReadOpts, sk)
- if v != nil && err == nil {
- return 1, nil
+ var err error
+ r := make([][]byte, len(args))
+ for i := 0; i < len(args); i++ {
+ r[i], err = db.HGetExpired(key, args[i])
+ if err != nil {
+ return nil, err
+ }
}
- return 0, err
+ return r, nil
}
func (db *RockDB) HMget(key []byte, args ...[]byte) ([][]byte, error) {
- if len(args) >= MAX_BATCH_NUM {
+ if len(args) > MAX_BATCH_NUM {
return nil, errTooMuchBatchSize
}
var err error
@@ -359,17 +399,20 @@ func (db *RockDB) HMget(key []byte, args ...[]byte) ([][]byte, error) {
return r, nil
}
-func (db *RockDB) HDel(key []byte, args ...[]byte) (int64, error) {
- if len(args) >= MAX_BATCH_NUM {
+func (db *RockDB) HDel(ts int64, key []byte, args ...[]byte) (int64, error) {
+ if len(args) > MAX_BATCH_NUM {
return 0, errTooMuchBatchSize
}
if len(args) == 0 {
return 0, nil
}
- table, rk, err := extractTableFromRedisKey(key)
+ keyInfo, err := db.GetCollVersionKey(ts, HashType, key, false)
if err != nil {
return 0, err
}
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
+ oldh := keyInfo.OldHeader
tableIndexes := db.indexMgr.GetTableIndexes(string(table))
if tableIndexes != nil {
@@ -377,7 +420,6 @@ func (db *RockDB) HDel(key []byte, args ...[]byte) (int64, error) {
defer tableIndexes.Unlock()
}
- db.wb.Clear()
wb := db.wb
var ek []byte
var oldV []byte
@@ -385,12 +427,12 @@ func (db *RockDB) HDel(key []byte, args ...[]byte) (int64, error) {
var num int64 = 0
var newNum int64 = -1
for i := 0; i < len(args); i++ {
- if err := checkHashKFSize(rk, args[i]); err != nil {
+ if err := common.CheckKeySubKey(rk, args[i]); err != nil {
return 0, err
}
ek = hEncodeHashKey(table, rk, args[i])
- oldV, err = db.eng.GetBytesNoLock(db.defaultReadOpts, ek)
+ oldV, err = db.GetBytesNoLock(ek)
if oldV == nil {
continue
} else {
@@ -408,39 +450,49 @@ func (db *RockDB) HDel(key []byte, args ...[]byte) (int64, error) {
}
}
- if newNum, err = db.hIncrSize(key, -num, wb); err != nil {
+ if newNum, err = db.hIncrSize(key, oldh, -num, wb); err != nil {
return 0, err
- } else if num > 0 && newNum == 0 {
+ }
+ if num > 0 && newNum == 0 {
db.IncrTableKeyCount(table, -1, wb)
- db.delExpire(HashType, key, wb)
}
+ if newNum == 0 {
+ db.delExpire(HashType, key, nil, false, wb)
+ }
+ db.topLargeCollKeys.Update(key, int(newNum))
- err = db.eng.Write(db.defaultWriteOpts, wb)
+ err = db.MaybeCommitBatch()
return num, err
}
-func (db *RockDB) hDeleteAll(hkey []byte, wb *gorocksdb.WriteBatch, tableIndexes *TableIndexContainer) error {
- sk := hEncodeSizeKey(hkey)
- table, rk, err := extractTableFromRedisKey(hkey)
+func (db *RockDB) hDeleteAll(ts int64, hkey []byte, hlen int64, wb engine.WriteBatch, tableIndexes *TableIndexContainer) error {
+ keyInfo, err := db.getCollVerKeyForRange(ts, HashType, hkey, false)
if err != nil {
return err
}
- start := hEncodeStartKey(table, rk)
- stop := hEncodeStopKey(table, rk)
- hlen, err := db.HLen(hkey)
- if err != nil {
- return err
+ // nothing to delete if the key does not exist or is already expired
+ if keyInfo.IsNotExistOrExpired() {
+ return nil
}
+ sk := hEncodeSizeKey(hkey)
+ wb.Delete(sk)
+ db.topLargeCollKeys.Update(hkey, int(0))
+ if db.cfg.ExpirationPolicy == common.WaitCompact && tableIndexes == nil {
+ // for the compact TTL policy, deleting the meta is enough
+ return nil
+ }
+ start := keyInfo.RangeStart
+ stop := keyInfo.RangeEnd
if tableIndexes != nil || hlen <= RangeDeleteNum {
- it, err := NewDBRangeIterator(db.eng, start, stop, common.RangeROpen, false)
+ it, err := db.NewDBRangeIterator(start, stop, common.RangeROpen, false)
if err != nil {
return err
}
defer it.Close()
for ; it.Valid(); it.Next() {
- rawk := it.RefKey()
+ rawk := it.Key()
if hlen <= RangeDeleteNum {
wb.Delete(rawk)
}
@@ -459,11 +511,10 @@ func (db *RockDB) hDeleteAll(hkey []byte, wb *gorocksdb.WriteBatch, tableIndexes
if hlen > RangeDeleteNum {
wb.DeleteRange(start, stop)
}
- wb.Delete(sk)
return nil
}
-func (db *RockDB) HClear(hkey []byte) (int64, error) {
+func (db *RockDB) HClear(ts int64, hkey []byte) (int64, error) {
if err := checkKeySize(hkey); err != nil {
return 0, err
}
@@ -482,23 +533,28 @@ func (db *RockDB) HClear(hkey []byte) (int64, error) {
if err != nil {
return 0, err
}
+ if hlen == 0 {
+ return 0, nil
+ }
wb := db.wb
- wb.Clear()
- err = db.hDeleteAll(hkey, wb, tableIndexes)
+ err = db.hDeleteAll(ts, hkey, hlen, wb, tableIndexes)
if err != nil {
return 0, err
}
if hlen > 0 {
db.IncrTableKeyCount(table, -1, wb)
- db.delExpire(HashType, hkey, wb)
}
+ db.delExpire(HashType, hkey, nil, false, wb)
- err = db.eng.Write(db.defaultWriteOpts, wb)
- return hlen, err
+ err = db.MaybeCommitBatch()
+ if hlen > 0 {
+ return 1, err
+ }
+ return 0, err
}
-func (db *RockDB) hClearWithBatch(hkey []byte, wb *gorocksdb.WriteBatch) error {
+func (db *RockDB) hClearWithBatch(hkey []byte, wb engine.WriteBatch) error {
if err := checkKeySize(hkey); err != nil {
return err
}
@@ -517,26 +573,26 @@ func (db *RockDB) hClearWithBatch(hkey []byte, wb *gorocksdb.WriteBatch) error {
defer tableIndexes.Unlock()
}
- err = db.hDeleteAll(hkey, wb, tableIndexes)
+ err = db.hDeleteAll(0, hkey, hlen, wb, tableIndexes)
if err != nil {
return err
}
if hlen > 0 {
db.IncrTableKeyCount(table, -1, wb)
- db.delExpire(HashType, hkey, wb)
}
+ db.delExpire(HashType, hkey, nil, false, wb)
return err
}
func (db *RockDB) HMclear(keys ...[]byte) {
for _, key := range keys {
- db.HClear(key)
+ db.HClear(0, key)
}
}
func (db *RockDB) HIncrBy(ts int64, key []byte, field []byte, delta int64) (int64, error) {
- if err := checkHashKFSize(key, field); err != nil {
+ if err := checkCollKFSize(key, field); err != nil {
return 0, err
}
table, _, err := extractTableFromRedisKey(key)
@@ -544,9 +600,7 @@ func (db *RockDB) HIncrBy(ts int64, key []byte, field []byte, delta int64) (int6
return 0, err
}
- var ek []byte
-
- ek, err = convertRedisKeyToDBHKey(key, field)
+ fv, err := db.hGetRawFieldValue(ts, key, field, true, false)
if err != nil {
return 0, err
}
@@ -559,15 +613,15 @@ func (db *RockDB) HIncrBy(ts int64, key []byte, field []byte, delta int64) (int6
hindex = tableIndexes.GetHIndexNoLock(string(field))
}
wb := db.wb
- wb.Clear()
var n int64
- oldV, err := db.eng.GetBytesNoLock(db.defaultReadOpts, ek)
- if len(oldV) >= tsLen {
- oldV = oldV[:len(oldV)-tsLen]
- }
- if n, err = StrInt64(oldV, err); err != nil {
- return 0, err
+ if fv != nil {
+ if len(fv) >= tsLen {
+ fv = fv[:len(fv)-tsLen]
+ }
+ if n, err = StrInt64(fv, err); err != nil {
+ return 0, err
+ }
}
n += delta
@@ -577,172 +631,171 @@ func (db *RockDB) HIncrBy(ts int64, key []byte, field []byte, delta int64) (int6
return 0, err
}
- err = db.eng.Write(db.defaultWriteOpts, wb)
+ err = db.MaybeCommitBatch()
return n, err
}
-func (db *RockDB) HGetAll(key []byte) (int64, chan common.KVRecordRet, error) {
+func (db *RockDB) HGetAll(key []byte) (int64, []common.KVRecordRet, error) {
+ return db.hGetAll(key, false)
+}
+
+func (db *RockDB) HGetAllExpired(key []byte) (int64, []common.KVRecordRet, error) {
+ return db.hGetAll(key, true)
+}
+
+func (db *RockDB) hGetAll(key []byte, getExpired bool) (int64, []common.KVRecordRet, error) {
if err := checkKeySize(key); err != nil {
return 0, nil, err
}
- table, rk, err := extractTableFromRedisKey(key)
+ tn := time.Now().UnixNano()
+ keyInfo, err := db.getCollVerKeyForRange(tn, HashType, key, true)
if err != nil {
return 0, nil, err
}
-
- length, err := db.HLen(key)
- if err != nil {
- return 0, nil, err
+ if keyInfo.IsNotExistOrExpired() && !getExpired {
+ return 0, nil, nil
}
- if length >= MAX_BATCH_NUM {
+ start := keyInfo.RangeStart
+ stop := keyInfo.RangeEnd
+
+ length, err := Int64(keyInfo.MetaData(), err)
+ if length > MAX_BATCH_NUM {
return length, nil, errTooMuchBatchSize
}
- start := hEncodeStartKey(table, rk)
- stop := hEncodeStopKey(table, rk)
- it, err := NewDBRangeIterator(db.eng, start, stop, common.RangeROpen, false)
+ it, err := db.NewDBRangeIterator(start, stop, common.RangeROpen, false)
if err != nil {
return 0, nil, err
}
it.NoTimestamp(HashType)
- valCh := make(chan common.KVRecordRet, 16)
+ vals := make([]common.KVRecordRet, 0, length)
doScan := func() {
defer it.Close()
- defer close(valCh)
for ; it.Valid(); it.Next() {
_, _, f, err := hDecodeHashKey(it.Key())
v := it.Value()
- select {
- case valCh <- common.KVRecordRet{
+ vals = append(vals, common.KVRecordRet{
Rec: common.KVRecord{Key: f, Value: v},
Err: err,
- }:
- case <-db.quit:
- break
- }
+ })
}
}
- if length < int64(len(valCh)) {
- doScan()
- } else {
- go doScan()
- }
-
- return length, valCh, nil
+ doScan()
+ return length, vals, nil
}
-func (db *RockDB) HKeys(key []byte) (int64, chan common.KVRecordRet, error) {
+func (db *RockDB) HKeys(key []byte) (int64, []common.KVRecordRet, error) {
if err := checkKeySize(key); err != nil {
return 0, nil, err
}
- table, rk, err := extractTableFromRedisKey(key)
-
- length, err := db.HLen(key)
+ tn := time.Now().UnixNano()
+ keyInfo, err := db.getCollVerKeyForRange(tn, HashType, key, true)
+ if err != nil {
+ return 0, nil, err
+ }
+ if keyInfo.IsNotExistOrExpired() {
+ return 0, nil, nil
+ }
+ start := keyInfo.RangeStart
+ stop := keyInfo.RangeEnd
+ length, err := Int64(keyInfo.MetaData(), err)
if err != nil {
return 0, nil, err
}
- if length >= MAX_BATCH_NUM {
+ if length > MAX_BATCH_NUM {
return length, nil, errTooMuchBatchSize
}
- start := hEncodeStartKey(table, rk)
- stop := hEncodeStopKey(table, rk)
- it, err := NewDBRangeIterator(db.eng, start, stop, common.RangeROpen, false)
+
+ it, err := db.NewDBRangeIterator(start, stop, common.RangeROpen, false)
if err != nil {
return 0, nil, err
}
- valCh := make(chan common.KVRecordRet, 16)
+ vals := make([]common.KVRecordRet, 0, length)
doScan := func() {
defer it.Close()
- defer close(valCh)
for ; it.Valid(); it.Next() {
- _, _, f, err := hDecodeHashKey(it.Key())
- valCh <- common.KVRecordRet{
- Rec: common.KVRecord{Key: f, Value: nil},
- Err: err,
+ _, _, f, _ := hDecodeHashKey(it.Key())
+ if f == nil {
+ continue
}
+ vals = append(vals, common.KVRecordRet{
+ Rec: common.KVRecord{Key: f, Value: nil},
+ Err: nil,
+ })
}
}
- if length < int64(len(valCh)) {
- doScan()
- } else {
- go doScan()
- }
- return length, valCh, nil
+ doScan()
+ return length, vals, nil
}
-func (db *RockDB) HValues(key []byte) (int64, chan common.KVRecordRet, error) {
+func (db *RockDB) HValues(key []byte) (int64, []common.KVRecordRet, error) {
if err := checkKeySize(key); err != nil {
return 0, nil, err
}
- table, rk, err := extractTableFromRedisKey(key)
+ tn := time.Now().UnixNano()
+ keyInfo, err := db.getCollVerKeyForRange(tn, HashType, key, true)
if err != nil {
return 0, nil, err
}
-
- length, err := db.HLen(key)
+ if keyInfo.IsNotExistOrExpired() {
+ return 0, nil, nil
+ }
+ start := keyInfo.RangeStart
+ stop := keyInfo.RangeEnd
+ length, err := Int64(keyInfo.MetaData(), err)
if err != nil {
return 0, nil, err
}
- if length >= MAX_BATCH_NUM {
+ if length > MAX_BATCH_NUM {
return length, nil, errTooMuchBatchSize
}
- start := hEncodeStartKey(table, rk)
- stop := hEncodeStopKey(table, rk)
- it, err := NewDBRangeIterator(db.eng, start, stop, common.RangeROpen, false)
+ it, err := db.NewDBRangeIterator(start, stop, common.RangeROpen, false)
if err != nil {
return 0, nil, err
}
it.NoTimestamp(HashType)
- valCh := make(chan common.KVRecordRet, 16)
- go func() {
- defer it.Close()
- defer close(valCh)
- for ; it.Valid(); it.Next() {
- va := it.Value()
- valCh <- common.KVRecordRet{
- Rec: common.KVRecord{Key: nil, Value: va},
- Err: nil,
- }
+ // TODO: use pool for large alloc
+ vals := make([]common.KVRecordRet, 0, length)
+ defer it.Close()
+ for ; it.Valid(); it.Next() {
+ va := it.Value()
+ if va == nil {
+ continue
}
- }()
+ vals = append(vals, common.KVRecordRet{
+ Rec: common.KVRecord{Key: nil, Value: va},
+ Err: nil,
+ })
+ }
- return length, valCh, nil
+ return length, vals, nil
}
-func (db *RockDB) HExpire(key []byte, duration int64) (int64, error) {
- if exists, err := db.HKeyExists(key); err != nil || exists != 1 {
+func (db *RockDB) HKeyExists(key []byte) (int64, error) {
+ if err := checkKeySize(key); err != nil {
return 0, err
- } else {
- if err2 := db.expire(HashType, key, duration); err2 != nil {
- return 0, err2
- } else {
- return 1, nil
- }
}
+
+ return db.collKeyExists(HashType, key)
}
-func (db *RockDB) HPersist(key []byte) (int64, error) {
- if exists, err := db.HKeyExists(key); err != nil || exists != 1 {
- return 0, err
- }
+func (db *RockDB) HExpire(ts int64, key []byte, duration int64) (int64, error) {
+ return db.collExpire(ts, HashType, key, duration)
+}
- if ttl, err := db.ttl(HashType, key); err != nil || ttl < 0 {
- return 0, err
- }
+func (db *RockDB) HPersist(ts int64, key []byte) (int64, error) {
+ return db.collPersist(ts, HashType, key)
+}
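+// Both HExpire and HPersist now delegate to the shared collection helpers
+// (collExpire/collPersist) instead of manipulating the expire meta directly.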
- db.wb.Clear()
- if err := db.delExpire(HashType, key, db.wb); err != nil {
- return 0, err
- } else {
- if err2 := db.eng.Write(db.defaultWriteOpts, db.wb); err2 != nil {
- return 0, err2
- } else {
- return 1, nil
- }
- }
+func (db *RockDB) prepareHashKeyForWrite(ts int64, key []byte, field []byte) (collVerKeyInfo, error) {
+ return db.prepareCollKeyForWrite(ts, HashType, key, field)
+}
+
+func (db *RockDB) hHeaderMeta(ts int64, hkey []byte, useLock bool) (*headerMetaValue, bool, error) {
+ return db.collHeaderMeta(ts, HashType, hkey, useLock)
}
diff --git a/rockredis/t_hash_index.go b/rockredis/t_hash_index.go
index 07c505cb..e3196aad 100644
--- a/rockredis/t_hash_index.go
+++ b/rockredis/t_hash_index.go
@@ -5,8 +5,8 @@ import (
"errors"
"strconv"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/gorocksdb"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
)
const (
@@ -241,7 +241,7 @@ func encodeHsetIndexStringStopKey(table []byte, indexName []byte, indexValue []b
return k, nil
}
-func hsetIndexAddNumberRec(table []byte, indexName []byte, indexValue int64, pk []byte, pkvalue []byte, wb *gorocksdb.WriteBatch) error {
+func hsetIndexAddNumberRec(table []byte, indexName []byte, indexValue int64, pk []byte, pkvalue []byte, wb engine.WriteBatch) error {
dbkey, err := encodeHsetIndexNumberKey(table, indexName, indexValue, pk, false)
if err != nil {
return err
@@ -250,7 +250,7 @@ func hsetIndexAddNumberRec(table []byte, indexName []byte, indexValue int64, pk
return nil
}
-func hsetIndexRemoveNumberRec(table []byte, indexName []byte, indexValue int64, pk []byte, wb *gorocksdb.WriteBatch) error {
+func hsetIndexRemoveNumberRec(table []byte, indexName []byte, indexValue int64, pk []byte, wb engine.WriteBatch) error {
dbkey, err := encodeHsetIndexNumberKey(table, indexName, indexValue, pk, false)
if err != nil {
return err
@@ -259,7 +259,7 @@ func hsetIndexRemoveNumberRec(table []byte, indexName []byte, indexValue int64,
return nil
}
-func hsetIndexAddStringRec(table []byte, indexName []byte, indexValue []byte, pk []byte, pkvalue []byte, wb *gorocksdb.WriteBatch) error {
+func hsetIndexAddStringRec(table []byte, indexName []byte, indexValue []byte, pk []byte, pkvalue []byte, wb engine.WriteBatch) error {
dbkey, err := encodeHsetIndexStringKey(table, indexName, indexValue, pk, false)
if err != nil {
return err
@@ -268,7 +268,7 @@ func hsetIndexAddStringRec(table []byte, indexName []byte, indexValue []byte, pk
return nil
}
-func hsetIndexRemoveStringRec(table []byte, indexName []byte, indexValue []byte, pk []byte, wb *gorocksdb.WriteBatch) error {
+func hsetIndexRemoveStringRec(table []byte, indexName []byte, indexValue []byte, pk []byte, wb engine.WriteBatch) error {
dbkey, err := encodeHsetIndexStringKey(table, indexName, indexValue, pk, false)
if err != nil {
return err
@@ -277,7 +277,7 @@ func hsetIndexRemoveStringRec(table []byte, indexName []byte, indexValue []byte,
return nil
}
-func (db *RockDB) hsetIndexAddFieldRecs(pk []byte, fieldList [][]byte, valueList [][]byte, wb *gorocksdb.WriteBatch) error {
+func (db *RockDB) hsetIndexAddFieldRecs(pk []byte, fieldList [][]byte, valueList [][]byte, wb engine.WriteBatch) error {
table, _, _ := extractTableFromRedisKey(pk)
if len(table) == 0 {
return errTableName
@@ -299,7 +299,7 @@ func (db *RockDB) hsetIndexAddFieldRecs(pk []byte, fieldList [][]byte, valueList
return nil
}
-func (db *RockDB) hsetIndexUpdateFieldRecs(pk []byte, fieldList [][]byte, valueList [][]byte, wb *gorocksdb.WriteBatch) error {
+func (db *RockDB) hsetIndexUpdateFieldRecs(pk []byte, fieldList [][]byte, valueList [][]byte, wb engine.WriteBatch) error {
table, _, _ := extractTableFromRedisKey(pk)
if len(table) == 0 {
return errTableName
@@ -325,7 +325,7 @@ func (db *RockDB) hsetIndexUpdateFieldRecs(pk []byte, fieldList [][]byte, valueL
return nil
}
-func (db *RockDB) hsetIndexAddRec(pk []byte, field []byte, value []byte, wb *gorocksdb.WriteBatch) error {
+func (db *RockDB) hsetIndexAddRec(pk []byte, field []byte, value []byte, wb engine.WriteBatch) error {
table, _, _ := extractTableFromRedisKey(pk)
if len(table) == 0 {
return errTableName
@@ -339,7 +339,7 @@ func (db *RockDB) hsetIndexAddRec(pk []byte, field []byte, value []byte, wb *gor
return hindex.UpdateRec(nil, value, pk, wb)
}
-func (db *RockDB) hsetIndexUpdateRec(pk []byte, field []byte, value []byte, wb *gorocksdb.WriteBatch) error {
+func (db *RockDB) hsetIndexUpdateRec(pk []byte, field []byte, value []byte, wb engine.WriteBatch) error {
table, _, _ := extractTableFromRedisKey(pk)
if len(table) == 0 {
return errTableName
@@ -358,7 +358,7 @@ func (db *RockDB) hsetIndexUpdateRec(pk []byte, field []byte, value []byte, wb *
return hindex.UpdateRec(oldvalue, value, pk, wb)
}
-func (self *RockDB) hsetIndexRemoveRec(pk []byte, field []byte, value []byte, wb *gorocksdb.WriteBatch) error {
+func (self *RockDB) hsetIndexRemoveRec(pk []byte, field []byte, value []byte, wb engine.WriteBatch) error {
table, _, _ := extractTableFromRedisKey(pk)
if len(table) == 0 {
return errTableName
@@ -489,7 +489,7 @@ func (self *HsetIndex) SearchRec(db *RockDB, cond *IndexCondition, countOnly boo
if dbLog.Level() >= common.LOG_DEBUG {
dbLog.Debugf("begin search index: %v-%v-%v, %v~%v", string(self.Table), string(self.Name), string(self.IndexField), min, max)
}
- it, err := NewDBRangeLimitIterator(db.eng, min, max, rt, cond.Offset, cond.Limit, false)
+ it, err := db.NewDBRangeLimitIterator(min, max, rt, cond.Offset, cond.Limit, false)
if err != nil {
return n, nil, err
}
@@ -523,7 +523,7 @@ func (self *HsetIndex) SearchRec(db *RockDB, cond *IndexCondition, countOnly boo
return n, pkList, nil
}
-func (self *HsetIndex) UpdateRec(oldvalue []byte, value []byte, pk []byte, wb *gorocksdb.WriteBatch) error {
+func (self *HsetIndex) UpdateRec(oldvalue []byte, value []byte, pk []byte, wb engine.WriteBatch) error {
if self.State == DeletedIndex {
return nil
}
@@ -554,7 +554,7 @@ func (self *HsetIndex) UpdateRec(oldvalue []byte, value []byte, pk []byte, wb *g
return nil
}
-func (self *HsetIndex) RemoveRec(value []byte, pk []byte, wb *gorocksdb.WriteBatch) {
+func (self *HsetIndex) RemoveRec(value []byte, pk []byte, wb engine.WriteBatch) {
if value == nil {
return
}
@@ -583,12 +583,12 @@ func (self *HsetIndex) cleanAll(db *RockDB, stopChan chan struct{}) error {
dbLog.Infof("begin clean index: %v-%v-%v", string(self.Table), string(self.Name), string(self.IndexField))
- wb := gorocksdb.NewWriteBatch()
+ wb := db.rockEng.NewWriteBatch()
defer wb.Destroy()
wb.DeleteRange(min, max)
wb.Delete(max)
- err := db.eng.Write(db.defaultWriteOpts, wb)
+ err := db.rockEng.Write(wb)
if err != nil {
dbLog.Infof("clean index %v, %v error: %v", string(self.Table), string(self.Name), err)
} else {
diff --git a/rockredis/t_hash_test.go b/rockredis/t_hash_test.go
index a59ee73e..7feb6a94 100644
--- a/rockredis/t_hash_test.go
+++ b/rockredis/t_hash_test.go
@@ -7,8 +7,8 @@ import (
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
"github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
)
func TestHashCodec(t *testing.T) {
@@ -41,13 +41,33 @@ func TestDBHash(t *testing.T) {
key := []byte("test:testdb_hash_a")
- if n, err := db.HSet(0, false, key, []byte("a"), []byte("hello world 1")); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
+ tn := time.Now().UnixNano()
+ r1 := common.KVRecord{
+ Key: []byte("a"),
+ Value: []byte("hello world 1"),
+ }
+ r2 := common.KVRecord{
+ Key: []byte("b"),
+ Value: []byte("hello world 2"),
}
+ // test hget on a non-existent key
+ n, err := db.HGetVer(key, []byte("a"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, vals, err := db.HGetAll(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ assert.Equal(t, 0, len(vals))
+ n, vals, err = db.HGetAllExpired(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ assert.Equal(t, 0, len(vals))
- if n, err := db.HSet(0, false, key, []byte("b"), []byte("hello world 2")); err != nil {
+ err = db.HMset(tn, key, r1, r2)
+ assert.Nil(t, err)
+
+ if n, err := db.HSet(tn, false, key, []byte("d"), []byte("hello world 2")); err != nil {
t.Fatal(err)
} else if n != 1 {
t.Fatal(n)
@@ -70,19 +90,30 @@ func TestDBHash(t *testing.T) {
if string(v2) != string(ay[1]) {
t.Error(ay[1])
}
+ n, err = db.HGetVer(key, []byte("a"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(tn), n)
- len, err := db.HLen(key)
+ n, vals, err = db.HGetAll(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(3), n)
+ assert.Equal(t, 3, len(vals))
+ n, vals2, err := db.HGetAllExpired(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(3), n)
+ assert.Equal(t, 3, len(vals2))
+ assert.Equal(t, vals, vals2)
+
+ length, err := db.HLen(key)
if err != nil {
t.Error(err)
}
- if len != 2 {
- t.Errorf("length should be 2: %v", len)
- }
- _, ch, _ := db.HGetAll(key)
- results := make([]common.KVRecordRet, 0)
- for r := range ch {
- results = append(results, r)
+ if length != 3 {
+ t.Errorf("length should be 2: %v", length)
}
+ n, results, _ := db.HGetAll(key)
+ time.Sleep(time.Second)
+ assert.Equal(t, int(n), len(results))
if string(results[0].Rec.Key) != "a" {
t.Error(results)
}
@@ -97,22 +128,16 @@ func TestDBHash(t *testing.T) {
t.Error(results)
}
- _, ch, _ = db.HKeys(key)
- results = make([]common.KVRecordRet, 0)
- for r := range ch {
- results = append(results, r)
- }
+ n, results, _ = db.HKeys(key)
+ assert.Equal(t, int(n), len(results))
+
if string(results[0].Rec.Key) != "a" {
t.Error(results)
}
if string(results[1].Rec.Key) != "b" {
t.Error(results)
}
- _, ch, _ = db.HValues(key)
- results = make([]common.KVRecordRet, 0)
- for r := range ch {
- results = append(results, r)
- }
+ n, results, _ = db.HValues(key)
if string(results[0].Rec.Value) != "hello world 1" {
t.Error(results)
}
@@ -120,7 +145,7 @@ func TestDBHash(t *testing.T) {
t.Error(results)
}
- if n, err := db.HSet(0, true, key, []byte("b"), []byte("hello world changed nx")); err != nil {
+ if n, err := db.HSet(tn, true, key, []byte("b"), []byte("hello world changed nx")); err != nil {
t.Fatal(err)
} else if n != 0 {
t.Fatal(n)
@@ -130,7 +155,7 @@ func TestDBHash(t *testing.T) {
t.Error(v2)
}
- if n, err := db.HSet(0, true, key, []byte("c"), []byte("hello world c")); err != nil {
+ if n, err := db.HSet(tn, true, key, []byte("c"), []byte("hello world c")); err != nil {
t.Fatal(err)
} else if n != 1 {
t.Fatal(n)
@@ -169,7 +194,7 @@ func TestHashKeyExists(t *testing.T) {
if _, err := db.HSet(0, false, key, []byte("hello2"), []byte("world2")); err != nil {
t.Fatal(err.Error())
}
- db.HDel(key, []byte("hello"))
+ db.HDel(0, key, []byte("hello"))
v, err = db.HKeyExists(key)
if err != nil {
t.Fatal(err.Error())
@@ -177,7 +202,7 @@ func TestHashKeyExists(t *testing.T) {
if v != 1 {
t.Fatal("invalid value ", v)
}
- db.HClear(key)
+ db.HClear(0, key)
v, err = db.HKeyExists(key)
if err != nil {
t.Fatal(err.Error())
@@ -444,12 +469,11 @@ func TestHashIndexStringV(t *testing.T) {
for i := 0; i < pkCnt; i++ {
inputPKList = append(inputPKList, []byte("test:key"+strconv.Itoa(i)))
}
- db.wb.Clear()
for i, pk := range inputPKList {
err = db.hsetIndexAddRec(pk, hindex.IndexField, inputFVList[i], db.wb)
assert.Nil(t, err)
}
- db.eng.Write(db.defaultWriteOpts, db.wb)
+ db.CommitBatchWrite()
condAll := &IndexCondition{
StartKey: nil,
IncludeStart: false,
@@ -582,9 +606,8 @@ func TestHashIndexStringV(t *testing.T) {
assert.True(t, comp == -1 || comp == 0)
}
- db.wb.Clear()
db.hsetIndexRemoveRec(inputPKList[0], hindex.IndexField, inputFVList[0], db.wb)
- db.eng.Write(db.defaultWriteOpts, db.wb)
+ db.CommitBatchWrite()
_, cnt, pkList, err = db.HsetIndexSearch(hindex.Table, hindex.IndexField, condEqual, false)
assert.Nil(t, err)
assert.Equal(t, 0, int(cnt))
@@ -656,7 +679,7 @@ func TestHashIndexInt64V(t *testing.T) {
for i, pk := range inputPKList {
db.hsetIndexAddRec(pk, hindex.IndexField, inputFVList[i], db.wb)
}
- db.eng.Write(db.defaultWriteOpts, db.wb)
+ db.rockEng.Write(db.wb)
condAll := &IndexCondition{
StartKey: nil,
IncludeStart: false,
@@ -783,7 +806,7 @@ func TestHashIndexInt64V(t *testing.T) {
db.wb.Clear()
db.hsetIndexRemoveRec(inputPKList[0], hindex.IndexField, inputFVList[0], db.wb)
- db.eng.Write(db.defaultWriteOpts, db.wb)
+ db.rockEng.Write(db.wb)
_, cnt, pkList, err = db.HsetIndexSearch(hindex.Table, hindex.IndexField, condEqual, false)
assert.Nil(t, err)
assert.Equal(t, 0, int(cnt))
@@ -934,7 +957,6 @@ func TestHashUpdateWithIndex(t *testing.T) {
inputFVList = append(inputFVList, []byte("fv1"))
inputFVList = append(inputFVList, []byte("fv2"))
inputFVList = append(inputFVList, []byte("fv3"))
- db.wb.Clear()
for i, pk := range inputPKList {
err = db.HMset(0, pk, common.KVRecord{stringIndex.IndexField, inputFVList[i]})
assert.Nil(t, err)
@@ -959,7 +981,7 @@ func TestHashUpdateWithIndex(t *testing.T) {
assert.Equal(t, 1, len(pkList))
assert.Equal(t, inputPKList[0], pkList[0].PKey)
- db.HDel(inputPKList[0], stringIndex.IndexField)
+ db.HDel(0, inputPKList[0], stringIndex.IndexField)
_, cnt, _, err = db.HsetIndexSearch(stringIndex.Table, stringIndex.IndexField, condEqual0, false)
assert.Nil(t, err)
assert.Equal(t, 0, int(cnt))
@@ -968,7 +990,7 @@ func TestHashUpdateWithIndex(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, len(inputPKList)-1, int(cnt))
- db.HClear(inputPKList[1])
+ db.HClear(0, inputPKList[1])
_, cnt, _, err = db.HsetIndexSearch(intIndex.Table, intIndex.IndexField, condAll, false)
assert.Nil(t, err)
@@ -978,3 +1000,92 @@ func TestHashUpdateWithIndex(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, len(inputPKList)-2, int(cnt))
}
+
+func TestDBHashClearInCompactTTL(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key := []byte("test:testdb_hash_clear_compact_a")
+ member := []byte("member")
+ memberNew := []byte("memberNew")
+
+ ts := time.Now().UnixNano()
+ db.HSet(ts, false, key, member, member)
+
+ n, err := db.HLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ ts = time.Now().UnixNano()
+ n, err = db.HClear(ts, key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.HLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ v, err := db.HGet(key, member)
+ assert.Nil(t, err)
+ assert.Nil(t, v)
+
+ vlist, err := db.HMget(key, member)
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(vlist))
+ assert.Nil(t, vlist[0])
+
+ n, rets, err := db.HGetAll(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int(n), len(rets))
+ assert.Equal(t, int(0), len(rets))
+
+ n, err = db.HKeyExists(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, rets, err = db.HKeys(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ assert.Equal(t, int(0), len(rets))
+ n, rets, err = db.HValues(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ assert.Equal(t, int(0), len(rets))
+
+ // renew
+ ts = time.Now().UnixNano()
+ db.HSet(ts, false, key, memberNew, memberNew)
+
+ n, err = db.HLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ v, err = db.HGet(key, member)
+ assert.Nil(t, err)
+ assert.Nil(t, v)
+ v, err = db.HGet(key, memberNew)
+ assert.Nil(t, err)
+ assert.Equal(t, memberNew, v)
+
+ vlist, err = db.HMget(key, memberNew)
+ assert.Nil(t, err)
+ assert.Equal(t, int(n), len(vlist))
+ assert.Equal(t, memberNew, vlist[0])
+
+ n, rets, err = db.HGetAll(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int(n), len(rets))
+ assert.Equal(t, int(1), len(rets))
+
+ n, err = db.HKeyExists(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, rets, err = db.HKeys(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ assert.Equal(t, int(1), len(rets))
+ n, rets, err = db.HValues(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ assert.Equal(t, int(1), len(rets))
+}
diff --git a/rockredis/t_hll.go b/rockredis/t_hll.go
index abd8fc10..4d7871ef 100644
--- a/rockredis/t_hll.go
+++ b/rockredis/t_hll.go
@@ -3,23 +3,33 @@ package rockredis
import (
"encoding/binary"
"errors"
+ "hash"
"sync"
"sync/atomic"
"time"
- "github.com/absolute8511/gorocksdb"
+ hll2 "github.com/absolute8511/go-hll"
hll "github.com/absolute8511/hyperloglog"
+ hll3 "github.com/absolute8511/hyperloglog2"
+ "github.com/golang/snappy"
+ "github.com/youzan/ZanRedisDB/slow"
+
//hll "github.com/axiomhq/hyperloglog"
- "github.com/hashicorp/golang-lru"
+ lru "github.com/hashicorp/golang-lru"
)
const (
- hllPrecision uint8 = 14
+ hllPrecision uint8 = 12
// to make it compatible for some different hll lib
// we add a flag to indicate which hll lib we are using
hllPlusDefault uint8 = 1
+ hllType2 uint8 = 2
+ hllType3 uint8 = 3
)
+//TODO: how to change this through raft and keep it after restart
+var initHLLType = hllPlusDefault
+
// pfcount use uint64 to storage the precomputed value, and the most significant bit is used
// to indicate whether the cached value is valid.
// pfadd will modify the pfcount most significant bit to indicate the register is changed
@@ -28,28 +38,231 @@ const (
// 14 bits precision yield to 16384 registers and standard error is 0.81% (1/sqrt(number of registers))
var errInvalidHLLData = errors.New("invalide hyperloglog data")
+var errInvalidHLLType = errors.New("invalid hyperloglog type")
var errHLLCountOverflow = errors.New("hyperloglog count overflow")
type hllCacheItem struct {
sync.Mutex
- hllp *hll.HyperLogLogPlus
+ hllp *hll.HyperLogLogPlus
+ hll2 hll2.HLL
+ hll3 *hll3.Sketch
+ // we use the highest bit to indicate that this cached count is out of date
cachedCount uint64
hllType uint8
ts int64
- flushed bool
+ dirty bool
deleting bool
}
+func newHLLItem(init uint8) (*hllCacheItem, error) {
+ switch init {
+ case hllPlusDefault:
+ hllp, _ := hll.NewPlus(hllPrecision)
+ item := &hllCacheItem{
+ hllp: hllp,
+ hllType: hllPlusDefault,
+ }
+ return item, nil
+ case hllType2:
+ sz, err := hll2.SizeByP(int(hllPrecision))
+ if err != nil {
+ return nil, err
+ }
+ h2 := make(hll2.HLL, sz)
+ item := &hllCacheItem{
+ hll2: h2,
+ hllType: hllType2,
+ }
+ return item, nil
+ case hllType3:
+ h3, _ := hll3.New(hllPrecision, true)
+ item := &hllCacheItem{
+ hll3: h3,
+ hllType: hllType3,
+ }
+ return item, nil
+ default:
+ return nil, errInvalidHLLType
+ }
+}
+
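+// newHLLItemFromDBBytes rebuilds a cache item from the stored value, whose
+// layout (as written back in onEvicted below) is:
+//   hllType(1 byte) | cachedCount(8 bytes, MSB used as the dirty flag) | encoded registers
+// the trailing 8-byte modify timestamp is stripped by the caller before this is called.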
+func newHLLItemFromDBBytes(hllType uint8, fkv []byte) (*hllCacheItem, bool, error) {
+ pos := 1
+ recompute := true
+ cnt := binary.BigEndian.Uint64(fkv[pos : pos+8])
+ if cnt&0x8000000000000000 == 0 {
+ recompute = false
+ }
+ switch hllType {
+ case hllPlusDefault:
+ hllp, _ := hll.NewPlus(hllPrecision)
+ err := hllp.GobDecode(fkv[pos+8:])
+ if err != nil {
+ return nil, recompute, err
+ }
+
+ item := &hllCacheItem{
+ hllp: hllp,
+ hllType: hllPlusDefault,
+ cachedCount: cnt,
+ dirty: false,
+ }
+ return item, recompute, err
+ case hllType3:
+ h3, _ := hll3.New(hllPrecision, true)
+ err := h3.UnmarshalBinary(fkv[pos+8:])
+ if err != nil {
+ return nil, recompute, err
+ }
+
+ item := &hllCacheItem{
+ hll3: h3,
+ hllType: hllType3,
+ cachedCount: cnt,
+ }
+ return item, recompute, err
+ case hllType2:
+ sz, err := hll2.SizeByP(int(hllPrecision))
+ if err != nil {
+ return nil, recompute, err
+ }
+ h2 := make(hll2.HLL, sz)
+ d := fkv[pos+8:]
+ if len(d) != len(h2) {
+ d, err = snappy.Decode(nil, d)
+ if err != nil || len(d) != len(h2) {
+ dbLog.Infof("invalid hll data: %v, expect len: %v, %v", d, len(h2), err)
+ return nil, recompute, errInvalidHLLData
+ }
+ }
+ copy(h2, d)
+
+ item := &hllCacheItem{
+ hll2: h2,
+ hllType: hllType2,
+ cachedCount: cnt,
+ }
+ return item, recompute, nil
+ default:
+ // unknown hll type
+ return nil, recompute, errInvalidHLLType
+ }
+}
+
+func (hllItem *hllCacheItem) HLLToBytes() ([]byte, error) {
+ if hllItem.hllType == hllPlusDefault {
+ return hllItem.hllp.GobEncode()
+ } else if hllItem.hllType == hllType2 {
+ return snappy.Encode(nil, hllItem.hll2), nil
+ } else if hllItem.hllType == hllType3 {
+ return hllItem.hll3.MarshalBinary()
+ }
+ return nil, errInvalidHLLType
+}
+
+func (hllItem *hllCacheItem) HLLMerge(item *hllCacheItem) error {
+ if hllItem.hllType != item.hllType {
+ return errInvalidHLLType
+ }
+ switch hllItem.hllType {
+ case hllPlusDefault:
+ return hllItem.hllp.Merge(item.hllp)
+ case hllType2:
+ return hllItem.hll2.Merge(item.hll2)
+ case hllType3:
+ return hllItem.hll3.Merge(item.hll3)
+ default:
+ return errInvalidHLLType
+ }
+}
+
+func (hllItem *hllCacheItem) computeHLLCount() uint64 {
+ if hllItem.hllType == hllPlusDefault {
+ return hllItem.hllp.Count()
+ } else if hllItem.hllType == hllType2 {
+ return hllItem.hll2.EstimateCardinality()
+ } else if hllItem.hllType == hllType3 {
+ return hllItem.hll3.Estimate()
+ }
+ return 0
+}
+
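+// getcomputeCount returns the cached count when the dirty bit (the most
+// significant bit) is clear; otherwise it recomputes the count from the
+// registers and refreshes the cached value and timestamp.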
+func (hllItem *hllCacheItem) getcomputeCount(ts int64) (int64, error) {
+ cnt := atomic.LoadUint64(&hllItem.cachedCount)
+ if cnt&0x8000000000000000 == 0 {
+ return int64(cnt), nil
+ }
+ hllItem.Lock()
+ cnt = hllItem.computeHLLCount()
+ if cnt&0x8000000000000000 != 0 {
+ hllItem.Unlock()
+ return 0, errHLLCountOverflow
+ }
+ atomic.StoreInt64(&hllItem.ts, ts)
+ atomic.StoreUint64(&hllItem.cachedCount, cnt)
+ hllItem.Unlock()
+ return int64(cnt), nil
+}
+
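+// invalidCachedCnt sets the dirty bit on the cached count and marks the item
+// dirty, so the next PFCount recomputes it and the flush path persists it.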
+func (hllItem *hllCacheItem) invalidCachedCnt(ts int64) {
+ hllItem.Lock()
+ cnt := atomic.LoadUint64(&hllItem.cachedCount)
+ cnt = cnt | 0x8000000000000000
+ atomic.StoreUint64(&hllItem.cachedCount, cnt)
+ atomic.StoreInt64(&hllItem.ts, ts)
+ hllItem.dirty = true
+ hllItem.Unlock()
+}
+
+func (hllItem *hllCacheItem) HLLAdd(hasher hash.Hash64, elems ...[]byte) bool {
+ changed := false
+
+ for _, elem := range elems {
+ hasher.Write(elem)
+ if hllItem.hllType == hllPlusDefault {
+ if hllItem.hllp.Add(hasher) {
+ changed = true
+ }
+ } else if hllItem.hllType == hllType2 {
+ if hllItem.hll2.Add(hasher.Sum64()) {
+ changed = true
+ }
+ } else if hllItem.hllType == hllType3 {
+ if hllItem.hll3.InsertHash(hasher.Sum64()) {
+ changed = true
+ }
+ }
+ hasher.Reset()
+ }
+
+ return changed
+}
+
+func (hllItem *hllCacheItem) addCount(hasher hash.Hash64, elems ...[]byte) (bool, bool) {
+ changed := false
+ hllItem.Lock()
+ isDirty := hllItem.dirty
+ if hllItem.HLLAdd(hasher, elems...) {
+ changed = true
+ }
+ hllItem.Unlock()
+ return changed, isDirty
+}
+
type hllCache struct {
- lruCache *lru.Cache
+ dirtyWriteCache *lru.Cache
// to avoid pfcount modify the db, we separate the read cache.
// so we can treat the pfcount as read command, since it only get from the write cache or
// read from db and cache to read cache.
+ rl sync.RWMutex
readCache *lru.Cache
db *RockDB
}
-func newHLLCache(size int, db *RockDB) (*hllCache, error) {
+// the write cache should not be too large, since flushing too many dirty
+// writes at once can be slow
+// TODO: delete range needs to be handled to clean the cache
+func newHLLCache(size int, wsize int, db *RockDB) (*hllCache, error) {
c := &hllCache{
db: db,
}
@@ -58,26 +271,17 @@ func newHLLCache(size int, db *RockDB) (*hllCache, error) {
if err != nil {
return nil, err
}
- c.lruCache, err = lru.NewWithEvict(size, c.onEvicted)
+ c.dirtyWriteCache, err = lru.NewWithEvict(wsize, c.onEvicted)
return c, err
}
// must be called in raft commit loop
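+// purging the dirty write cache triggers onEvicted for every entry, which
+// persists all dirty items to the engine.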
func (c *hllCache) Flush() {
start := time.Now()
- keys := c.lruCache.Keys()
- for _, k := range keys {
- v, ok := c.lruCache.Peek(k)
- if ok {
- item, ok := v.(*hllCacheItem)
- if ok && !item.flushed && !item.deleting {
- c.onEvicted(k, v)
- c.readCache.Remove(k.(string))
- }
- }
- }
- if time.Since(start) > time.Second {
- dbLog.Infof("flush hll cache cost: %v", time.Since(start))
+ c.dirtyWriteCache.Purge()
+ cost := time.Since(start)
+ if cost > time.Millisecond*100 {
+ dbLog.Infof("flush hll cache cost: %v", cost)
}
}
@@ -87,52 +291,59 @@ func (c *hllCache) onEvicted(rawKey interface{}, value interface{}) {
if !ok {
return
}
- if item.deleting {
+ item.Lock()
+ if item.deleting || !item.dirty {
+ item.Unlock()
+ return
+ }
+ tsBuf := PutInt64(item.ts)
+ item.dirty = false
+ newV, err := item.HLLToBytes()
+ oldCnt := atomic.LoadUint64(&item.cachedCount)
+ ht := item.hllType
+ item.Unlock()
+ if err != nil {
+ dbLog.Warningf("failed to encode %v hll: %v", rawKey, err.Error())
return
}
- wb := gorocksdb.NewWriteBatch()
- defer wb.Destroy()
cachedKey := []byte(rawKey.(string))
table, key, err := convertRedisKeyToDBKVKey(cachedKey)
- oldV, _ := c.db.eng.GetBytesNoLock(c.db.defaultReadOpts, key)
- hllp := item.hllp
- newV, err := hllp.GobEncode()
if err != nil {
- dbLog.Warningf("failed to encode %v hll: %v", key, err.Error())
+ dbLog.Warningf("key invalid %v : %v", cachedKey, err.Error())
return
}
+
+ s := time.Now()
+ wb := c.db.rockEng.NewWriteBatch()
+ defer wb.Destroy()
+ oldV, _ := c.db.GetBytesNoLock(key)
+
if c.db.cfg.EnableTableCounter && oldV == nil {
c.db.IncrTableKeyCount(table, 1, wb)
}
- // modify pfcount bit
- var oldCnt uint64
- if oldV != nil {
- oldCnt = binary.BigEndian.Uint64(oldV[1 : 1+8])
+ if len(oldV) >= 1+8 {
oldV = oldV[:1+8]
} else {
oldV = make([]byte, 8+1)
}
- oldCnt = oldCnt | 0x8000000000000000
+ oldV[0] = ht
binary.BigEndian.PutUint64(oldV[1:1+8], oldCnt)
- oldV[0] = hllPlusDefault
oldV = append(oldV, newV...)
- tsBuf := PutInt64(item.ts)
oldV = append(oldV, tsBuf...)
wb.Put(key, oldV)
- c.db.eng.Write(c.db.defaultWriteOpts, wb)
- item.flushed = true
+ c.db.rockEng.Write(wb)
+ cost := time.Since(s)
+ slow.LogSlowDBWrite(cost, slow.NewSlowLogInfo(c.db.cfg.DataDir, string(key), "flush pfadd"))
+ c.AddToReadCache(cachedKey, item)
}
func (c *hllCache) Get(key []byte) (*hllCacheItem, bool) {
- v, ok := c.lruCache.Get(string(key))
- rmRead := true
+ v, ok := c.dirtyWriteCache.Get(string(key))
if !ok {
v, ok = c.readCache.Get(string(key))
if !ok {
return nil, false
- } else {
- rmRead = false
}
}
@@ -140,50 +351,42 @@ func (c *hllCache) Get(key []byte) (*hllCacheItem, bool) {
if !ok {
return nil, false
}
- if rmRead {
- c.readCache.Remove(string(key))
- }
return item, true
}
+// callers must make sure a dirty item is never added to the read cache
func (c *hllCache) AddToReadCache(key []byte, item *hllCacheItem) {
c.readCache.Add(string(key), item)
}
-func (c *hllCache) Add(key []byte, item *hllCacheItem) {
- c.lruCache.Add(string(key), item)
+func (c *hllCache) AddDirtyWrite(key []byte, item *hllCacheItem) {
+ c.dirtyWriteCache.Add(string(key), item)
c.readCache.Remove(string(key))
}
func (c *hllCache) Del(key []byte) {
- v, ok := c.lruCache.Peek(string(key))
+ v, ok := c.dirtyWriteCache.Peek(string(key))
if ok {
item, ok := v.(*hllCacheItem)
if ok {
+ item.Lock()
item.deleting = true
+ item.Unlock()
}
- c.lruCache.Remove(string(key))
+ c.dirtyWriteCache.Remove(string(key))
}
c.readCache.Remove(string(key))
}
+func cntFromItem(ts int64, item *hllCacheItem) (int64, error) {
+ return item.getcomputeCount(ts)
+}
+
func (db *RockDB) PFCount(ts int64, keys ...[]byte) (int64, error) {
if len(keys) == 1 {
item, ok := db.hllCache.Get(keys[0])
if ok {
- cnt := atomic.LoadUint64(&item.cachedCount)
- if cnt&0x8000000000000000 == 0 {
- return int64(cnt), nil
- }
- item.Lock()
- cnt = item.hllp.Count()
- item.Unlock()
- if cnt&0x8000000000000000 != 0 {
- return 0, errHLLCountOverflow
- }
- atomic.StoreInt64(&item.ts, ts)
- atomic.StoreUint64(&item.cachedCount, cnt)
- return int64(cnt), nil
+ return cntFromItem(ts, item)
}
}
keyList := make([][]byte, len(keys))
@@ -198,7 +401,15 @@ func (db *RockDB) PFCount(ts int64, keys ...[]byte) (int64, error) {
keyList[i] = kk
}
}
- db.eng.MultiGetBytes(db.defaultReadOpts, keyList, keyList, errs)
+ if len(keys) == 1 {
+ db.hllCache.rl.RLock()
+ defer db.hllCache.rl.RUnlock()
+ item, ok := db.hllCache.Get(keys[0])
+ if ok {
+ return cntFromItem(ts, item)
+ }
+ }
+ db.MultiGetBytes(keyList, keyList, errs)
for i, v := range keyList {
if errs[i] == nil && len(v) >= tsLen {
keyList[i] = keyList[i][:len(v)-tsLen]
@@ -216,72 +427,54 @@ func (db *RockDB) PFCount(ts int64, keys ...[]byte) (int64, error) {
if len(fkv) < 8+1 {
return 0, errInvalidHLLData
}
- pos := 1
- cnt := binary.BigEndian.Uint64(fkv[pos : pos+8])
- if cnt&0x8000000000000000 == 0 {
- return int64(cnt), nil
+ s := time.Now()
+ item, recompute, err := newHLLItemFromDBBytes(hllType, fkv)
+ if err != nil {
+ return 0, err
}
- switch hllType {
- case hllPlusDefault:
- // recompute
- hllp, _ := hll.NewPlus(hllPrecision)
- err := hllp.GobDecode(fkv[pos+8:])
- if err != nil {
- return 0, err
- }
- cnt = hllp.Count()
+ cost := time.Since(s)
+ slow.LogSlowDBWrite(cost, slow.NewSlowLogInfo(db.cfg.DataDir, string(fkv), "init pf item from db"))
+ cnt := item.cachedCount
+ if recompute {
+ cnt = item.computeHLLCount()
if cnt&0x8000000000000000 != 0 {
return 0, errHLLCountOverflow
}
- item := &hllCacheItem{
- hllp: hllp,
- hllType: hllPlusDefault,
- ts: ts,
- cachedCount: cnt,
- flushed: true,
- }
- db.hllCache.AddToReadCache(keys[0], item)
-
- return int64(cnt), err
- default:
- // unknown hll type
- return 0, errInvalidHLLData
+ atomic.StoreUint64(&item.cachedCount, cnt)
}
+ atomic.StoreInt64(&item.ts, ts)
+ db.hllCache.AddToReadCache(keys[0], item)
+ return int64(cnt), nil
} else {
- hllp, _ := hll.NewPlus(hllPrecision)
+ var mergeItem *hllCacheItem
// merge count
for i, v := range keyList {
item, ok := db.hllCache.Get(keys[i])
- var hllpv *hll.HyperLogLogPlus
+ var err error
if !ok {
if len(v) < 8+1 {
return 0, errInvalidHLLData
}
hllType := uint8(v[0])
- if hllType != hllPlusDefault {
- return 0, errInvalidHLLData
- }
- pos := 1
- hllpv, _ = hll.NewPlus(hllPrecision)
- err := hllpv.GobDecode(v[pos+8:])
+ item, _, err = newHLLItemFromDBBytes(hllType, v)
if err != nil {
return 0, err
}
- } else {
- hllpv = item.hllp
- }
- if item != nil {
- item.Lock()
}
- err := hllp.Merge(hllpv)
- if item != nil {
- item.Unlock()
+ if mergeItem == nil {
+ mergeItem, err = newHLLItem(item.hllType)
+ if err != nil {
+ return 0, err
+ }
}
+ item.Lock()
+ err = mergeItem.HLLMerge(item)
+ item.Unlock()
if err != nil {
return 0, err
}
}
- return int64(hllp.Count()), nil
+ return int64(mergeItem.computeHLLCount()), nil
}
}
@@ -297,58 +490,49 @@ func (db *RockDB) PFAdd(ts int64, rawKey []byte, elems ...[]byte) (int64, error)
_, key, err := convertRedisKeyToDBKVKey(rawKey)
item, ok := db.hllCache.Get(rawKey)
-
+ s := time.Now()
changed := false
- var hllp *hll.HyperLogLogPlus
if !ok {
- hllp, _ = hll.NewPlus(hllPrecision)
- oldV, _ := db.eng.GetBytesNoLock(db.defaultReadOpts, key)
+ oldV, _ := db.GetBytesNoLock(key)
if oldV != nil {
if len(oldV) < 8+1+tsLen {
return 0, errInvalidHLLData
}
- if len(oldV) >= tsLen {
- oldV = oldV[:len(oldV)-tsLen]
- }
- if uint8(oldV[0]) != hllPlusDefault {
- return 0, errInvalidHLLData
+ oldV = oldV[:len(oldV)-tsLen]
+ item, _, err = newHLLItemFromDBBytes(uint8(oldV[0]), oldV)
+ if err != nil {
+ return 0, err
}
- err = hllp.GobDecode(oldV[8+1:])
+ } else {
+ item, err = newHLLItem(initHLLType)
if err != nil {
return 0, err
}
- }
- if oldV == nil {
// first init always return changed
changed = true
}
- } else {
- hllp = item.hllp
- }
- if item == nil {
- item = &hllCacheItem{
- hllp: hllp,
- hllType: hllPlusDefault,
- }
+ item.ts = ts
}
- item.Lock()
- for _, elem := range elems {
- db.hasher64.Write(elem)
- if hllp.Add(db.hasher64) {
- changed = true
- }
- db.hasher64.Reset()
+
+ added, isDirty := item.addCount(db.hasher64, elems...)
+ if added {
+ changed = true
}
- item.Unlock()
+ cost := time.Since(s)
if !changed {
+ slow.LogSlowDBWrite(cost, slow.NewSlowLogInfo(db.cfg.DataDir, string(rawKey), "pfadd not changed"))
+ if !isDirty {
+ db.hllCache.AddToReadCache(rawKey, item)
+ }
return 0, nil
}
- cnt := atomic.LoadUint64(&item.cachedCount)
- cnt = cnt | 0x8000000000000000
- atomic.StoreUint64(&item.cachedCount, cnt)
- atomic.StoreInt64(&item.ts, ts)
- item.flushed = false
- db.hllCache.Add(rawKey, item)
+ db.hllCache.rl.Lock()
+ defer db.hllCache.rl.Unlock()
+ item.invalidCachedCnt(ts)
+
+ db.hllCache.AddDirtyWrite(rawKey, item)
+ cost = time.Since(s)
+ slow.LogSlowDBWrite(cost, slow.NewSlowLogInfo(db.cfg.DataDir, string(rawKey), "pfadd changed"))
return 1, nil
}
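+// note on the PFAdd flow above: lookups hit the dirty write cache first and then
+// the read cache; an unchanged add keeps the item in the read cache, while a
+// changed add marks the cached count dirty and moves the item to the dirty write
+// cache, to be persisted by onEvicted on eviction or on the next Flush.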
diff --git a/rockredis/t_hll_test.go b/rockredis/t_hll_test.go
index 0171721d..3f458b6a 100644
--- a/rockredis/t_hll_test.go
+++ b/rockredis/t_hll_test.go
@@ -1,65 +1,203 @@
package rockredis
import (
+ "math/rand"
"os"
"strconv"
"testing"
"time"
- "github.com/spaolacci/murmur3"
+ "github.com/golang/snappy"
+ "github.com/twmb/murmur3"
- hll2 "github.com/absolute8511/hyperloglog"
- //hll "github.com/axiomhq/hyperloglog"
+ hll2 "github.com/absolute8511/go-hll"
+ hll "github.com/absolute8511/hyperloglog"
+ hll3 "github.com/absolute8511/hyperloglog2"
"github.com/stretchr/testify/assert"
)
-// func TestHLLPerf(t *testing.T) {
-// hllp := hll.New14()
-// hasher64 := murmur3.New64()
-// for i := 0; i < 100000; i++ {
-// hasher64.Write([]byte(strconv.Itoa(i)))
-// hllp.InsertHash(hasher64.Sum64())
-// hasher64.Reset()
-// }
-// b, err := hllp.MarshalBinary()
-// t.Log(len(b))
-// //t.Log(hllp.Estimate())
-// //assert.True(t, false, "")
-// assert.Nil(t, err)
-// for i := 0; i < 100000; i++ {
-// hllp := hll.New14()
-// err = hllp.UnmarshalBinary(b)
-// hasher64.Write([]byte(strconv.Itoa(i)))
-// hllp.InsertHash(hasher64.Sum64())
-// hasher64.Reset()
-// //hllp.Estimate()
-// hllp.MarshalBinary()
-// }
-// }
-
+func TestHLLPlusSpace(t *testing.T) {
+ hllp, _ := hll.NewPlus(hllPrecision)
+ hasher64 := murmur3.New64()
+ for i := 0; i < 5000; i++ {
+ hasher64.Write([]byte(strconv.Itoa(rand.Int())))
+ hllp.Add(hasher64)
+ hasher64.Reset()
+ if i%100 == 0 {
+ b, _ := hllp.GobEncode()
+ t.Log(len(b))
+ }
+ }
+}
func TestHLLPlusPerf(t *testing.T) {
- hllp, _ := hll2.NewPlus(14)
+ hllp, _ := hll.NewPlus(hllPrecision)
hasher64 := murmur3.New64()
- for i := 0; i < 100000; i++ {
- hasher64.Write([]byte(strconv.Itoa(i)))
+ for i := 0; i < 500; i++ {
+ hasher64.Write([]byte(strconv.Itoa(rand.Int())))
hllp.Add(hasher64)
hasher64.Reset()
}
b, err := hllp.GobEncode()
- t.Log(len(b))
- //t.Log(hllp.Count())
- //assert.True(t, false, "")
- assert.Nil(t, err)
+ s := time.Now()
for i := 0; i < 100000; i++ {
- hllp, _ := hll2.NewPlus(14)
+ hllp, _ := hll.NewPlus(14)
hllp.GobDecode(b)
- hasher64.Write([]byte(strconv.Itoa(i)))
+ }
+ t.Logf("small hll unmarshal cost: %v", time.Since(s))
+ for i := 0; i < 100000; i++ {
+ hllp.GobEncode()
+ }
+ t.Logf("small hll marshal cost: %v", time.Since(s))
+ for i := 0; i < 10000; i++ {
+ hllp.Count()
+ }
+ t.Logf("small hll count cost: %v", time.Since(s))
+ for i := 0; i < 100000; i++ {
+ hasher64.Write([]byte(strconv.Itoa(rand.Int())))
hllp.Add(hasher64)
hasher64.Reset()
- //hllp.Count()
+ }
+ //t.Log(hllp.Count())
+ t.Log(time.Since(s))
+ for i := 0; i < 10000; i++ {
+ hllp.Count()
+ }
+ b, err = hllp.GobEncode()
+ assert.Nil(t, err)
+ t.Log(time.Since(s))
+ for i := 0; i < 100000; i++ {
+ hllp, _ := hll.NewPlus(14)
+ hllp.GobDecode(b)
+ }
+ t.Logf("large hll unmarshal cost: %v", time.Since(s))
+ for i := 0; i < 100000; i++ {
hllp.GobEncode()
}
+ t.Logf("large hll marshal cost: %v", time.Since(s))
+}
+func TestHLL3Space(t *testing.T) {
+ hllp, _ := hll3.New(hllPrecision, true)
+ hasher64 := murmur3.New64()
+ for i := 0; i < 5000; i++ {
+ hasher64.Write([]byte(strconv.Itoa(rand.Int())))
+ hllp.InsertHash(hasher64.Sum64())
+ hasher64.Reset()
+ if i%100 == 0 {
+ b, _ := hllp.MarshalBinary()
+ t.Log(len(b))
+ }
+ }
+}
+
+func TestHLL3Perf(t *testing.T) {
+ hllp, _ := hll3.New(hllPrecision, true)
+ hasher64 := murmur3.New64()
+ for i := 0; i < 500; i++ {
+ hasher64.Write([]byte(strconv.Itoa(rand.Int())))
+ hllp.InsertHash(hasher64.Sum64())
+ hasher64.Reset()
+ }
+ b, err := hllp.MarshalBinary()
+ s := time.Now()
+ for i := 0; i < 100000; i++ {
+ hllp, _ := hll3.New(hllPrecision, true)
+ hllp.UnmarshalBinary(b)
+ }
+ t.Logf("small hll unmarshal cost: %v", time.Since(s))
+ for i := 0; i < 100000; i++ {
+ hllp.MarshalBinary()
+ }
+ t.Logf("small hll marshal cost: %v", time.Since(s))
+ for i := 0; i < 10000; i++ {
+ hllp.Estimate()
+ }
+ t.Logf("small hll count cost: %v", time.Since(s))
+ for i := 0; i < 100000; i++ {
+ hasher64.Write([]byte(strconv.Itoa(rand.Int())))
+ hllp.InsertHash(hasher64.Sum64())
+ hasher64.Reset()
+ }
+ //t.Log(hllp.Estimate())
+ t.Log(time.Since(s))
+ for i := 0; i < 10000; i++ {
+ hllp.Estimate()
+ }
+ t.Logf("large hll count cost: %v", time.Since(s))
+ b, err = hllp.MarshalBinary()
+ assert.Nil(t, err)
+ for i := 0; i < 100000; i++ {
+ hllp, _ := hll3.New(hllPrecision, true)
+ hllp.UnmarshalBinary(b)
+ }
+ t.Logf("large hll unmarshal cost: %v", time.Since(s))
+ for i := 0; i < 100000; i++ {
+ hllp.MarshalBinary()
+ }
+ t.Logf("large hll marshal cost: %v", time.Since(s))
+}
+
+func TestHLL2Space(t *testing.T) {
+ sz, err := hll2.SizeByP(int(hllPrecision))
+ assert.Nil(t, err)
+ hllp := make(hll2.HLL, sz)
+ hasher64 := murmur3.New64()
+ for i := 0; i < 5000; i++ {
+ hasher64.Write([]byte(strconv.Itoa(rand.Int())))
+ hllp.Add(hasher64.Sum64())
+ hasher64.Reset()
+ if i%100 == 0 {
+ b := snappy.Encode(nil, hllp)
+ t.Log(len(b))
+ }
+ }
+}
+func TestHLL2Perf(t *testing.T) {
+ sz, err := hll2.SizeByP(int(hllPrecision))
+ assert.Nil(t, err)
+ hllp := make(hll2.HLL, sz)
+
+ hasher64 := murmur3.New64()
+ for i := 0; i < 500; i++ {
+ hasher64.Write([]byte(strconv.Itoa(rand.Int())))
+ hllp.Add(hasher64.Sum64())
+ hasher64.Reset()
+ }
+
+ b := snappy.Encode(nil, []byte(hllp))
+ s := time.Now()
+ for i := 0; i < 100000; i++ {
+ snappy.Decode(nil, b)
+ }
+ t.Logf("small hll unmarshal cost: %v", time.Since(s))
+
+ for i := 0; i < 100000; i++ {
+ snappy.Encode(nil, hllp)
+ }
+ t.Logf("small hll marshal cost: %v", time.Since(s))
+
+ for i := 0; i < 100000; i++ {
+ hasher64.Write([]byte(strconv.Itoa(rand.Int())))
+ hllp.Add(hasher64.Sum64())
+ hasher64.Reset()
+ }
+ t.Log(time.Since(s))
+ //t.Log(hllp.EstimateCardinality())
+ for i := 0; i < 1000; i++ {
+ hllp.EstimateCardinality()
+ }
+ t.Log(hllp.IsSparse())
+ b = snappy.Encode(nil, []byte(hllp))
+ t.Log(time.Since(s))
+ for i := 0; i < 100000; i++ {
+ snappy.Decode(nil, b)
+ }
+ t.Logf("large hll unmarshal cost: %v", time.Since(s))
+ for i := 0; i < 100000; i++ {
+ snappy.Encode(nil, hllp)
+ }
+ t.Logf("large hll marshal cost: %v", time.Since(s))
}
+
func TestDBHLLOp(t *testing.T) {
db := getTestDB(t)
defer os.RemoveAll(db.cfg.DataDir)
@@ -87,6 +225,12 @@ func TestDBHLLOp(t *testing.T) {
assert.True(t, v1 > 0, "should have pf count")
v11, _ := db.PFCount(0, key1)
assert.Equal(t, v1, v11)
+ // add same value
+ ret, err = db.PFAdd(0, key1, []byte("hello world 1"))
+ assert.Nil(t, err)
+ v11, _ = db.PFCount(0, key1)
+ assert.Equal(t, v1, v11)
+
v2, _ := db.PFCount(0, key2)
t.Log(v2)
assert.True(t, v2 > 0, "should have pf count")
@@ -113,12 +257,17 @@ func TestDBHLLOp(t *testing.T) {
assert.Nil(t, err)
t.Log(v3)
assert.True(t, v3 <= v11+v22, "merged count should not great than add")
+ if db.cfg.EngineType == "mem" {
+ ck, _ := db.rockEng.NewCheckpoint(false)
+ err = ck.Save(db.GetDataDir(), nil)
+ assert.Nil(t, err)
+ }
+
db.Close()
db = getTestDBWithDir(t, db.cfg.DataDir)
+ defer db.Close()
v1Reopen, err := db.PFCount(0, key1)
assert.Nil(t, err)
- rawV, _ := db.KVGet(key1)
- t.Logf("pf key : %v\n", rawV)
v2Reopen, err := db.PFCount(0, key2)
assert.Nil(t, err)
t.Log(v1Reopen)
@@ -134,7 +283,9 @@ func TestDBHLLOp(t *testing.T) {
var lastC1 int64
var lastC2 int64
var cnt int64
+ var largeDiff int64
loop := true
+ var total int64
for loop {
c1, err := db.PFCount(0, key1)
assert.Nil(t, err)
@@ -143,11 +294,18 @@ func TestDBHLLOp(t *testing.T) {
if c1 < lastC1 {
t.Logf("pfcount not increased: %v, %v", c1, lastC1)
cnt++
+ if c1 < lastC1-lastC1/100 {
+ largeDiff++
+ }
}
if c2 < lastC2 {
t.Logf("pfcount not increased: %v, %v", c2, lastC2)
cnt++
+ if c2 < lastC2-lastC2/100 {
+ largeDiff++
+ }
}
+ total += 2
lastC1 = c1
lastC2 = c2
select {
@@ -158,7 +316,9 @@ func TestDBHLLOp(t *testing.T) {
time.Sleep(time.Microsecond)
}
}
- assert.True(t, cnt < 10, "not increased count: %v", cnt)
+ t.Logf("pfcount not increased: %v, %v, %v", largeDiff, cnt, total)
+ assert.True(t, largeDiff < 10, "not increased count has large diff: %v, %v, %v", cnt, largeDiff, total)
+ assert.True(t, cnt < total/10, "not increased count: %v, %v, %v", cnt, largeDiff, total)
}()
totalCnt := MAX_BATCH_NUM * 10
elems := make([][]byte, totalCnt)
@@ -184,31 +344,29 @@ func TestDBHLLOp(t *testing.T) {
v2, _ = db.PFCount(0, key2)
t.Log(v1)
t.Log(v2)
- newkey1, _ := db.KVGet(key1)
- t.Log(newkey1)
//newkey2, _ := db.KVGet(key2)
//assert.NotEqual(t, oldkey1, newkey1)
//assert.NotEqual(t, oldkey2, newkey2)
assert.NotEqual(t, v1, v11)
assert.NotEqual(t, v2, v22)
- assert.True(t, int64(totalCnt-totalCnt/100) < v1, "error should be less than 1%")
- assert.True(t, int64(totalCnt+totalCnt/100) > v1, "error should be less than 1%")
- assert.True(t, int64(totalCnt-totalCnt/100) < v2, "error should be less than 1%")
- assert.True(t, int64(totalCnt+totalCnt/100) > v2, "error should be less than 1%")
+ assert.True(t, int64(totalCnt-totalCnt/40) < v1, "error should be less than 2%")
+ assert.True(t, int64(totalCnt+totalCnt/40) > v1, "error should be less than 2%")
+ assert.True(t, int64(totalCnt-totalCnt/40) < v2, "error should be less than 2%")
+ assert.True(t, int64(totalCnt+totalCnt/40) > v2, "error should be less than 2%")
v33, err := db.PFCount(0, key1, key2)
assert.Nil(t, err)
t.Log(v33)
assert.NotEqual(t, v3, v33)
- assert.True(t, v33 <= v1+v2+int64(totalCnt/100), "merged count should not diff too much")
- assert.True(t, v33 >= v1+v2-int64(totalCnt/100), "merged count should not diff too much")
+ assert.True(t, v33 <= v1+v2+int64(totalCnt/20), "merged count should not diff too much")
+ assert.True(t, v33 >= v1+v2-int64(totalCnt/20), "merged count should not diff too much")
db.hllCache.Flush()
close(stopC)
// refill cache with key1, key2 to remove read cache
db.PFAdd(0, key1, []byte(strconv.Itoa(0)))
- db.PFAdd(0, key2, []byte(strconv.Itoa(0+totalCnt)))
+ db.PFAdd(0, key2, []byte(strconv.Itoa(0)))
// test cache evict to remove write cache
- for i := 0; i < HLLCacheSize*2; i++ {
- db.PFAdd(0, []byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
+ for i := 0; i < HLLReadCacheSize*2; i++ {
+ db.PFAdd(0, []byte("test:"+strconv.Itoa(i)), []byte(strconv.Itoa(i)))
}
// refill cache with key1, key2
for i := 0; i < totalCnt; i++ {
@@ -216,8 +374,8 @@ func TestDBHLLOp(t *testing.T) {
db.PFAdd(0, key2, []byte(strconv.Itoa(i+totalCnt)))
}
// cache evict, remove read cache
- for i := 0; i < HLLCacheSize*2; i++ {
- db.PFAdd(0, []byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
+ for i := 0; i < HLLReadCacheSize*2; i++ {
+ db.PFAdd(0, []byte("test:"+strconv.Itoa(i)), []byte(strconv.Itoa(i)))
}
v3, err = db.PFCount(0, key1, key2)
assert.Nil(t, err)
@@ -228,3 +386,309 @@ func TestDBHLLOp(t *testing.T) {
assert.Equal(t, v2, v22)
//assert.True(t, false, "failed")
}
+
+func TestDBHLLDifferentType(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ // add type1, read type1
+ // change default type to type2 read type1
+ // add type2, read type2
+ // change default type to type1, read type1, read type2
+ key1 := []byte("test:testdb_hll_a_t1")
+ v1, err := db.PFCount(0, key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), v1)
+
+ var ret int64
+ ret, err = db.PFAdd(0, key1, []byte("hello world 1"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), ret)
+ v1, err = db.PFCount(0, key1)
+ assert.Nil(t, err)
+ t.Log(v1)
+ assert.True(t, v1 > 0, "should have pf count")
+
+ initHLLType = hllType2
+
+ key2 := []byte("test:testdb_hll_b_t2")
+ ret, err = db.PFAdd(0, key2, []byte("hello world 2"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), ret)
+ v2, _ := db.PFCount(0, key2)
+ t.Log(v2)
+ assert.True(t, v2 > 0, "should have pf count")
+
+ v11, _ := db.PFCount(0, key1)
+ assert.Equal(t, v1, v11)
+ v22, _ := db.PFCount(0, key2)
+ assert.Equal(t, v2, v22)
+
+ for i := 0; i < 200; i++ {
+ db.PFAdd(0, key1, []byte(strconv.Itoa(i)))
+ db.PFAdd(0, key2, []byte(strconv.Itoa(i)))
+ }
+ v11, _ = db.PFCount(0, key1)
+ v22, _ = db.PFCount(0, key2)
+ t.Log(v11)
+ t.Log(v22)
+ assert.NotEqual(t, v1, v11)
+ assert.NotEqual(t, v2, v22)
+
+ db.hllCache.Flush()
+ num, err := db.GetTableKeyCount([]byte("test"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), num)
+
+ initHLLType = hllPlusDefault
+
+ key3 := []byte("test:testdb_hll_b_t3")
+ ret, err = db.PFAdd(0, key3, []byte("hello world 2"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), ret)
+ v3, _ := db.PFCount(0, key2)
+ t.Log(v3)
+ assert.True(t, v3 > 0, "should have pf count")
+
+ if db.cfg.EngineType == "mem" {
+ ck, _ := db.rockEng.NewCheckpoint(false)
+ err = ck.Save(db.GetDataDir(), nil)
+ assert.Nil(t, err)
+ }
+
+ db.Close()
+ db = getTestDBWithDir(t, db.cfg.DataDir)
+ defer db.Close()
+ v1Reopen, err := db.PFCount(0, key1)
+ assert.Nil(t, err)
+ v2Reopen, err := db.PFCount(0, key2)
+ assert.Nil(t, err)
+ t.Log(v1Reopen)
+ t.Log(v2Reopen)
+ assert.Equal(t, v11, v1Reopen)
+ assert.Equal(t, v22, v2Reopen)
+ _, err = db.PFCount(0, key1, key2, key3)
+ assert.NotNil(t, err)
+
+ totalCnt := MAX_BATCH_NUM * 10
+ elems := make([][]byte, totalCnt)
+ for i := 0; i < totalCnt; i++ {
+ elems[i] = []byte(strconv.Itoa(i))
+ }
+ ret, err = db.PFAdd(0, key1, elems...)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), ret)
+ totalCnt = totalCnt * 2
+ for i := 0; i < totalCnt; i++ {
+ db.PFAdd(0, key1, []byte(strconv.Itoa(i)))
+ db.PFAdd(0, key2, []byte(strconv.Itoa(i+totalCnt)))
+ }
+ ret, err = db.PFAdd(0, key1, []byte("1"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), ret)
+ // lazy compute should modify value
+ v1, _ = db.PFCount(0, key1)
+ v2, _ = db.PFCount(0, key2)
+ t.Log(v1)
+ t.Log(v2)
+
+ db.hllCache.Flush()
+ // refill cache with key1, key2 to remove read cache
+ db.PFAdd(0, key1, []byte(strconv.Itoa(0)))
+ db.PFAdd(0, key2, []byte(strconv.Itoa(0)))
+ // test cache evict to remove write cache
+ for i := 0; i < HLLReadCacheSize*2; i++ {
+ db.PFAdd(0, []byte("test:"+strconv.Itoa(i)), []byte(strconv.Itoa(i)))
+ }
+ // refill cache with key1, key2
+ for i := 0; i < totalCnt; i++ {
+ db.PFAdd(0, key1, []byte(strconv.Itoa(i)))
+ db.PFAdd(0, key2, []byte(strconv.Itoa(i+totalCnt)))
+ }
+ // cache evict, remove read cache
+ for i := 0; i < HLLReadCacheSize*2; i++ {
+ db.PFAdd(0, []byte("test:"+strconv.Itoa(i)), []byte(strconv.Itoa(i)))
+ }
+ v11, _ = db.PFCount(0, key1)
+ v22, _ = db.PFCount(0, key2)
+ assert.Equal(t, v1, v11)
+ assert.Equal(t, v2, v22)
+ _, err = db.PFCount(0, key1, key2, key3)
+ assert.NotNil(t, err)
+}
+
+func TestDBHLLOpPerf1(t *testing.T) {
+ initHLLType = hllPlusDefault
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key1 := []byte("test:testdb_hll_perf1_a")
+ v1, err := db.PFCount(0, key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), v1)
+
+ var ret int64
+ ret, err = db.PFAdd(0, key1, []byte("hello world 1"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), ret)
+
+ key2 := []byte("test:testdb_hll_perf1_b")
+
+ stopC := make(chan bool, 0)
+ for i := 0; i < 5; i++ {
+ go func() {
+ var lastC1 int64
+ var lastC2 int64
+ var cnt int64
+ loop := true
+ for loop {
+ c1, err := db.PFCount(0, key1)
+ assert.Nil(t, err)
+ c2, err := db.PFCount(0, key2)
+ assert.Nil(t, err)
+ if c1 < lastC1 {
+ cnt++
+ }
+ if c2 < lastC2 {
+ cnt++
+ }
+ lastC1 = c1
+ lastC2 = c2
+ select {
+ case <-stopC:
+ loop = false
+ break
+ default:
+ time.Sleep(time.Microsecond)
+ }
+ }
+ assert.True(t, cnt < 10, "not increased count: %v", cnt)
+ }()
+ }
+
+ for i := 0; i < 100; i++ {
+ db.PFAdd(0, key1, []byte(strconv.Itoa(i)))
+ db.PFAdd(0, key2, []byte(strconv.Itoa(i)))
+ }
+ db.hllCache.Flush()
+ kv1, _ := db.KVGet(key1)
+ kv2, _ := db.KVGet(key2)
+ t.Log(len(kv1))
+ t.Log(len(kv2))
+ t.Log(db.PFCount(0, key1))
+ t.Log(db.PFCount(0, key2))
+
+ for i := 0; i < 10; i++ {
+ // test cache evict to remove write cache
+ for j := 0; j < HLLReadCacheSize*2; j++ {
+ db.PFAdd(0, []byte("test:"+strconv.Itoa(j)), []byte(strconv.Itoa(i)))
+ }
+ // refill cache with key1, key2
+ for j := 0; j < MAX_BATCH_NUM*2; j++ {
+ db.PFAdd(0, key1, []byte(strconv.Itoa(j)))
+ db.PFAdd(0, key2, []byte(strconv.Itoa(j+MAX_BATCH_NUM*10)))
+ }
+ // cache evict, remove read cache
+ for j := 0; j < HLLReadCacheSize*2; j++ {
+ db.PFAdd(0, []byte("test:"+strconv.Itoa(j)), []byte(strconv.Itoa(i+1000)))
+ }
+ time.Sleep(time.Microsecond)
+ }
+}
+
+func TestDBHLLOpPerf3(t *testing.T) {
+ initHLLType = hllType3
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key1 := []byte("test:testdb_hll_perf3_a")
+ v1, err := db.PFCount(0, key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), v1)
+
+ var ret int64
+ ret, err = db.PFAdd(0, key1, []byte("hello world 1"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), ret)
+
+ key2 := []byte("test:testdb_hll_perf3_b")
+
+ stopC := make(chan bool, 0)
+ for i := 0; i < 5; i++ {
+ go func() {
+ var lastC1 int64
+ var lastC2 int64
+ var cnt int64
+ loop := true
+ for loop {
+ c1, err := db.PFCount(0, key1)
+ assert.Nil(t, err)
+ c2, err := db.PFCount(0, key2)
+ assert.Nil(t, err)
+ if c1 < lastC1 {
+ cnt++
+ }
+ if c2 < lastC2 {
+ cnt++
+ }
+ lastC1 = c1
+ lastC2 = c2
+ select {
+ case <-stopC:
+ loop = false
+ break
+ default:
+ time.Sleep(time.Microsecond)
+ }
+ }
+ assert.True(t, cnt < 10, "not increased count: %v", cnt)
+ }()
+ }
+
+ for i := 0; i < 100; i++ {
+ db.PFAdd(0, key1, []byte(strconv.Itoa(i)))
+ db.PFAdd(0, key2, []byte(strconv.Itoa(i)))
+ }
+ db.hllCache.Flush()
+ kv1, _ := db.KVGet(key1)
+ kv2, _ := db.KVGet(key2)
+ t.Log(len(kv1))
+ t.Log(len(kv2))
+ t.Log(db.PFCount(0, key1))
+ t.Log(db.PFCount(0, key2))
+
+ for i := 0; i < 10; i++ {
+ // test cache evict to remove write cache
+ for j := 0; j < HLLReadCacheSize*2; j++ {
+ db.PFAdd(0, []byte("test:"+strconv.Itoa(j)), []byte(strconv.Itoa(i)))
+ }
+ // refill cache with key1, key2
+ for j := 0; j < MAX_BATCH_NUM*2; j++ {
+ db.PFAdd(0, key1, []byte(strconv.Itoa(j)))
+ db.PFAdd(0, key2, []byte(strconv.Itoa(j+MAX_BATCH_NUM*10)))
+ }
+ // cache evict, remove read cache
+ for j := 0; j < HLLReadCacheSize*2; j++ {
+ db.PFAdd(0, []byte("test:"+strconv.Itoa(j)), []byte(strconv.Itoa(i+1000)))
+ }
+ time.Sleep(time.Microsecond)
+ }
+}
+
+func BenchmarkHLLPFADD(b *testing.B) {
+ db := getTestDBForBench()
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key1 := []byte("test:hll_pfadd_bench")
+
+ b.ResetTimer()
+ for i := 0; i <= b.N; i++ {
+ db.PFAdd(0, key1, []byte("hello world 1"+strconv.Itoa(i)))
+ db.hllCache.Flush()
+ }
+ b.StopTimer()
+}
diff --git a/rockredis/t_json.go b/rockredis/t_json.go
index 525d4c09..d417e1b6 100644
--- a/rockredis/t_json.go
+++ b/rockredis/t_json.go
@@ -5,9 +5,9 @@ import (
"errors"
"strings"
- "github.com/absolute8511/ZanRedisDB/common"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
+ "github.com/youzan/ZanRedisDB/common"
)
var (
@@ -72,7 +72,10 @@ func encodeJSONStopKey(table []byte, key []byte) []byte {
func (db *RockDB) jSetPath(jdata []byte, path string, value []byte) ([]byte, error) {
if len(path) == 0 {
- return value, nil
+ // setting a path modifies the value in place, so we need to return a copy
+ v := make([]byte, len(value))
+ copy(v, value)
+ return v, nil
}
return sjson.SetRawBytes(jdata, path, value)
}
@@ -85,7 +88,7 @@ func (db *RockDB) getOldJSON(table []byte, rk []byte) ([]byte, []byte, bool, err
if err != nil {
return nil, nil, false, err
}
- oldV, err := db.eng.GetBytesNoLock(db.defaultReadOpts, ek)
+ oldV, err := db.GetBytesNoLock(ek)
if err != nil {
return ek, nil, false, err
}
@@ -122,7 +125,6 @@ func (db *RockDB) JSet(ts int64, key []byte, path []byte, value []byte) (int64,
return 0, err
}
- db.wb.Clear()
oldV, err = db.jSetPath(oldV, convertJSONPath(path), value)
if err != nil {
return 0, err
@@ -143,7 +145,7 @@ func (db *RockDB) JSet(ts int64, key []byte, path []byte, value []byte) (int64,
tsBuf := PutInt64(ts)
oldV = append(oldV, tsBuf...)
db.wb.Put(ek, oldV)
- err = db.eng.Write(db.defaultWriteOpts, db.wb)
+ err = db.CommitBatchWrite()
if isExist {
return 0, err
}
@@ -151,7 +153,7 @@ func (db *RockDB) JSet(ts int64, key []byte, path []byte, value []byte) (int64,
}
func (db *RockDB) JMset(ts int64, key []byte, args ...common.KVRecord) error {
- if len(args) >= MAX_BATCH_NUM {
+ if len(args) > MAX_BATCH_NUM {
return errTooMuchBatchSize
}
if len(args) == 0 {
@@ -172,8 +174,6 @@ func (db *RockDB) JMset(ts int64, key []byte, args ...common.KVRecord) error {
return err
}
- db.wb.Clear()
-
for i := 0; i < len(args); i++ {
path := args[i].Key
oldV, err = db.jSetPath(oldV, convertJSONPath(path), args[i].Value)
@@ -199,7 +199,7 @@ func (db *RockDB) JMset(ts int64, key []byte, args ...common.KVRecord) error {
if !isExist {
db.IncrTableKeyCount(table, 1, db.wb)
}
- err = db.eng.Write(db.defaultWriteOpts, db.wb)
+ err = db.CommitBatchWrite()
return err
}
@@ -277,7 +277,6 @@ func (db *RockDB) JDel(ts int64, key []byte, path []byte) (int64, error) {
}
jpath := convertJSONPath(path)
- db.wb.Clear()
if jpath == "" {
// delete whole json
db.wb.Delete(ek)
@@ -295,7 +294,7 @@ func (db *RockDB) JDel(ts int64, key []byte, path []byte) (int64, error) {
oldV = append(oldV, tsBuf...)
db.wb.Put(ek, oldV)
}
- err = db.eng.Write(db.defaultWriteOpts, db.wb)
+ err = db.CommitBatchWrite()
return 1, err
}
@@ -305,7 +304,7 @@ func (db *RockDB) JKeyExists(key []byte) (int64, error) {
return 0, err
}
sk, _ := encodeJSONKey(table, rk)
- v, err := db.eng.GetBytes(db.defaultReadOpts, sk)
+ v, err := db.GetBytes(sk)
if v != nil && err == nil {
return 1, nil
}
@@ -348,7 +347,6 @@ func (db *RockDB) JArrayAppend(ts int64, key []byte, path []byte, jsons ...[]byt
}
arrySize++
}
- db.wb.Clear()
if err := checkJSONValueSize(oldV); err != nil {
return 0, err
}
@@ -361,7 +359,7 @@ func (db *RockDB) JArrayAppend(ts int64, key []byte, path []byte, jsons ...[]byt
if !isExist {
db.IncrTableKeyCount(table, 1, db.wb)
}
- err = db.eng.Write(db.defaultWriteOpts, db.wb)
+ err = db.CommitBatchWrite()
return int64(arrySize), err
}
@@ -410,11 +408,10 @@ func (db *RockDB) JArrayPop(ts int64, key []byte, path []byte) (string, error) {
if err != nil {
return "", err
}
- db.wb.Clear()
tsBuf := PutInt64(ts)
oldV = append(oldV, tsBuf...)
db.wb.Put(ek, oldV)
- err = db.eng.Write(db.defaultWriteOpts, db.wb)
+ err = db.CommitBatchWrite()
return poped, err
}
diff --git a/rockredis/t_kv.go b/rockredis/t_kv.go
index 194037d3..8ea9381c 100644
--- a/rockredis/t_kv.go
+++ b/rockredis/t_kv.go
@@ -1,19 +1,33 @@
package rockredis
import (
+ "bytes"
+ "encoding/binary"
"errors"
+ "fmt"
+ "math/bits"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/gorocksdb"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
)
+// the new kv value format is:
+// KVType | key -> | value header | value | modify time |
const (
tsLen = 8
+ // bit offsets are limited to a 2MB bitmap (2 * 8 * 1024 * 1024 bits)
+ MaxBitOffset = 2 * 8 * 1024 * 1024
+)
+
+const (
+ defaultSep byte = ':'
)
var errKVKey = errors.New("invalid encode kv key")
var errInvalidDBValue = errors.New("invalide db value")
+var ErrBitOverflow = errors.New("bit offset overflowed")
+var errInvalidTTL = errors.New("invalid expire time")
func convertRedisKeyToDBKVKey(key []byte) ([]byte, []byte, error) {
table, _, _ := extractTableFromRedisKey(key)
@@ -28,10 +42,7 @@ func convertRedisKeyToDBKVKey(key []byte) ([]byte, []byte, error) {
}
func checkKeySize(key []byte) error {
- if len(key) > MaxKeySize || len(key) == 0 {
- return errKeySize
- }
- return nil
+ return common.CheckKey(key)
}
func checkValueSize(value []byte) error {
@@ -62,81 +73,247 @@ func decodeKVKey(ek []byte) ([]byte, error) {
return ek[pos:], nil
}
+type verKeyInfo struct {
+ OldHeader *headerMetaValue
+ Expired bool
+ Table []byte
+ VerKey []byte
+}
+
+// decoded meta for compatible with old format
+func (info verKeyInfo) MetaData() []byte {
+ return info.OldHeader.UserData
+}
+
+// used by kv write operations to initialize the value header
+func (db *RockDB) prepareKVValueForWrite(ts int64, rawKey []byte, reset bool) (verKeyInfo, []byte, error) {
+ var keyInfo verKeyInfo
+ var err error
+ var v []byte
+ keyInfo.Table, keyInfo.VerKey, v, keyInfo.Expired, err = db.getRawDBKVValue(ts, rawKey, false)
+ if err != nil {
+ return keyInfo, nil, err
+ }
+ // expired is true only if the value exists and its expire time has been reached.
+ // we still decode the old data on expire and leave it to the caller to check the expired state
+ var realV []byte
+ realV, keyInfo.OldHeader, err = db.decodeDBRawValueToRealValue(v)
+ if err != nil {
+ return keyInfo, nil, err
+ }
+ if keyInfo.Expired || reset {
+ // since renew may not remove the old expire meta under the consistency policy,
+ // we need to delete the expire meta for the kv type to avoid expiring the newly rewritten data
+ db.expiration.renewOnExpired(ts, KVType, rawKey, keyInfo.OldHeader)
+ }
+ if realV == nil {
+ return keyInfo, nil, nil
+ }
+ return keyInfo, realV, nil
+}
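// A minimal sketch of the calling convention the write paths below follow
// (incr, Append, SetRange and friends), assuming rawKey/newV placeholders:
//
//	keyInfo, realV, err := db.prepareKVValueForWrite(ts, rawKey, false)
//	if err != nil {
//		return err
//	}
//	if realV == nil && !keyInfo.Expired {
//		// only a brand new key changes the per-table key counter
//		db.IncrTableKeyCount(keyInfo.Table, 1, db.wb)
//	}
//	dbv := db.encodeRealValueToDBRawValue(ts, keyInfo.OldHeader, newV)
//	db.wb.Put(keyInfo.VerKey, dbv)
//	return db.CommitBatchWrite()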
+
+// this resets the expire meta of the old value and rewrites the value with the new ttl and a new header
+func (db *RockDB) resetWithNewKVValue(ts int64, rawKey []byte, value []byte, ttl int64, wb engine.WriteBatch) ([]byte, error) {
+ oldHeader, err := db.expiration.decodeRawValue(KVType, nil)
+ if err != nil {
+ return nil, err
+ }
+ oldHeader.UserData = value
+ value = db.expiration.encodeToRawValue(KVType, oldHeader)
+ if ttl <= 0 {
+ value, err = db.expiration.delExpire(KVType, rawKey, value, true, wb)
+ } else {
+ value, err = db.expiration.rawExpireAt(KVType, rawKey, value, ttl+ts/int64(time.Second), wb)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ var nvalue []byte
+ if len(value)+8 > len(db.writeTmpBuf) {
+ nvalue = make([]byte, len(value)+8)
+ } else {
+ nvalue = db.writeTmpBuf[:len(value)+8]
+ }
+ copy(nvalue, value)
+ PutInt64ToBuf(ts, nvalue[len(value):])
+ return nvalue, nil
+}
+
+// encode the user kv data (no header and no modify time) to the db value (including header and modify time)
+func (db *RockDB) encodeRealValueToDBRawValue(ts int64, oldh *headerMetaValue, value []byte) []byte {
+ oldh.UserData = value
+ buf := db.expiration.encodeToRawValue(KVType, oldh)
+ tsBuf := PutInt64(ts)
+ buf = append(buf, tsBuf...)
+ return buf
+}
+
+// decode the db value (including header and modify time) to the real user kv data + header
+func (db *RockDB) decodeDBRawValueToRealValue(value []byte) ([]byte, *headerMetaValue, error) {
+ if len(value) >= tsLen {
+ value = value[:len(value)-tsLen]
+ }
+ oldh, err := db.expiration.decodeRawValue(KVType, value)
+ if err != nil {
+ return nil, oldh, err
+ }
+ return oldh.UserData, oldh, nil
+}
+
+// read from db; the header and modify time are stripped from the returned real value
+func (db *RockDB) getDBKVRealValueAndHeader(ts int64, rawKey []byte, useLock bool) (verKeyInfo, []byte, error) {
+ var keyInfo verKeyInfo
+ var err error
+ var v []byte
+ keyInfo.Table, keyInfo.VerKey, v, keyInfo.Expired, err = db.getRawDBKVValue(ts, rawKey, useLock)
+ if err != nil {
+ return keyInfo, nil, err
+ }
+ // expired is true only if the value exists and its expire time has been reached.
+ // we still decode the old data when expired and leave it to the caller to check the expired state
+ var realV []byte
+ realV, keyInfo.OldHeader, err = db.decodeDBRawValueToRealValue(v)
+ if err != nil {
+ return keyInfo, nil, err
+ }
+ if realV == nil {
+ return keyInfo, nil, nil
+ }
+ return keyInfo, realV, nil
+}
+
+func (db *RockDB) getAndCheckExpRealValue(ts int64, rawKey []byte, rawValue []byte, useLock bool) (bool, []byte, error) {
+ if rawValue == nil {
+ return false, nil, nil
+ }
+ expired, err := db.expiration.isExpired(ts, KVType, rawKey, rawValue, useLock)
+ if err != nil {
+ return false, nil, err
+ }
+ realV, _, err := db.decodeDBRawValueToRealValue(rawValue)
+ if err != nil {
+ return expired, nil, err
+ }
+ return expired, realV, nil
+}
+
+// should be used only in read operations
+func (db *RockDB) isKVExistOrExpired(ts int64, rawKey []byte) (int64, error) {
+ _, kk, err := convertRedisKeyToDBKVKey(rawKey)
+ if err != nil {
+ return 0, err
+ }
+ vref, err := db.rockEng.GetRef(kk)
+ if err != nil {
+ return 0, err
+ }
+ if vref == nil {
+ return 0, nil
+ }
+ defer vref.Free()
+ v := vref.Data()
+ if v == nil {
+ return 0, nil
+ }
+ expired, err := db.expiration.isExpired(ts, KVType, rawKey, v, true)
+ if expired {
+ return 0, err
+ }
+ return 1, err
+}
+
+// Get the kv value including the header meta and modify time
+func (db *RockDB) getRawDBKVValue(ts int64, rawKey []byte, useLock bool) ([]byte, []byte, []byte, bool, error) {
+ table, key, err := convertRedisKeyToDBKVKey(rawKey)
+ if err != nil {
+ return table, key, nil, false, err
+ }
+
+ var v []byte
+ if useLock {
+ v, err = db.GetBytes(key)
+ } else {
+ v, err = db.GetBytesNoLock(key)
+ }
+ if err != nil {
+ return table, key, nil, false, err
+ }
+ if v == nil {
+ return table, key, nil, false, nil
+ }
+ expired, err := db.expiration.isExpired(ts, KVType, rawKey, v, useLock)
+ if err != nil {
+ return table, key, v, expired, err
+ }
+ return table, key, v, expired, nil
+}
+
func (db *RockDB) incr(ts int64, key []byte, delta int64) (int64, error) {
- table, key, err := convertRedisKeyToDBKVKey(key)
+ keyInfo, realV, err := db.prepareKVValueForWrite(ts, key, false)
if err != nil {
return 0, err
}
- v, err := db.eng.GetBytesNoLock(db.defaultReadOpts, key)
created := false
n := int64(0)
- if v == nil {
- created = true
+ if realV == nil || keyInfo.Expired {
+ // an expired key is rewritten in place, which should not change the table counter
+ created = (realV == nil)
} else {
- if len(v) < tsLen {
- return 0, errIntNumber
- }
- n, err = StrInt64(v[:len(v)-tsLen], err)
+ n, err = StrInt64(realV, err)
if err != nil {
return 0, err
}
}
- db.wb.Clear()
n += delta
buf := FormatInt64ToSlice(n)
- tsBuf := PutInt64(ts)
- buf = append(buf, tsBuf...)
- db.wb.Put(key, buf)
+ buf = db.encodeRealValueToDBRawValue(ts, keyInfo.OldHeader, buf)
+ db.wb.Put(keyInfo.VerKey, buf)
if created {
- db.IncrTableKeyCount(table, 1, db.wb)
+ db.IncrTableKeyCount(keyInfo.Table, 1, db.wb)
}
- err = db.eng.Write(db.defaultWriteOpts, db.wb)
+ err = db.CommitBatchWrite()
return n, err
}
// ps : this only focuses on deleting the key-value data;
// anything else, such as expire, is ignored.
-func (db *RockDB) KVDel(key []byte) (int64, error) {
+func (db *RockDB) kvDel(key []byte, wb engine.WriteBatch) (int64, error) {
rawKey := key
table, key, err := convertRedisKeyToDBKVKey(key)
if err != nil {
return 0, err
}
- db.MaybeClearBatch()
delCnt := int64(1)
if db.cfg.EnableTableCounter {
if !db.cfg.EstimateTableCounter {
- v, _ := db.eng.GetBytesNoLock(db.defaultReadOpts, key)
- if v != nil {
- db.IncrTableKeyCount(table, -1, db.wb)
+ vok, _ := db.ExistNoLock(key)
+ if vok {
+ db.IncrTableKeyCount(table, -1, wb)
} else {
delCnt = int64(0)
}
} else {
- db.IncrTableKeyCount(table, -1, db.wb)
+ db.IncrTableKeyCount(table, -1, wb)
}
}
- db.wb.Delete(key)
- err = db.MaybeCommitBatch()
- if err != nil {
- return 0, err
- }
+ wb.Delete(key)
// fixme: if del is batched, the deleted key may still be in the write batch while removing the cache,
// and the removed cache may be reloaded by a read before the write batch is committed.
db.delPFCache(rawKey)
return delCnt, nil
}
-func (db *RockDB) KVDelWithBatch(key []byte, wb *gorocksdb.WriteBatch) error {
+func (db *RockDB) KVDelWithBatch(key []byte, wb engine.WriteBatch) error {
table, key, err := convertRedisKeyToDBKVKey(key)
if err != nil {
return err
}
if db.cfg.EnableTableCounter {
if !db.cfg.EstimateTableCounter {
- v, _ := db.eng.GetBytesNoLock(db.defaultReadOpts, key)
- if v != nil {
+ vok, _ := db.ExistNoLock(key)
+ if vok {
db.IncrTableKeyCount(table, -1, wb)
}
} else {
@@ -162,14 +339,13 @@ func (db *RockDB) DelKeys(keys ...[]byte) (int64, error) {
delCnt := int64(0)
for _, k := range keys {
- c, _ := db.KVDel(k)
+ c, _ := db.kvDel(k, db.wb)
delCnt += c
}
//clear all the expire meta data related to the keys
- db.MaybeClearBatch()
for _, k := range keys {
- db.delExpire(KVType, k, db.wb)
+ db.delExpire(KVType, k, nil, false, db.wb)
}
err := db.MaybeCommitBatch()
if err != nil {
@@ -179,7 +355,12 @@ func (db *RockDB) DelKeys(keys ...[]byte) (int64, error) {
}
func (db *RockDB) KVExists(keys ...[]byte) (int64, error) {
+ tn := time.Now().UnixNano()
+ if len(keys) == 1 {
+ return db.isKVExistOrExpired(tn, keys[0])
+ }
keyList := make([][]byte, len(keys))
+ valueList := make([][]byte, len(keys))
errs := make([]error, len(keys))
for i, k := range keys {
_, kk, err := convertRedisKeyToDBKVKey(k)
@@ -191,9 +372,13 @@ func (db *RockDB) KVExists(keys ...[]byte) (int64, error) {
}
}
cnt := int64(0)
- db.eng.MultiGetBytes(db.defaultReadOpts, keyList, keyList, errs)
- for i, v := range keyList {
+ db.MultiGetBytes(keyList, valueList, errs)
+ for i, v := range valueList {
if errs[i] == nil && v != nil {
+ expired, _ := db.expiration.isExpired(tn, KVType, keys[i], v, true)
+ if expired {
+ continue
+ }
cnt++
}
}
@@ -206,24 +391,74 @@ func (db *RockDB) KVGetVer(key []byte) (int64, error) {
return 0, err
}
var ts uint64
- v, err := db.eng.GetBytes(db.defaultReadOpts, key)
+ v, err := db.GetBytes(key)
if len(v) >= tsLen {
ts, err = Uint64(v[len(v)-tsLen:], err)
}
return int64(ts), err
}
+func (db *RockDB) GetValueWithOpNoLock(rawKey []byte,
+ op func([]byte) error) error {
+ _, key, err := convertRedisKeyToDBKVKey(rawKey)
+ if err != nil {
+ return err
+ }
+ return db.rockEng.GetValueWithOpNoLock(key, func(v []byte) error {
+ ts := time.Now().UnixNano()
+ expired, realV, err := db.getAndCheckExpRealValue(ts, rawKey, v, false)
+ if err != nil {
+ return err
+ }
+ if expired {
+ realV = nil
+ }
+ return op(realV)
+ })
+}
+
+func (db *RockDB) GetValueWithOp(rawKey []byte,
+ op func([]byte) error) error {
+ _, key, err := convertRedisKeyToDBKVKey(rawKey)
+ if err != nil {
+ return err
+ }
+ return db.rockEng.GetValueWithOp(key, func(v []byte) error {
+ ts := time.Now().UnixNano()
+ expired, realV, err := db.getAndCheckExpRealValue(ts, rawKey, v, false)
+ if err != nil {
+ return err
+ }
+ if expired {
+ realV = nil
+ }
+ return op(realV)
+ })
+}
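// A usage sketch for the callback-based read, assuming db is a *RockDB and
// rawKey is a redis-style key with a table prefix; the callback receives the
// decoded user value (nil when missing or expired) and should copy the slice
// if it needs to outlive the callback, since it may reference engine memory:
//
//	var size int
//	err := db.GetValueWithOp(rawKey, func(v []byte) error {
//		size = len(v)
//		return nil
//	})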
+
func (db *RockDB) KVGet(key []byte) ([]byte, error) {
- _, key, err := convertRedisKeyToDBKVKey(key)
+ tn := time.Now().UnixNano()
+ keyInfo, v, err := db.getDBKVRealValueAndHeader(tn, key, true)
if err != nil {
return nil, err
}
+ if keyInfo.Expired || v == nil {
+ return nil, nil
+ }
+ return v, nil
+}
- v, err := db.eng.GetBytes(db.defaultReadOpts, key)
- if len(v) >= tsLen {
- v = v[:len(v)-tsLen]
+// KVGetExpired will get the value even if it is expired
+func (db *RockDB) KVGetExpired(key []byte) ([]byte, error) {
+ tn := time.Now().UnixNano()
+ _, v, err := db.getDBKVRealValueAndHeader(tn, key, true)
+ if err != nil {
+ return nil, err
}
- return v, err
+ if v == nil {
+ return nil, nil
+ }
+ return v, nil
}
func (db *RockDB) Incr(ts int64, key []byte) (int64, error) {
@@ -236,6 +471,7 @@ func (db *RockDB) IncrBy(ts int64, key []byte, increment int64) (int64, error) {
func (db *RockDB) MGet(keys ...[]byte) ([][]byte, []error) {
keyList := make([][]byte, len(keys))
+ valueList := make([][]byte, len(keys))
errs := make([]error, len(keys))
for i, k := range keys {
_, kk, err := convertRedisKeyToDBKVKey(k)
@@ -246,14 +482,22 @@ func (db *RockDB) MGet(keys ...[]byte) ([][]byte, []error) {
keyList[i] = kk
}
}
- db.eng.MultiGetBytes(db.defaultReadOpts, keyList, keyList, errs)
+ tn := time.Now().UnixNano()
+ db.MultiGetBytes(keyList, valueList, errs)
//log.Printf("mget: %v", keyList)
- for i, v := range keyList {
- if errs[i] == nil && len(v) >= tsLen {
- keyList[i] = keyList[i][:len(v)-tsLen]
+ for i, v := range valueList {
+ if errs[i] == nil {
+ expired, realV, err := db.getAndCheckExpRealValue(tn, keys[i], v, true)
+ if err != nil {
+ errs[i] = err
+ } else if expired {
+ valueList[i] = nil
+ } else {
+ valueList[i] = realV
+ }
}
}
- return keyList, errs
+ return valueList, errs
}
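// A usage sketch for the expiration-aware MGet, assuming key1/key2 are
// redis-style keys with a table prefix:
//
//	values, errs := db.MGet(key1, key2)
//	for i, v := range values {
//		if errs[i] != nil {
//			continue // per-key error
//		}
//		if v == nil {
//			continue // missing or expired key
//		}
//		// v is the decoded user value, without header or modify time
//	}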
func (db *RockDB) MSet(ts int64, args ...common.KVRecord) error {
@@ -264,15 +508,12 @@ func (db *RockDB) MSet(ts int64, args ...common.KVRecord) error {
return errTooMuchBatchSize
}
- db.MaybeClearBatch()
-
var err error
var key []byte
var value []byte
tableCnt := make(map[string]int)
var table []byte
- tsBuf := PutInt64(ts)
for i := 0; i < len(args); i++ {
table, key, err = convertRedisKeyToDBKVKey(args[i].Key)
if err != nil {
@@ -283,20 +524,21 @@ func (db *RockDB) MSet(ts int64, args ...common.KVRecord) error {
value = value[:0]
value = append(value, args[i].Value...)
if db.cfg.EnableTableCounter {
- var v []byte
+ vok := false
if !db.cfg.EstimateTableCounter {
- v, _ = db.eng.GetBytesNoLock(db.defaultReadOpts, key)
+ vok, _ = db.ExistNoLock(key)
}
- if v == nil {
+ if !vok {
n := tableCnt[string(table)]
n++
tableCnt[string(table)] = n
}
}
- value = append(value, tsBuf...)
+ value, err = db.resetWithNewKVValue(ts, args[i].Key, value, 0, db.wb)
+ if err != nil {
+ return err
+ }
db.wb.Put(key, value)
- //the expire meta data related to the key should be cleared as the key-value has been reset
- db.delExpire(KVType, args[i].Key, db.wb)
}
for t, num := range tableCnt {
db.IncrTableKeyCount([]byte(t), int64(num), db.wb)
@@ -307,130 +549,179 @@ func (db *RockDB) MSet(ts int64, args ...common.KVRecord) error {
}
func (db *RockDB) KVSet(ts int64, rawKey []byte, value []byte) error {
- table, key, err := convertRedisKeyToDBKVKey(rawKey)
+ return db.setKV(ts, rawKey, value, 0)
+}
+
+func (db *RockDB) KVSetWithOpts(ts int64, rawKey []byte, value []byte, duration int64, createOnly bool, updateOnly bool) (int64, error) {
+ if err := checkValueSize(value); err != nil {
+ return 0, err
+ }
+ keyInfo, realV, err := db.prepareKVValueForWrite(ts, rawKey, false)
if err != nil {
- return err
- } else if err = checkValueSize(value); err != nil {
- return err
+ return 0, err
}
- db.MaybeClearBatch()
- if db.cfg.EnableTableCounter {
- var v []byte
- if !db.cfg.EstimateTableCounter {
- v, _ = db.eng.GetBytesNoLock(db.defaultReadOpts, key)
- }
- if v == nil {
- db.IncrTableKeyCount(table, 1, db.wb)
- }
+
+ if createOnly && realV != nil && !keyInfo.Expired {
+ return 0, nil
}
- tsBuf := PutInt64(ts)
- value = append(value, tsBuf...)
- db.wb.Put(key, value)
+ if updateOnly && (realV == nil || keyInfo.Expired) {
+ return 0, nil
- //db.delExpire(KVType, rawKey, db.wb)
+ }
+ if realV == nil && !keyInfo.Expired {
+ db.IncrTableKeyCount(keyInfo.Table, 1, db.wb)
+ }
+ // prepareKVValueForWrite renews the expire data on an expired value,
+ // however, we still need to delete the old expire meta data since it may be stored
+ // in a different place under a different expire policy.
+ value, err = db.resetWithNewKVValue(ts, rawKey, value, duration, db.wb)
+ db.wb.Put(keyInfo.VerKey, value)
err = db.MaybeCommitBatch()
-
- return err
+ return 1, err
}
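// A sketch of how the option flags map onto the usual redis SET variants
// (duration is a ttl in seconds, 0 means no expiration; the int64 result is
// 1 when the value was written and 0 when the NX/XX condition blocked it):
//
//	db.KVSetWithOpts(ts, key, val, 0, false, false) // plain SET
//	db.KVSetWithOpts(ts, key, val, 0, true, false)  // SET ... NX (create only)
//	db.KVSetWithOpts(ts, key, val, 0, false, true)  // SET ... XX (update only)
//	db.KVSetWithOpts(ts, key, val, 30, true, false) // SET ... NX EX 30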
-func (db *RockDB) SetEx(ts int64, rawKey []byte, duration int64, value []byte) error {
+func (db *RockDB) setKV(ts int64, rawKey []byte, value []byte, duration int64) error {
table, key, err := convertRedisKeyToDBKVKey(rawKey)
if err != nil {
return err
} else if err = checkValueSize(value); err != nil {
return err
}
- db.MaybeClearBatch()
if db.cfg.EnableTableCounter {
- var v []byte
+ found := false
if !db.cfg.EstimateTableCounter {
- v, _ = db.eng.GetBytesNoLock(db.defaultReadOpts, key)
+ found, _ = db.ExistNoLock(key)
}
- if v == nil {
+ if !found {
db.IncrTableKeyCount(table, 1, db.wb)
}
}
- tsBuf := PutInt64(ts)
- value = append(value, tsBuf...)
+ value, err = db.resetWithNewKVValue(ts, rawKey, value, duration, db.wb)
+ if err != nil {
+ return err
+ }
db.wb.Put(key, value)
+ err = db.MaybeCommitBatch()
- if err := db.rawExpireAt(KVType, rawKey, duration+time.Now().Unix(), db.wb); err != nil {
- return err
+ return err
+}
+
+func (db *RockDB) KVGetSet(ts int64, rawKey []byte, value []byte) ([]byte, error) {
+ if err := checkValueSize(value); err != nil {
+ return nil, err
+ }
+ keyInfo, realOldV, err := db.getDBKVRealValueAndHeader(ts, rawKey, false)
+ if err != nil {
+ return nil, err
+ }
+ if realOldV == nil && !keyInfo.Expired {
+ db.IncrTableKeyCount(keyInfo.Table, 1, db.wb)
+ } else if keyInfo.Expired {
+ realOldV = nil
+ }
+ value, err = db.resetWithNewKVValue(ts, rawKey, value, 0, db.wb)
+ if err != nil {
+ return nil, err
}
+ db.wb.Put(keyInfo.VerKey, value)
err = db.MaybeCommitBatch()
+ if err != nil {
+ return nil, err
+ }
+ if realOldV == nil {
+ return nil, nil
+ }
- return err
+ return realOldV, nil
+}
+func (db *RockDB) SetEx(ts int64, rawKey []byte, duration int64, value []byte) error {
+ if duration <= 0 {
+ return errInvalidTTL
+ }
+ return db.setKV(ts, rawKey, value, duration)
}
-func (db *RockDB) SetNX(ts int64, key []byte, value []byte) (int64, error) {
- table, key, err := convertRedisKeyToDBKVKey(key)
- if err != nil {
+func (db *RockDB) SetNX(ts int64, rawKey []byte, value []byte) (int64, error) {
+ return db.KVSetWithOpts(ts, rawKey, value, 0, true, false)
+}
+
+func (db *RockDB) SetIfEQ(ts int64, rawKey []byte, oldV []byte, value []byte, duration int64) (int64, error) {
+ if err := checkValueSize(value); err != nil {
return 0, err
- } else if err := checkValueSize(value); err != nil {
+ }
+ keyInfo, realV, err := db.prepareKVValueForWrite(ts, rawKey, false)
+ if err != nil {
return 0, err
}
-
- var v []byte
var n int64 = 1
- if v, err = db.eng.GetBytesNoLock(db.defaultReadOpts, key); err != nil {
+ if !bytes.Equal(realV, oldV) && !keyInfo.Expired {
+ n = 0
+ } else {
+ if realV == nil && !keyInfo.Expired {
+ db.IncrTableKeyCount(keyInfo.Table, 1, db.wb)
+ }
+ // prepareKVValueForWrite renews the expire data on an expired value,
+ // however, we still need to delete the old expire meta data since it may be stored
+ // in a different place under a different expire policy.
+ value, err = db.resetWithNewKVValue(ts, rawKey, value, duration, db.wb)
+ db.wb.Put(keyInfo.VerKey, value)
+ err = db.MaybeCommitBatch()
+ }
+ return n, err
+}
+
+func (db *RockDB) DelIfEQ(ts int64, rawKey []byte, oldV []byte) (int64, error) {
+ keyInfo, realV, err := db.prepareKVValueForWrite(ts, rawKey, false)
+ if err != nil {
return 0, err
- } else if v != nil {
+ }
+ var n int64 = 1
+
+ if !bytes.Equal(realV, oldV) && !keyInfo.Expired {
n = 0
} else {
- db.wb.Clear()
- db.IncrTableKeyCount(table, 1, db.wb)
- value = append(value, PutInt64(ts)...)
- db.wb.Put(key, value)
- err = db.eng.Write(db.defaultWriteOpts, db.wb)
+ return db.DelKeys(rawKey)
}
return n, err
}
-func (db *RockDB) SetRange(ts int64, key []byte, offset int, value []byte) (int64, error) {
+func (db *RockDB) SetRange(ts int64, rawKey []byte, offset int, value []byte) (int64, error) {
if len(value) == 0 {
return 0, nil
}
-
- table, key, err := convertRedisKeyToDBKVKey(key)
- if err != nil {
- return 0, err
- } else if len(value)+offset > MaxValueSize {
+ if len(value)+offset > MaxValueSize {
return 0, errValueSize
}
-
- oldValue, err := db.eng.GetBytesNoLock(db.defaultReadOpts, key)
+ keyInfo, realV, err := db.prepareKVValueForWrite(ts, rawKey, false)
if err != nil {
return 0, err
}
- db.wb.Clear()
- if oldValue == nil {
- db.IncrTableKeyCount(table, 1, db.wb)
- } else if len(oldValue) < tsLen {
- return 0, errInvalidDBValue
- } else {
- oldValue = oldValue[:len(oldValue)-tsLen]
- }
- extra := offset + len(value) - len(oldValue)
+ if realV == nil && !keyInfo.Expired {
+ db.IncrTableKeyCount(keyInfo.Table, 1, db.wb)
+ }
+ extra := offset + len(value) - len(realV)
if extra > 0 {
- oldValue = append(oldValue, make([]byte, extra)...)
+ realV = append(realV, make([]byte, extra)...)
}
- copy(oldValue[offset:], value)
- oldValue = append(oldValue, PutInt64(ts)...)
- db.wb.Put(key, oldValue)
+ copy(realV[offset:], value)
+ retn := len(realV)
- err = db.eng.Write(db.defaultWriteOpts, db.wb)
+ realV = db.encodeRealValueToDBRawValue(ts, keyInfo.OldHeader, realV)
+ db.wb.Put(keyInfo.VerKey, realV)
+
+ err = db.CommitBatchWrite()
if err != nil {
return 0, err
}
- return int64(len(oldValue) - tsLen), nil
+ return int64(retn), nil
}
-func getRange(start int, end int, valLen int) (int, int) {
+func getRange(start int64, end int64, valLen int64) (int64, int64) {
if start < 0 {
start = valLen + start
}
@@ -453,13 +744,13 @@ func getRange(start int, end int, valLen int) (int, int) {
return start, end
}
-func (db *RockDB) GetRange(key []byte, start int, end int) ([]byte, error) {
+func (db *RockDB) GetRange(key []byte, start int64, end int64) ([]byte, error) {
value, err := db.KVGet(key)
if err != nil {
return nil, err
}
- valLen := len(value)
+ valLen := int64(len(value))
start, end = getRange(start, end, valLen)
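// A worked example of the redis GETRANGE style index normalization done by
// getRange for an 11-byte value such as "Hello World":
//
//	getRange(0, 4, 11)   // -> (0, 4):  "Hello"
//	getRange(0, -1, 11)  // -> (0, 10): "Hello World"
//	getRange(-5, -1, 11) // -> (6, 10): "World"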
@@ -479,74 +770,159 @@ func (db *RockDB) StrLen(key []byte) (int64, error) {
return int64(n), nil
}
-func (db *RockDB) Append(ts int64, key []byte, value []byte) (int64, error) {
+func (db *RockDB) Append(ts int64, rawKey []byte, value []byte) (int64, error) {
if len(value) == 0 {
return 0, nil
}
- table, key, err := convertRedisKeyToDBKVKey(key)
+ keyInfo, realV, err := db.prepareKVValueForWrite(ts, rawKey, false)
if err != nil {
return 0, err
}
+ if len(realV)+len(value) > MaxValueSize {
+ return 0, errValueSize
+ }
+ if realV == nil && !keyInfo.Expired {
+ db.IncrTableKeyCount(keyInfo.Table, 1, db.wb)
+ }
+
+ newLen := len(realV) + len(value)
+ realV = append(realV, value...)
+ dbv := db.encodeRealValueToDBRawValue(ts, keyInfo.OldHeader, realV)
+ // TODO: do we need to make sure the old expire meta is deleted to avoid expiring the rewritten new data?
- oldValue, err := db.eng.GetBytesNoLock(db.defaultReadOpts, key)
+ db.wb.Put(keyInfo.VerKey, dbv)
+ err = db.CommitBatchWrite()
if err != nil {
return 0, err
}
- if len(oldValue)+len(value) > MaxValueSize {
- return 0, errValueSize
+ return int64(newLen), nil
+}
+
+// BitSetOld sets the bitmap data with the format below:
+// key -> 0 (first bit) 0 0 0 0 0 0 0 (last bit) | (second byte with 8 bits) | .... | (last byte with 8 bits); at most MaxBitOffset/8 bytes for each bitmap
+func (db *RockDB) BitSetOld(ts int64, key []byte, offset int64, on int) (int64, error) {
+ if offset > MaxBitOffset || offset < 0 {
+ return 0, ErrBitOverflow
}
- db.wb.Clear()
- if oldValue == nil {
- db.IncrTableKeyCount(table, 1, db.wb)
- } else if len(oldValue) < tsLen {
- return 0, errInvalidDBValue
- } else {
- oldValue = oldValue[:len(oldValue)-tsLen]
+
+ if (on & ^1) != 0 {
+ return 0, fmt.Errorf("bit should be 0 or 1, got %d", on)
+ }
+ keyInfo, realV, err := db.prepareKVValueForWrite(ts, key, false)
+ if err != nil {
+ return 0, err
+ }
+ if realV == nil && !keyInfo.Expired {
+ db.IncrTableKeyCount(keyInfo.Table, 1, db.wb)
+ }
+ if keyInfo.Expired {
+ realV = nil
}
- oldValue = append(oldValue, value...)
- oldValue = append(oldValue, PutInt64(ts)...)
+ byteOffset := int(uint32(offset) >> 3)
+ expandLen := byteOffset + 1 - len(realV)
+ if expandLen > 0 {
+ if on == 0 {
+ // not changed
+ return 0, nil
+ }
+ realV = append(realV, make([]byte, expandLen)...)
+ }
+ byteVal := realV[byteOffset]
+ bit := 7 - uint8(uint32(offset)&0x7)
+ oldBit := byteVal & (1 << bit)
- db.wb.Put(key, oldValue)
- err = db.eng.Write(db.defaultWriteOpts, db.wb)
+ byteVal &= ^(1 << bit)
+ byteVal |= (uint8(on&0x1) << bit)
+ realV[byteOffset] = byteVal
+
+ realV = db.encodeRealValueToDBRawValue(ts, keyInfo.OldHeader, realV)
+ db.wb.Put(keyInfo.VerKey, realV)
+ err = db.CommitBatchWrite()
if err != nil {
return 0, err
}
+ if oldBit > 0 {
+ return 1, nil
+ }
+ return 0, nil
+}
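// A small sketch of the bit addressing used above: bits are stored
// most-significant-bit first inside each byte, matching redis SETBIT/GETBIT:
//
//	byteOffset := offset >> 3    // which byte holds the bit
//	bit := 7 - uint8(offset&0x7) // position inside that byte
//	// offset 0  -> byte 0, mask 0x80
//	// offset 5  -> byte 0, mask 0x04
//	// offset 13 -> byte 1, mask 0x04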
+
+func popcountBytes(s []byte) (count int64) {
+ for i := 0; i+8 <= len(s); i += 8 {
+ x := binary.LittleEndian.Uint64(s[i:])
+ count += int64(bits.OnesCount64(x))
+ }
- return int64(len(oldValue) - tsLen), nil
+ s = s[len(s)&^7:]
+
+ if len(s) >= 4 {
+ count += int64(bits.OnesCount32(binary.LittleEndian.Uint32(s)))
+ s = s[4:]
+ }
+
+ if len(s) >= 2 {
+ count += int64(bits.OnesCount16(binary.LittleEndian.Uint16(s)))
+ s = s[2:]
+ }
+
+ if len(s) == 1 {
+ count += int64(bits.OnesCount8(s[0]))
+ }
+ return
}
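// A quick sanity example, consistent with the BITCOUNT cases in the tests:
//
//	popcountBytes([]byte{0xff})     // == 8
//	popcountBytes([]byte("foobar")) // == 26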
-func (db *RockDB) Expire(key []byte, duration int64) (int64, error) {
- if exists, err := db.KVExists(key); err != nil || exists != 1 {
+func (db *RockDB) bitGetOld(key []byte, offset int64) (int64, error) {
+ v, err := db.KVGet(key)
+ if err != nil {
return 0, err
- } else {
- if err2 := db.expire(KVType, key, duration); err2 != nil {
- return 0, err2
- } else {
- return 1, nil
- }
}
+
+ byteOffset := (uint32(offset) >> 3)
+ if byteOffset >= uint32(len(v)) {
+ return 0, nil
+ }
+ byteVal := v[byteOffset]
+ bit := 7 - uint8(uint32(offset)&0x7)
+ oldBit := byteVal & (1 << bit)
+ if oldBit > 0 {
+ return 1, nil
+ }
+
+ return 0, nil
}
-func (db *RockDB) Persist(key []byte) (int64, error) {
- if exists, err := db.KVExists(key); err != nil || exists != 1 {
+func (db *RockDB) bitCountOld(key []byte, start, end int64) (int64, error) {
+ v, err := db.KVGet(key)
+ if err != nil {
return 0, err
}
+ start, end = getRange(start, end, int64(len(v)))
+ if start > end {
+ return 0, nil
+ }
+ v = v[start : end+1]
+ return popcountBytes(v), nil
+}
- if ttl, err := db.ttl(KVType, key); err != nil || ttl < 0 {
+func (db *RockDB) Expire(ts int64, rawKey []byte, duration int64) (int64, error) {
+ _, _, v, expired, err := db.getRawDBKVValue(ts, rawKey, false)
+ if err != nil || v == nil || expired {
return 0, err
}
+ return db.ExpireAt(KVType, rawKey, v, ts/int64(time.Second)+duration)
+}
- db.wb.Clear()
- if err := db.delExpire(KVType, key, db.wb); err != nil {
+func (db *RockDB) Persist(ts int64, rawKey []byte) (int64, error) {
+ _, _, v, expired, err := db.getRawDBKVValue(ts, rawKey, false)
+ if err != nil {
return 0, err
- } else {
- if err2 := db.eng.Write(db.defaultWriteOpts, db.wb); err2 != nil {
- return 0, err2
- } else {
- return 1, nil
- }
}
+ if v == nil || expired {
+ return 0, nil
+ }
+
+ return db.ExpireAt(KVType, rawKey, v, 0)
}
diff --git a/rockredis/t_kv_test.go b/rockredis/t_kv_test.go
index 5d577013..1ed76ba6 100644
--- a/rockredis/t_kv_test.go
+++ b/rockredis/t_kv_test.go
@@ -1,11 +1,13 @@
package rockredis
import (
- "bytes"
"os"
+ "strconv"
"testing"
+ "time"
- "github.com/absolute8511/ZanRedisDB/common"
+ "github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
)
func TestKVCodec(t *testing.T) {
@@ -15,11 +17,9 @@ func TestKVCodec(t *testing.T) {
ek := encodeKVKey([]byte("key"))
- if k, err := decodeKVKey(ek); err != nil {
- t.Fatal(err)
- } else if string(k) != "key" {
- t.Fatal(string(k))
- }
+ k, err := decodeKVKey(ek)
+ assert.Nil(t, err)
+ assert.Equal(t, "key", string(k))
}
func TestDBKV(t *testing.T) {
@@ -28,134 +28,108 @@ func TestDBKV(t *testing.T) {
defer db.Close()
key1 := []byte("test:testdb_kv_a")
+ key2 := []byte("test:testdb_kv_b")
+ n, err := db.KVExists(key1, key2)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
- if err := db.KVSet(0, key1, []byte("hello world 1")); err != nil {
- t.Fatal(err)
- }
+ err = db.KVSet(0, key1, []byte("hello world 1"))
+ assert.Nil(t, err)
+ n, err = db.KVExists(key1, key2)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
- key2 := []byte("test:testdb_kv_b")
+ err = db.KVSet(0, key2, []byte("hello world 2"))
+ assert.Nil(t, err)
+
+ n, err = db.KVExists(key1, key2)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
- if err := db.KVSet(0, key2, []byte("hello world 2")); err != nil {
- t.Fatal(err)
- }
v1, _ := db.KVGet(key1)
- if string(v1) != "hello world 1" {
- t.Error(v1)
- }
+ assert.Equal(t, "hello world 1", string(v1))
v2, _ := db.KVGet(key2)
- if string(v2) != "hello world 2" {
- t.Error(v2)
- }
+ assert.Equal(t, "hello world 2", string(v2))
num, err := db.GetTableKeyCount([]byte("test"))
- if err != nil {
- t.Error(err)
- } else if num != 2 {
- t.Errorf("table count not as expected: %v", num)
- }
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), num)
ay, errs := db.MGet(key1, key2)
- if len(ay) != 2 {
- t.Errorf("%v, %v", ay, errs)
- }
+ assert.Equal(t, 2, len(errs))
+ assert.Nil(t, errs[0])
+ assert.Nil(t, errs[1])
+ assert.Equal(t, 2, len(ay))
+ assert.Equal(t, v1, ay[0])
+ assert.Equal(t, v2, ay[1])
- if !bytes.Equal(v1, ay[0]) {
- t.Errorf("%v, %v", ay[0], v1)
- }
+ key3 := []byte("test:testdb_kv_range")
- if !bytes.Equal(v2, ay[1]) {
- t.Errorf("%v, %v", ay[1], v2)
- }
+ n, err = db.Append(0, key3, []byte("Hello"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(5), n)
- key3 := []byte("test:testdb_kv_range")
+ n, err = db.Append(0, key3, []byte(" World"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(11), n)
- if n, err := db.Append(0, key3, []byte("Hello")); err != nil {
- t.Fatal(err)
- } else if n != 5 {
- t.Fatal(n)
- }
+ n, err = db.StrLen(key3)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(11), n)
- if n, err := db.Append(0, key3, []byte(" World")); err != nil {
- t.Fatal(err)
- } else if n != 11 {
- t.Fatal(n)
- }
+ v, err := db.GetRange(key3, 0, 4)
+ assert.Nil(t, err)
+ assert.Equal(t, "Hello", string(v))
- if n, err := db.StrLen(key3); err != nil {
- t.Fatal(err)
- } else if n != 11 {
- t.Fatal(n)
- }
+ v, err = db.GetRange(key3, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, "Hello World", string(v))
- if v, err := db.GetRange(key3, 0, 4); err != nil {
- t.Fatal(err)
- } else if string(v) != "Hello" {
- t.Fatal(string(v))
- }
+ v, err = db.GetRange(key3, -5, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, "World", string(v))
- if v, err := db.GetRange(key3, 0, -1); err != nil {
- t.Fatal(err)
- } else if string(v) != "Hello World" {
- t.Fatal(string(v))
- }
+ n, err = db.SetRange(0, key3, 6, []byte("Redis"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(11), n)
- if v, err := db.GetRange(key3, -5, -1); err != nil {
- t.Fatal(err)
- } else if string(v) != "World" {
- t.Fatal(string(v))
- }
+ v, err = db.KVGet(key3)
+ assert.Nil(t, err)
+ assert.Equal(t, "Hello Redis", string(v))
- if n, err := db.SetRange(0, key3, 6, []byte("Redis")); err != nil {
- t.Fatal(err)
- } else if n != 11 {
- t.Fatal(n)
- }
+ v, err = db.KVGetExpired(key3)
+ assert.Nil(t, err)
+ assert.Equal(t, "Hello Redis", string(v))
- if v, err := db.KVGet(key3); err != nil {
- t.Fatal(err)
- } else if string(v) != "Hello Redis" {
- t.Fatal(string(v))
- }
+ n, err = db.KVGetVer(key3)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
key4 := []byte("test:testdb_kv_range_none")
- if n, err := db.SetRange(0, key4, 6, []byte("Redis")); err != nil {
- t.Fatal(err)
- } else if n != 11 {
- t.Fatal(n)
- }
- r, _ := db.KVExists(key3)
- if r == 0 {
- t.Errorf("key should exist: %v", r)
- }
+ n, err = db.SetRange(0, key4, 6, []byte("Redis"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(11), n)
+
+ r, err := db.KVExists(key3)
+ assert.Nil(t, err)
+ assert.NotEqual(t, int64(0), r)
r, err = db.SetNX(0, key3, []byte(""))
- if err != nil {
- t.Errorf("setnx failed: %v", err)
- }
- if r != 0 {
- t.Errorf("should set only not exist: %v", r)
- }
- if v, err := db.KVGet(key3); err != nil {
- t.Error(err)
- } else if string(v) != "Hello Redis" {
- t.Error(string(v))
- }
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), r)
+
+ v, err = db.KVGet(key3)
+ assert.Nil(t, err)
+ assert.Equal(t, "Hello Redis", string(v))
+
num, err = db.GetTableKeyCount([]byte("test"))
- if err != nil {
- t.Error(err)
- } else if num != 4 {
- t.Errorf("table count not as expected: %v", num)
- }
+ assert.Nil(t, err)
+ assert.Equal(t, int64(4), num)
- db.KVDel(key3)
+ db.DelKeys(key3)
r, _ = db.KVExists(key3)
- if r != 0 {
- t.Errorf("key should not exist: %v", r)
- }
+ assert.Equal(t, int64(0), r)
num, err = db.GetTableKeyCount([]byte("test"))
- if err != nil {
- t.Error(err)
- } else if num != 3 {
- t.Errorf("table count not as expected: %v", num)
- }
+ assert.Nil(t, err)
+ assert.Equal(t, int64(3), num)
key5 := []byte("test:test_kv_mset_key5")
key6 := []byte("test:test_kv_mset_key6")
@@ -185,6 +159,169 @@ func TestDBKV(t *testing.T) {
} else if num != 6 {
t.Errorf("table count not as expected: %v", num)
}
+}
+
+func TestDBKVSetIF(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key1 := []byte("test:testdb_kv_setif_a")
+ ts := time.Now().UnixNano()
+
+ n, err := db.SetIfEQ(ts, key1, nil, []byte("hello world 1"), 30)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.GetTableKeyCount([]byte("test"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.KVTtl(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(30), n)
+
+ n, err = db.SetIfEQ(ts, key1, []byte("not equal"), []byte("hello world 1"), 30)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ v1, _ := db.KVGet(key1)
+ assert.Equal(t, "hello world 1", string(v1))
+
+ n, err = db.SetIfEQ(ts, key1, v1, []byte("hello world 2"), 30)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.GetTableKeyCount([]byte("test"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ v1, _ = db.KVGet(key1)
+ assert.Equal(t, "hello world 2", string(v1))
+
+ n, err = db.DelIfEQ(ts, key1, []byte("not equal"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.GetTableKeyCount([]byte("test"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.DelIfEQ(ts, key1, v1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.GetTableKeyCount([]byte("test"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+}
+
+func TestDBKVBit(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key := []byte("test:testdb_kv_bit")
+ n, err := db.BitSetV2(0, key, 5, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.BitGetV2(key, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.BitGetV2(key, 5)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.BitGetV2(key, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.BitSetV2(0, key, 5, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ _, err = db.BitSetV2(0, key, -5, 0)
+ assert.NotNil(t, err)
+
+ for i := 0; i < bitmapSegBits*3; i++ {
+ n, err = db.BitGetV2(key, int64(i))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ }
+ bitsForOne := make(map[int]bool)
+ bitsForOne[0] = true
+ bitsForOne[bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBytes] = true
+ bitsForOne[bitmapSegBytes+1] = true
+ bitsForOne[bitmapSegBytes*2-1] = true
+ bitsForOne[bitmapSegBytes*2] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits-bitmapSegBytes+1] = true
+
+ bitsForOne[bitmapSegBits-1] = true
+ bitsForOne[bitmapSegBits] = true
+ bitsForOne[bitmapSegBits+1] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits+bitmapSegBytes+1] = true
+ bitsForOne[bitmapSegBits*2-bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits*2-bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits*2-1] = true
+ bitsForOne[bitmapSegBits*2] = true
+ bitsForOne[bitmapSegBits*2+1] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes-1] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes] = true
+ bitsForOne[bitmapSegBits*2+bitmapSegBytes+1] = true
+
+ for bpos := range bitsForOne {
+ n, err = db.BitSetV2(0, key, int64(bpos), 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ // new v2 should read old
+ n, err = db.BitGetV2(key, int64(bpos))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ }
+
+ for i := 0; i < bitmapSegBits*3; i++ {
+ n, err = db.BitGetV2(key, int64(i))
+ assert.Nil(t, err)
+ if _, ok := bitsForOne[i]; ok {
+ assert.Equal(t, int64(1), n)
+ } else {
+ assert.Equal(t, int64(0), n)
+ }
+ }
+ // new v2 should read old
+ n, err = db.BitCountV2(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(bitsForOne)), n)
+ err = db.KVSet(0, key, []byte{0x00, 0x00, 0x00})
+ assert.Nil(t, err)
+
+ err = db.KVSet(0, key, []byte("foobar"))
+ assert.Nil(t, err)
+
+ n, err = db.bitCountOld(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(26), n)
+
+ n, err = db.bitCountOld(key, 0, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(4), n)
+
+ n, err = db.bitCountOld(key, 1, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(6), n)
}
@@ -240,3 +377,33 @@ func TestDBKVWithNoTable(t *testing.T) {
t.Error("should get no value")
}
}
+
+func BenchmarkKVSetSingleKey(b *testing.B) {
+ db := getTestDBForBench()
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ b.StopTimer()
+ key := []byte("test:testdb_kv_bench")
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ db.KVSet(0, key, key)
+ }
+ b.StopTimer()
+}
+
+func BenchmarkKVSetManyKeys(b *testing.B) {
+ db := getTestDBForBench()
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ b.StopTimer()
+ key := "test:testdb_kv_bench"
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ db.KVSet(0, []byte(key+(strconv.Itoa(i))), []byte("1"))
+ }
+ b.StopTimer()
+}
diff --git a/rockredis/t_list.go b/rockredis/t_list.go
index e419cc95..7ead855b 100644
--- a/rockredis/t_list.go
+++ b/rockredis/t_list.go
@@ -3,12 +3,16 @@ package rockredis
import (
"encoding/binary"
"errors"
+ "time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/gorocksdb"
+ ps "github.com/prometheus/client_golang/prometheus"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
+ "github.com/youzan/ZanRedisDB/metric"
+ "github.com/youzan/ZanRedisDB/slow"
)
-// TODO: we can use ring buffer to allow the list pop and push many times
+// we can use a ring buffer to allow the list to pop and push many times:
// when the tail reaches the end we roll back to the start and check if it is full.
// Note: to clean a huge list, we can set some meta for each list,
// such as max elements or the max keep time, while inserting we auto clean
@@ -26,6 +30,7 @@ var errLMetaKey = errors.New("invalid lmeta key")
var errListKey = errors.New("invalid list key")
var errListSeq = errors.New("invalid list sequence, overflow")
var errListIndex = errors.New("invalid list index")
+var errListMeta = errors.New("invalid list meta data")
func lEncodeMetaKey(key []byte) []byte {
buf := make([]byte, len(key)+1+len(metaPrefix))
@@ -60,18 +65,6 @@ func lEncodeMaxKey() []byte {
return ek
}
-func convertRedisKeyToDBListKey(key []byte, seq int64) ([]byte, error) {
- table, rk, err := extractTableFromRedisKey(key)
- if err != nil {
- return nil, err
- }
-
- if err := checkKeySize(rk); err != nil {
- return nil, err
- }
- return lEncodeListKey(table, rk, seq), nil
-}
-
func lEncodeListKey(table []byte, key []byte, seq int64) []byte {
buf := make([]byte, getDataTablePrefixBufLen(ListType, table)+len(key)+2+8)
@@ -112,29 +105,26 @@ func lDecodeListKey(ek []byte) (table []byte, key []byte, seq int64, err error)
return
}
-func (db *RockDB) fixListKey(ts int64, key []byte) {
+func (db *RockDB) scanfixListKey(ts int64, key []byte, wb engine.WriteBatch) {
// fix head and tail by iterator to find if any list key found or not found
var headSeq int64
var tailSeq int64
var llen int64
- var err error
- db.wb.Clear()
- metaKey := lEncodeMetaKey(key)
- if headSeq, tailSeq, llen, _, err = db.lGetMeta(metaKey); err != nil {
- dbLog.Warningf("read list %v meta error: %v", string(key), err.Error())
- return
- }
- dbLog.Infof("list %v before fix: meta: %v, %v", string(key), headSeq, tailSeq)
- startKey, err := convertRedisKeyToDBListKey(key, listMinSeq)
+ keyInfo, headSeq, tailSeq, llen, _, err := db.lHeaderAndMeta(ts, key, false)
if err != nil {
+ dbLog.Warningf("read list %v meta error: %v", string(key), err.Error())
return
}
- stopKey, err := convertRedisKeyToDBListKey(key, listMaxSeq)
- if err != nil {
+ if keyInfo.IsNotExistOrExpired() {
return
}
- rit, err := NewDBRangeIterator(db.eng, startKey, stopKey, common.RangeClose, false)
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
+ dbLog.Infof("list %v before fix: meta: %v, %v", string(key), headSeq, tailSeq)
+ startKey := lEncodeListKey(table, rk, listMinSeq)
+ stopKey := lEncodeListKey(table, rk, listMaxSeq)
+ rit, err := db.NewDBRangeIterator(startKey, stopKey, common.RangeClose, false)
if err != nil {
dbLog.Warningf("read list %v error: %v", string(key), err.Error())
return
@@ -171,17 +161,22 @@ func (db *RockDB) fixListKey(ts int64, key []byte) {
return
}
if cnt == 0 {
- db.wb.Delete(metaKey)
- table, _, _ := extractTableFromRedisKey(key)
- db.IncrTableKeyCount(table, -1, db.wb)
+ metaKey := lEncodeMetaKey(key)
+ wb.Delete(metaKey)
+ db.IncrTableKeyCount(table, -1, wb)
} else {
- _, err = db.lSetMeta(metaKey, fixedHead, fixedTail, ts, db.wb)
+ _, err = db.lSetMeta(key, keyInfo.OldHeader, fixedHead, fixedTail, ts, wb)
if err != nil {
return
}
}
dbLog.Infof("list %v fixed to %v, %v, cnt: %v", string(key), fixedHead, fixedTail, cnt)
- db.eng.Write(db.defaultWriteOpts, db.wb)
+}
+
+func (db *RockDB) fixListKey(ts int64, key []byte) {
+ defer db.wb.Clear()
+ db.scanfixListKey(ts, key, db.wb)
+ db.rockEng.Write(db.wb)
}
func (db *RockDB) lpush(ts int64, key []byte, whereSeq int64, args ...[]byte) (int64, error) {
@@ -189,20 +184,16 @@ func (db *RockDB) lpush(ts int64, key []byte, whereSeq int64, args ...[]byte) (i
return 0, err
}
- table, rk, _ := extractTableFromRedisKey(key)
- if len(table) == 0 {
- return 0, errTableName
+ wb := db.wb
+ defer wb.Clear()
+ keyInfo, err := db.prepareCollKeyForWrite(ts, ListType, key, nil)
+ if err != nil {
+ return 0, err
}
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
- var headSeq int64
- var tailSeq int64
- var size int64
- var err error
-
- wb := db.wb
- wb.Clear()
- metaKey := lEncodeMetaKey(key)
- headSeq, tailSeq, size, _, err = db.lGetMeta(metaKey)
+ headSeq, tailSeq, size, _, err := parseListMeta(keyInfo.MetaData())
if err != nil {
return 0, err
}
@@ -233,7 +224,8 @@ func (db *RockDB) lpush(ts int64, key []byte, whereSeq int64, args ...[]byte) (i
}
for i := 0; i < pushCnt; i++ {
ek := lEncodeListKey(table, rk, seq+int64(i)*delta)
- v, _ := db.eng.GetBytesNoLock(db.defaultReadOpts, ek)
+ // we assume there is no bug, so it must not overwrite any existing value
+ v, _ := db.GetBytesNoLock(ek)
if v != nil {
dbLog.Warningf("list %v should not override the old value: %v, meta: %v, %v,%v", string(key),
v, seq, headSeq, tailSeq)
@@ -242,7 +234,8 @@ func (db *RockDB) lpush(ts int64, key []byte, whereSeq int64, args ...[]byte) (i
}
wb.Put(ek, args[i])
}
- if size == 0 && pushCnt > 0 {
+ // rewriting an old expired value should keep the table counter unchanged
+ if size == 0 && pushCnt > 0 && !keyInfo.Expired {
db.IncrTableKeyCount(table, 1, wb)
}
seq += int64(pushCnt-1) * delta
@@ -253,7 +246,7 @@ func (db *RockDB) lpush(ts int64, key []byte, whereSeq int64, args ...[]byte) (i
tailSeq = seq
}
- _, err = db.lSetMeta(metaKey, headSeq, tailSeq, ts, wb)
+ _, err = db.lSetMeta(key, keyInfo.OldHeader, headSeq, tailSeq, ts, wb)
if dbLog.Level() >= common.LOG_DETAIL {
dbLog.Debugf("lpush %v list %v meta updated to: %v, %v", whereSeq,
string(key), headSeq, tailSeq)
@@ -262,38 +255,43 @@ func (db *RockDB) lpush(ts int64, key []byte, whereSeq int64, args ...[]byte) (i
db.fixListKey(ts, key)
return 0, err
}
- err = db.eng.Write(db.defaultWriteOpts, wb)
- return int64(size) + int64(pushCnt), err
+ err = db.rockEng.Write(wb)
+
+ newNum := int64(size) + int64(pushCnt)
+ db.topLargeCollKeys.Update(key, int(newNum))
+ slow.LogLargeCollection(int(newNum), slow.NewSlowLogInfo(string(table), string(key), "list"))
+ if newNum > collectionLengthForMetric {
+ metric.CollectionLenDist.With(ps.Labels{
+ "table": string(table),
+ }).Observe(float64(newNum))
+ }
+ return newNum, err
}
func (db *RockDB) lpop(ts int64, key []byte, whereSeq int64) ([]byte, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
- table, rk, _ := extractTableFromRedisKey(key)
- if len(table) == 0 {
- return nil, errTableName
- }
- wb := db.wb
- wb.Clear()
-
- var headSeq int64
- var tailSeq int64
- var size int64
- var err error
-
- metaKey := lEncodeMetaKey(key)
- headSeq, tailSeq, size, _, err = db.lGetMeta(metaKey)
+ keyInfo, headSeq, tailSeq, size, _, err := db.lHeaderAndMeta(ts, key, false)
if err != nil {
return nil, err
- } else if size == 0 {
+ }
+ if keyInfo.IsNotExistOrExpired() {
+ return nil, nil
+ }
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
+
+ if size == 0 {
return nil, nil
}
if dbLog.Level() >= common.LOG_DETAIL {
dbLog.Debugf("pop %v list %v meta: %v, %v", whereSeq, string(key), headSeq, tailSeq)
}
+ wb := db.wb
+ defer wb.Clear()
var value []byte
var seq int64 = headSeq
@@ -302,7 +300,7 @@ func (db *RockDB) lpop(ts int64, key []byte, whereSeq int64) ([]byte, error) {
}
itemKey := lEncodeListKey(table, rk, seq)
- value, err = db.eng.GetBytesNoLock(db.defaultReadOpts, itemKey)
+ value, err = db.GetBytesNoLock(itemKey)
// nil value means not exist
// empty value should be ""
// since pop should succeed if size is not zero, we need to fix this
@@ -320,21 +318,22 @@ func (db *RockDB) lpop(ts int64, key []byte, whereSeq int64) ([]byte, error) {
}
wb.Delete(itemKey)
- size, err = db.lSetMeta(metaKey, headSeq, tailSeq, ts, wb)
+ newNum, err := db.lSetMeta(key, keyInfo.OldHeader, headSeq, tailSeq, ts, wb)
if dbLog.Level() >= common.LOG_DETAIL {
- dbLog.Debugf("pop %v list %v meta updated to: %v, %v, %v", whereSeq, string(key), headSeq, tailSeq, size)
+ dbLog.Debugf("pop %v list %v meta updated to: %v, %v, %v", whereSeq, string(key), headSeq, tailSeq, newNum)
}
if err != nil {
db.fixListKey(ts, key)
return nil, err
}
- if size == 0 {
+ if newNum == 0 {
// list is empty after delete
db.IncrTableKeyCount(table, -1, wb)
//delete the expire data related to the list key
- db.delExpire(ListType, key, wb)
+ db.delExpire(ListType, key, nil, false, wb)
}
- err = db.eng.Write(db.defaultWriteOpts, wb)
+ db.topLargeCollKeys.Update(key, int(newNum))
+ err = db.rockEng.Write(wb)
return value, err
}
@@ -342,75 +341,76 @@ func (db *RockDB) ltrim2(ts int64, key []byte, startP, stopP int64) error {
if err := checkKeySize(key); err != nil {
return err
}
- table, rk, _ := extractTableFromRedisKey(key)
- if len(table) == 0 {
- return errTableName
- }
+ keyInfo, headSeq, _, llen, _, err := db.lHeaderAndMeta(ts, key, false)
+ if err != nil {
+ return err
+ }
+ if keyInfo.IsNotExistOrExpired() {
+ return nil
+ }
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
wb := db.wb
- wb.Clear()
+ defer wb.Clear()
- var headSeq int64
- var llen int64
- var err error
start := int64(startP)
stop := int64(stopP)
- ek := lEncodeMetaKey(key)
- if headSeq, _, llen, _, err = db.lGetMeta(ek); err != nil {
- return err
+ if start < 0 {
+ start = llen + start
+ }
+ if stop < 0 {
+ stop = llen + stop
+ }
+ newLen := int64(0)
+ // whole list deleted
+ if start >= llen || start > stop {
+ db.lDelete(ts, key, db.wb)
} else {
- if start < 0 {
- start = llen + start
- }
- if stop < 0 {
- stop = llen + stop
- }
- if start >= llen || start > stop {
- //db.lDelete(key, wb)
- return errors.New("trim invalid")
- }
-
if start < 0 {
start = 0
}
if stop >= llen {
stop = llen - 1
}
- }
- if start > 0 {
- if start > RangeDeleteNum {
- wb.DeleteRange(lEncodeListKey(table, rk, headSeq), lEncodeListKey(table, rk, headSeq+start))
- } else {
- for i := int64(0); i < start; i++ {
- wb.Delete(lEncodeListKey(table, rk, headSeq+i))
+ if start > 0 {
+ if start > RangeDeleteNum {
+ wb.DeleteRange(lEncodeListKey(table, rk, headSeq), lEncodeListKey(table, rk, headSeq+start))
+ } else {
+ for i := int64(0); i < start; i++ {
+ wb.Delete(lEncodeListKey(table, rk, headSeq+i))
+ }
}
}
- }
- if stop < int64(llen-1) {
- if llen-stop > RangeDeleteNum {
- wb.DeleteRange(lEncodeListKey(table, rk, headSeq+int64(stop+1)),
- lEncodeListKey(table, rk, headSeq+llen))
- } else {
- for i := int64(stop + 1); i < llen; i++ {
- wb.Delete(lEncodeListKey(table, rk, headSeq+i))
+ if stop < int64(llen-1) {
+ if llen-stop > RangeDeleteNum {
+ wb.DeleteRange(lEncodeListKey(table, rk, headSeq+int64(stop+1)),
+ lEncodeListKey(table, rk, headSeq+llen))
+ } else {
+ for i := int64(stop + 1); i < llen; i++ {
+ wb.Delete(lEncodeListKey(table, rk, headSeq+i))
+ }
}
}
- }
- newLen, err := db.lSetMeta(ek, headSeq+start, headSeq+stop, ts, wb)
- if err != nil {
- db.fixListKey(ts, key)
- return err
+ newLen, err = db.lSetMeta(key, keyInfo.OldHeader, headSeq+start, headSeq+stop, ts, wb)
+ if err != nil {
+ db.fixListKey(ts, key)
+ return err
+ }
}
if llen > 0 && newLen == 0 {
db.IncrTableKeyCount(table, -1, wb)
+ }
+ if newLen == 0 {
//delete the expire data related to the list key
- db.delExpire(ListType, key, wb)
+ db.delExpire(ListType, key, nil, false, wb)
}
- return db.eng.Write(db.defaultWriteOpts, wb)
+ db.topLargeCollKeys.Update(key, int(newLen))
+ return db.rockEng.Write(wb)
}
func (db *RockDB) ltrim(ts int64, key []byte, trimSize, whereSeq int64) (int64, error) {
@@ -421,24 +421,18 @@ func (db *RockDB) ltrim(ts int64, key []byte, trimSize, whereSeq int64) (int64,
if trimSize == 0 {
return 0, nil
}
- table, rk, _ := extractTableFromRedisKey(key)
- if len(table) == 0 {
- return 0, errTableName
- }
-
- wb := db.wb
- wb.Clear()
-
- var headSeq int64
- var tailSeq int64
- var size int64
- var err error
- metaKey := lEncodeMetaKey(key)
- headSeq, tailSeq, size, _, err = db.lGetMeta(metaKey)
+ keyInfo, headSeq, tailSeq, size, _, err := db.lHeaderAndMeta(ts, key, false)
if err != nil {
return 0, err
- } else if size == 0 {
+ }
+ if keyInfo.IsNotExistOrExpired() {
+ return 0, nil
+ }
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
+
+ if size == 0 {
return 0, nil
}
@@ -457,6 +451,8 @@ func (db *RockDB) ltrim(ts int64, key []byte, trimSize, whereSeq int64) (int64,
tailSeq = trimStartSeq - 1
}
+ wb := db.wb
+ defer wb.Clear()
if trimEndSeq-trimStartSeq > RangeDeleteNum {
itemStartKey := lEncodeListKey(table, rk, trimStartSeq)
itemEndKey := lEncodeListKey(table, rk, trimEndSeq)
@@ -469,41 +465,47 @@ func (db *RockDB) ltrim(ts int64, key []byte, trimSize, whereSeq int64) (int64,
}
}
- size, err = db.lSetMeta(metaKey, headSeq, tailSeq, ts, wb)
+ newLen, err := db.lSetMeta(key, keyInfo.OldHeader, headSeq, tailSeq, ts, wb)
if err != nil {
db.fixListKey(ts, key)
return 0, err
}
- if size == 0 {
+ if newLen == 0 {
// list is empty after trim
db.IncrTableKeyCount(table, -1, wb)
//delete the expire data related to the list key
- db.delExpire(ListType, key, wb)
+ db.delExpire(ListType, key, nil, false, wb)
}
- err = db.eng.Write(db.defaultWriteOpts, wb)
+ db.topLargeCollKeys.Update(key, int(newLen))
+ err = db.rockEng.Write(wb)
return trimEndSeq - trimStartSeq + 1, err
}
// ps : this only focuses on deleting the list data;
// anything else, such as expire, is ignored.
-func (db *RockDB) lDelete(key []byte, wb *gorocksdb.WriteBatch) int64 {
- table, rk, _ := extractTableFromRedisKey(key)
- if len(table) == 0 {
+func (db *RockDB) lDelete(ts int64, key []byte, wb engine.WriteBatch) int64 {
+ keyInfo, headSeq, tailSeq, size, _, err := db.lHeaderAndMeta(ts, key, false)
+ if err != nil {
+ return 0
+ }
+ // no need to delete if already expired
+ if keyInfo.IsNotExistOrExpired() || size == 0 {
return 0
}
+ table := keyInfo.Table
mk := lEncodeMetaKey(key)
-
- var headSeq int64
- var tailSeq int64
- var size int64
- var err error
-
- headSeq, tailSeq, size, _, err = db.lGetMeta(mk)
- if err != nil {
- return 0
+ wb.Delete(mk)
+ if size > 0 {
+ db.IncrTableKeyCount(table, -1, wb)
+ }
+ db.topLargeCollKeys.Update(key, int(0))
+ if db.cfg.ExpirationPolicy == common.WaitCompact {
+ // for compact ttl, we can just delete the meta
+ return size
}
+ rk := keyInfo.VerKey
startKey := lEncodeListKey(table, rk, headSeq)
stopKey := lEncodeListKey(table, rk, tailSeq)
@@ -511,7 +513,10 @@ func (db *RockDB) lDelete(key []byte, wb *gorocksdb.WriteBatch) int64 {
if size > RangeDeleteNum {
wb.DeleteRange(startKey, stopKey)
} else {
- rit, err := NewDBRangeIterator(db.eng, startKey, stopKey, common.RangeClose, false)
+ opts := engine.IteratorOpts{
+ Range: engine.Range{Min: startKey, Max: stopKey, Type: common.RangeClose},
+ }
+ rit, err := db.NewDBRangeIteratorWithOpts(opts)
if err != nil {
return 0
}
@@ -523,25 +528,20 @@ func (db *RockDB) lDelete(key []byte, wb *gorocksdb.WriteBatch) int64 {
// delete range is [left, right), so we need to delete the end key
wb.Delete(stopKey)
- if size > 0 {
- db.IncrTableKeyCount(table, -1, wb)
- }
-
- wb.Delete(mk)
return size
}
-func (db *RockDB) lGetMeta(ek []byte) (headSeq int64, tailSeq int64, size int64, ts int64, err error) {
- var v []byte
- v, err = db.eng.GetBytes(db.defaultReadOpts, ek)
- if err != nil {
- return
- } else if v == nil {
+func parseListMeta(v []byte) (headSeq int64, tailSeq int64, size int64, ts int64, err error) {
+ if len(v) == 0 {
headSeq = listInitialSeq
tailSeq = listInitialSeq
size = 0
return
} else {
+ if len(v) < 16 {
+ err = errListMeta
+ return
+ }
headSeq = int64(binary.BigEndian.Uint64(v[0:8]))
tailSeq = int64(binary.BigEndian.Uint64(v[8:16]))
if len(v) >= 24 {
@@ -552,71 +552,84 @@ func (db *RockDB) lGetMeta(ek []byte) (headSeq int64, tailSeq int64, size int64,
return
}
-func (db *RockDB) lSetMeta(ek []byte, headSeq int64, tailSeq int64, ts int64, wb *gorocksdb.WriteBatch) (int64, error) {
+func encodeListMeta(oldh *headerMetaValue, headSeq int64, tailSeq int64, ts int64) []byte {
+ buf := make([]byte, 24)
+ binary.BigEndian.PutUint64(buf[0:8], uint64(headSeq))
+ binary.BigEndian.PutUint64(buf[8:16], uint64(tailSeq))
+ binary.BigEndian.PutUint64(buf[16:24], uint64(ts))
+ oldh.UserData = buf
+ nv := oldh.encodeWithData()
+ return nv
+}
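// A sketch of the user portion of the list meta encoded above: 24 bytes, all
// big-endian, wrapped into the shared headerMetaValue before being written
// under the lmeta key.
//
//	// bytes [0, 8)   headSeq
//	// bytes [8, 16)  tailSeq
//	// bytes [16, 24) last modify timestamp
//	// list length = tailSeq - headSeq + 1
//	head, tail, size, ts, err := parseListMeta(keyInfo.MetaData())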
+
+func (db *RockDB) lSetMeta(key []byte, oldh *headerMetaValue, headSeq int64, tailSeq int64, ts int64, wb engine.WriteBatch) (int64, error) {
+ metaKey := lEncodeMetaKey(key)
size := tailSeq - headSeq + 1
if size < 0 {
- // todo : log error + panic
- dbLog.Warningf("list %v invalid meta sequence range [%d, %d]", string(ek), headSeq, tailSeq)
+ dbLog.Warningf("list %v invalid meta sequence range [%d, %d]", string(key), headSeq, tailSeq)
return 0, errListSeq
} else if size == 0 {
- wb.Delete(ek)
+ wb.Delete(metaKey)
} else {
- buf := make([]byte, 24)
- binary.BigEndian.PutUint64(buf[0:8], uint64(headSeq))
- binary.BigEndian.PutUint64(buf[8:16], uint64(tailSeq))
- binary.BigEndian.PutUint64(buf[16:24], uint64(ts))
- wb.Put(ek, buf)
+ buf := encodeListMeta(oldh, headSeq, tailSeq, ts)
+ wb.Put(metaKey, buf)
}
return size, nil
}
-func (db *RockDB) LIndex(key []byte, index int64) ([]byte, error) {
- if err := checkKeySize(key); err != nil {
- return nil, err
+func (db *RockDB) lHeaderAndMeta(ts int64, key []byte, useLock bool) (collVerKeyInfo, int64, int64, int64, int64, error) {
+ keyInfo, err := db.GetCollVersionKey(ts, ListType, key, useLock)
+ if err != nil {
+ return keyInfo, 0, 0, 0, 0, err
}
+ headSeq, tailSeq, size, ts, err := parseListMeta(keyInfo.MetaData())
+ return keyInfo, headSeq, tailSeq, size, ts, err
+}
- var seq int64
- var headSeq int64
- var tailSeq int64
- var err error
-
- metaKey := lEncodeMetaKey(key)
-
- headSeq, tailSeq, _, _, err = db.lGetMeta(metaKey)
+func (db *RockDB) LIndex(key []byte, index int64) ([]byte, error) {
+ ts := time.Now().UnixNano()
+ keyInfo, headSeq, tailSeq, _, _, err := db.lHeaderAndMeta(ts, key, true)
if err != nil {
return nil, err
}
+ if keyInfo.IsNotExistOrExpired() {
+ return nil, nil
+ }
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
+ var seq int64
if index >= 0 {
seq = headSeq + index
} else {
seq = tailSeq + index + 1
}
-
- sk, err := convertRedisKeyToDBListKey(key, seq)
- if err != nil {
- return nil, err
+ if seq < headSeq || seq > tailSeq {
+ return nil, nil
}
- return db.eng.GetBytes(db.defaultReadOpts, sk)
+ sk := lEncodeListKey(table, rk, seq)
+ return db.GetBytes(sk)
}
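// A worked example of the index-to-sequence mapping used by LIndex, for a
// five-element list with headSeq = h and tailSeq = h+4:
//
//	// index 0  -> seq h     (first element)
//	// index 4  -> seq h+4   (last element)
//	// index -1 -> seq h+4   (tailSeq + index + 1)
//	// index -5 -> seq h     (first element)
//	// anything outside [headSeq, tailSeq] returns nil, nil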
func (db *RockDB) LVer(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
+ keyInfo, err := db.GetCollVersionKey(0, ListType, key, true)
+ if err != nil {
return 0, err
}
-
- ek := lEncodeMetaKey(key)
- _, _, _, ts, err := db.lGetMeta(ek)
+ _, _, _, ts, err := parseListMeta(keyInfo.MetaData())
return ts, err
}
func (db *RockDB) LLen(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
+ ts := time.Now().UnixNano()
+ keyInfo, err := db.GetCollVersionKey(ts, ListType, key, true)
+ if err != nil {
return 0, err
}
-
- ek := lEncodeMetaKey(key)
- _, _, size, _, err := db.lGetMeta(ek)
+ if keyInfo.IsNotExistOrExpired() {
+ return 0, nil
+ }
+ _, _, size, _, err := parseListMeta(keyInfo.MetaData())
return int64(size), err
}
@@ -641,7 +654,7 @@ func (db *RockDB) LTrimBack(ts int64, key []byte, trimSize int64) (int64, error)
}
func (db *RockDB) LPush(ts int64, key []byte, args ...[]byte) (int64, error) {
- if len(args) >= MAX_BATCH_NUM {
+ if len(args) > MAX_BATCH_NUM {
return 0, errTooMuchBatchSize
}
return db.lpush(ts, key, listHeadSeq, args...)
@@ -650,24 +663,20 @@ func (db *RockDB) LSet(ts int64, key []byte, index int64, value []byte) error {
if err := checkKeySize(key); err != nil {
return err
}
-
- var seq int64
- var headSeq int64
- var tailSeq int64
- var size int64
- var err error
- metaKey := lEncodeMetaKey(key)
-
- headSeq, tailSeq, size, _, err = db.lGetMeta(metaKey)
+ keyInfo, headSeq, tailSeq, size, _, err := db.lHeaderAndMeta(ts, key, false)
if err != nil {
return err
}
+ if keyInfo.IsNotExistOrExpired() {
+ return errListIndex
+ }
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
if size == 0 {
return errListIndex
}
- db.wb.Clear()
- wb := db.wb
+ var seq int64
if index >= 0 {
seq = headSeq + index
} else {
@@ -676,13 +685,11 @@ func (db *RockDB) LSet(ts int64, key []byte, index int64, value []byte) error {
if seq < headSeq || seq > tailSeq {
return errListIndex
}
- sk, err := convertRedisKeyToDBListKey(key, seq)
- if err != nil {
- return err
- }
- db.lSetMeta(metaKey, headSeq, tailSeq, ts, wb)
+ wb := db.wb
+ sk := lEncodeListKey(table, rk, seq)
+ db.lSetMeta(key, keyInfo.OldHeader, headSeq, tailSeq, ts, wb)
wb.Put(sk, value)
- err = db.eng.Write(db.defaultWriteOpts, wb)
+ err = db.CommitBatchWrite()
return err
}
@@ -690,17 +697,16 @@ func (db *RockDB) LRange(key []byte, start int64, stop int64) ([][]byte, error)
if err := checkKeySize(key); err != nil {
return nil, err
}
-
- var headSeq int64
- var tailSeq int64
- var llen int64
- var err error
-
- metaKey := lEncodeMetaKey(key)
-
- if headSeq, tailSeq, llen, _, err = db.lGetMeta(metaKey); err != nil {
+ ts := time.Now().UnixNano()
+ keyInfo, headSeq, tailSeq, llen, _, err := db.lHeaderAndMeta(ts, key, true)
+ if err != nil {
return nil, err
}
+ if keyInfo.IsNotExistOrExpired() {
+ return [][]byte{}, nil
+ }
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
if start < 0 {
start = llen + start
@@ -721,22 +727,18 @@ func (db *RockDB) LRange(key []byte, start int64, stop int64) ([][]byte, error)
}
limit := (stop - start) + 1
- if limit >= MAX_BATCH_NUM {
+ if limit > MAX_BATCH_NUM {
return nil, errTooMuchBatchSize
}
headSeq += start
+ // TODO: use pool for large alloc
v := make([][]byte, 0, limit)
- startKey, err := convertRedisKeyToDBListKey(key, headSeq)
- if err != nil {
- return nil, err
- }
- stopKey, err := convertRedisKeyToDBListKey(key, tailSeq)
- if err != nil {
- return nil, err
- }
- rit, err := NewDBRangeLimitIterator(db.eng, startKey, stopKey, common.RangeClose, 0, int(limit), false)
+ startKey := lEncodeListKey(table, rk, headSeq)
+ stopKey := lEncodeListKey(table, rk, tailSeq)
+
+ rit, err := db.NewDBRangeLimitIterator(startKey, stopKey, common.RangeClose, 0, int(limit), false)
if err != nil {
return nil, err
}
@@ -744,10 +746,6 @@ func (db *RockDB) LRange(key []byte, start int64, stop int64) ([][]byte, error)
v = append(v, rit.Value())
}
rit.Close()
- if int64(len(v)) < llen && int64(len(v)) < limit {
- dbLog.Infof("list %v range count %v not match llen: %v, meta: %v, %v",
- string(key), len(v), llen, headSeq, tailSeq)
- }
return v, nil
}
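
The LRange hunk above clamps negative offsets against the list length before building the iterator; part of the clamping is elided from the hunk, so the sketch below assumes the usual Redis LRANGE semantics (standalone, illustrative only; normalizeRange is a hypothetical helper):

package main

import "fmt"

// normalizeRange clamps a start/stop pair against the list length: negative
// offsets count from the end, start is floored at 0, stop is capped at
// llen-1, and ok is false when the resulting window is empty.
func normalizeRange(llen, start, stop int64) (int64, int64, bool) {
	if start < 0 {
		start = llen + start
	}
	if stop < 0 {
		stop = llen + stop
	}
	if start < 0 {
		start = 0
	}
	if start > stop || start >= llen {
		return 0, 0, false
	}
	if stop >= llen {
		stop = llen - 1
	}
	return start, stop, true
}

func main() {
	fmt.Println(normalizeRange(100, 0, -1))   // 0 99 true: whole list
	fmt.Println(normalizeRange(100, -10, -1)) // 90 99 true: last 10 elements
	fmt.Println(normalizeRange(100, 50, 10))  // 0 0 false: empty window
}
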
@@ -756,40 +754,40 @@ func (db *RockDB) RPop(ts int64, key []byte) ([]byte, error) {
}
func (db *RockDB) RPush(ts int64, key []byte, args ...[]byte) (int64, error) {
- if len(args) >= MAX_BATCH_NUM {
+ if len(args) > MAX_BATCH_NUM {
return 0, errTooMuchBatchSize
}
return db.lpush(ts, key, listTailSeq, args...)
}
-func (db *RockDB) LClear(key []byte) (int64, error) {
+func (db *RockDB) LClear(ts int64, key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
- db.wb.Clear()
- num := db.lDelete(key, db.wb)
+ num := db.lDelete(ts, key, db.wb)
+ //delete the expire data related to the list key
+ db.delExpire(ListType, key, nil, false, db.wb)
+ err := db.CommitBatchWrite()
+ // the return value is the number of deleted keys (0 or 1), not the number of list elements
if num > 0 {
- //delete the expire data related to the list key
- db.delExpire(ListType, key, db.wb)
+ return 1, err
}
- err := db.eng.Write(db.defaultWriteOpts, db.wb)
- return num, err
+ return 0, err
}
func (db *RockDB) LMclear(keys ...[]byte) (int64, error) {
- if len(keys) >= MAX_BATCH_NUM {
+ if len(keys) > MAX_BATCH_NUM {
return 0, errTooMuchBatchSize
}
- db.wb.Clear()
for _, key := range keys {
if err := checkKeySize(key); err != nil {
return 0, err
}
- db.lDelete(key, db.wb)
- db.delExpire(ListType, key, db.wb)
+ db.lDelete(0, key, db.wb)
+ db.delExpire(ListType, key, nil, false, db.wb)
}
- err := db.eng.Write(db.defaultWriteOpts, db.wb)
+ err := db.CommitBatchWrite()
if err != nil {
// TODO: log here, the list may be corrupt
}
@@ -797,8 +795,8 @@ func (db *RockDB) LMclear(keys ...[]byte) (int64, error) {
return int64(len(keys)), err
}
-func (db *RockDB) lMclearWithBatch(wb *gorocksdb.WriteBatch, keys ...[]byte) error {
- if len(keys) >= MAX_BATCH_NUM {
+func (db *RockDB) lMclearWithBatch(wb engine.WriteBatch, keys ...[]byte) error {
+ if len(keys) > MAX_BATCH_NUM {
return errTooMuchBatchSize
}
@@ -806,53 +804,20 @@ func (db *RockDB) lMclearWithBatch(wb *gorocksdb.WriteBatch, keys ...[]byte) err
if err := checkKeySize(key); err != nil {
return err
}
- db.lDelete(key, wb)
- db.delExpire(ListType, key, wb)
+ db.lDelete(0, key, wb)
+ db.delExpire(ListType, key, nil, false, wb)
}
return nil
}
func (db *RockDB) LKeyExists(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
- sk := lEncodeMetaKey(key)
- v, err := db.eng.GetBytes(db.defaultReadOpts, sk)
- if v != nil && err == nil {
- return 1, nil
- }
- return 0, err
+ return db.collKeyExists(ListType, key)
}
-func (db *RockDB) LExpire(key []byte, duration int64) (int64, error) {
- if exists, err := db.LKeyExists(key); err != nil || exists != 1 {
- return 0, err
- } else {
- if err2 := db.expire(ListType, key, duration); err2 != nil {
- return 0, err2
- } else {
- return 1, nil
- }
- }
+func (db *RockDB) LExpire(ts int64, key []byte, duration int64) (int64, error) {
+ return db.collExpire(ts, ListType, key, duration)
}
-func (db *RockDB) LPersist(key []byte) (int64, error) {
- if exists, err := db.LKeyExists(key); err != nil || exists != 1 {
- return 0, err
- }
-
- if ttl, err := db.ttl(ListType, key); err != nil || ttl < 0 {
- return 0, err
- }
-
- db.wb.Clear()
- if err := db.delExpire(ListType, key, db.wb); err != nil {
- return 0, err
- } else {
- if err2 := db.eng.Write(db.defaultWriteOpts, db.wb); err2 != nil {
- return 0, err2
- } else {
- return 1, nil
- }
- }
+func (db *RockDB) LPersist(ts int64, key []byte) (int64, error) {
+ return db.collPersist(ts, ListType, key)
}
diff --git a/rockredis/t_list_test.go b/rockredis/t_list_test.go
index f8df532b..d013a001 100644
--- a/rockredis/t_list_test.go
+++ b/rockredis/t_list_test.go
@@ -26,7 +26,7 @@ func TestListCodec(t *testing.T) {
t.Fatal(string(k))
}
- ek, _ = convertRedisKeyToDBListKey(key, 1024)
+ ek = lEncodeListKey([]byte("test"), []byte("key"), 1024)
if tb, k, seq, err := lDecodeListKey(ek); err != nil {
t.Fatal(err)
} else if string(k) != "key" {
@@ -46,7 +46,7 @@ func TestListTrim(t *testing.T) {
key := []byte("test:test_list_trim")
init := func() {
- db.LClear(key)
+ db.LClear(0, key)
for i := 0; i < 100; i++ {
n, err := db.RPush(0, key, []byte(strconv.Itoa(i)))
if err != nil {
@@ -126,7 +126,41 @@ func TestListTrim(t *testing.T) {
if string(v) != "97" {
t.Fatal("wrong value", string(v))
}
- // TODO: LTrimFront, LTrimBack
+ err = db.LTrim(0, key, 10, 1)
+ assert.Nil(t, err)
+ n, err := db.LLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ vlist, err := db.LRange(key, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, int(0), len(vlist))
+ init()
+ n, err = db.LLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(100), n)
+
+ err = db.LTrim(0, key, 1000, 10000)
+ assert.Nil(t, err)
+ n, err = db.LLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ vlist, err = db.LRange(key, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, int(0), len(vlist))
+
+ init()
+ n, err = db.LLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(100), n)
+
+ err = db.LTrim(0, key, 2, 1)
+ assert.Nil(t, err)
+ n, err = db.LLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ vlist, err = db.LRange(key, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, int(0), len(vlist))
}
func TestDBList(t *testing.T) {
@@ -136,11 +170,13 @@ func TestDBList(t *testing.T) {
key := []byte("test:testdb_list_a")
- if n, err := db.RPush(0, key, []byte("1"), []byte("2"), []byte("3")); err != nil {
- t.Fatal(err)
- } else if n != 3 {
- t.Fatal(n)
- }
+ n, err := db.RPush(0, key, []byte("1"), []byte("2"), []byte("3"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(3), n)
+
+ llen, err := db.LLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(3), llen)
if ay, err := db.LRange(key, 0, -1); err != nil {
t.Fatal(err)
@@ -154,33 +190,37 @@ func TestDBList(t *testing.T) {
}
}
- if k, err := db.RPop(0, key); err != nil {
- t.Fatal(err)
- } else if string(k) != "3" {
- t.Fatal(string(k))
- }
+ k, err := db.RPop(0, key)
+ assert.Nil(t, err)
+ assert.Equal(t, "3", string(k))
- if k, err := db.LPop(0, key); err != nil {
- t.Fatal(err)
- } else if string(k) != "1" {
- t.Fatal(string(k))
- }
+ llen, err = db.LLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), llen)
- if llen, err := db.LLen(key); err != nil {
- t.Fatal(err)
- } else if llen != 1 {
- t.Fatal(llen)
- }
+ k, err = db.LPop(0, key)
+ assert.Nil(t, err)
+ assert.Equal(t, "1", string(k))
+
+ ay, err := db.LRange(key, 0, -1)
+ assert.Nil(t, err)
+ t.Log(ay)
+ assert.Equal(t, 1, len(ay))
+ assert.Equal(t, "2", string(ay[0]))
- if num, err := db.LClear(key); err != nil {
+ llen, err = db.LLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), llen)
+
+ if num, err := db.LClear(0, key); err != nil {
t.Fatal(err)
} else if num != 1 {
t.Error(num)
}
- if llen, _ := db.LLen(key); llen != 0 {
- t.Fatal(llen)
- }
+ llen, err = db.LLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), llen)
// TODO: LSet
}
@@ -356,7 +396,7 @@ func TestListLPushRPop(t *testing.T) {
assert.Nil(t, err)
atomic.AddInt32(&pushed, 1)
time.Sleep(time.Microsecond * time.Duration(r.Int31n(1000)))
- if time.Since(start) > time.Second*10 {
+ if time.Since(start) > time.Second*3 {
break
}
}
@@ -374,7 +414,7 @@ func TestListLPushRPop(t *testing.T) {
atomic.AddInt32(&poped, 1)
}
time.Sleep(time.Microsecond * time.Duration(r.Int31n(1000)))
- if time.Since(start) > time.Second*10 {
+ if time.Since(start) > time.Second*3 {
break
}
}
@@ -387,3 +427,91 @@ func TestListLPushRPop(t *testing.T) {
assert.True(t, pushed >= poped)
assert.Equal(t, int64(pushed-poped), length)
}
+
+func TestDBListClearInCompactTTL(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key := []byte("test:testdb_list_clear_compact_a")
+ member := []byte("member")
+ memberNew := []byte("memberNew")
+
+ ts := time.Now().UnixNano()
+ db.RPush(ts, key, member)
+ db.RPush(ts, key, member)
+
+ n, err := db.LLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ v, err := db.LIndex(key, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, member, v)
+
+ vlist, err := db.LRange(key, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, member, vlist[0])
+ assert.Equal(t, member, vlist[1])
+ assert.Equal(t, int(n), len(vlist))
+
+ ts = time.Now().UnixNano()
+ n, err = db.LClear(ts, key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.LLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.LKeyExists(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ v, err = db.LIndex(key, 0)
+ assert.Nil(t, err)
+ assert.Nil(t, v)
+
+ vlist, err = db.LRange(key, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, int(0), len(vlist))
+
+ ts = time.Now().UnixNano()
+ v, err = db.LPop(ts, key)
+ assert.Nil(t, err)
+ assert.Nil(t, v)
+
+ // renew
+ ts = time.Now().UnixNano()
+ db.RPush(ts, key, memberNew)
+
+ n, err = db.LLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ v, err = db.LIndex(key, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, memberNew, v)
+
+ vlist, err = db.LRange(key, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, memberNew, vlist[0])
+ assert.Equal(t, int(n), len(vlist))
+}
+
+func BenchmarkListAddAndLtrim(b *testing.B) {
+ db := getTestDBForBench()
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key := []byte("test:list_addtrim_bench")
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ db.RPush(0, key, []byte(strconv.Itoa(i)))
+ }
+ for i := 100; i >= 1; i-- {
+ db.LTrim(0, key, 0, int64(i))
+ }
+ b.StopTimer()
+}
diff --git a/rockredis/t_set.go b/rockredis/t_set.go
index d33e1bbf..49e5041f 100644
--- a/rockredis/t_set.go
+++ b/rockredis/t_set.go
@@ -3,31 +3,20 @@ package rockredis
import (
"encoding/binary"
"errors"
+ "time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/gorocksdb"
+ ps "github.com/prometheus/client_golang/prometheus"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
+ "github.com/youzan/ZanRedisDB/metric"
+ "github.com/youzan/ZanRedisDB/slow"
)
var (
- errSetKey = errors.New("invalid set key")
- errSSizeKey = errors.New("invalid ssize key")
- errSetMemberSize = errors.New("invalid set member size")
+ errSetKey = errors.New("invalid set key")
+ errSSizeKey = errors.New("invalid ssize key")
)
-const (
- setStartSep byte = ':'
- setStopSep byte = setStartSep + 1
-)
-
-func checkSetKMSize(key []byte, member []byte) error {
- if len(key) > MaxKeySize || len(key) == 0 {
- return errKeySize
- } else if len(member) > MaxSetMemberSize {
- return errSetMemberSize
- }
- return nil
-}
-
func sEncodeSizeKey(key []byte) []byte {
buf := make([]byte, len(key)+1+len(metaPrefix))
@@ -52,63 +41,18 @@ func sDecodeSizeKey(ek []byte) ([]byte, error) {
return ek[pos:], nil
}
-func convertRedisKeyToDBSKey(key []byte, member []byte) ([]byte, error) {
- table, rk, err := extractTableFromRedisKey(key)
- if err != nil {
- return nil, err
- }
- if err := checkSetKMSize(rk, member); err != nil {
- return nil, err
- }
- dbKey := sEncodeSetKey(table, rk, member)
- return dbKey, nil
-}
-
func sEncodeSetKey(table []byte, key []byte, member []byte) []byte {
- buf := make([]byte, getDataTablePrefixBufLen(SetType, table)+len(key)+len(member)+1+2)
-
- pos := encodeDataTablePrefixToBuf(buf, SetType, table)
-
- binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
- pos += 2
-
- copy(buf[pos:], key)
- pos += len(key)
-
- buf[pos] = setStartSep
- pos++
- copy(buf[pos:], member)
-
- return buf
+ return encodeCollSubKey(SetType, table, key, member)
}
func sDecodeSetKey(ek []byte) ([]byte, []byte, []byte, error) {
- table, pos, err := decodeDataTablePrefixFromBuf(ek, SetType)
-
+ dt, table, key, member, err := decodeCollSubKey(ek)
if err != nil {
return nil, nil, nil, err
}
-
- if pos+2 > len(ek) {
- return nil, nil, nil, errSetKey
- }
-
- keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
- pos += 2
-
- if keyLen+pos > len(ek) {
- return table, nil, nil, errSetKey
+ if dt != SetType {
+ return table, key, member, errCollTypeMismatch
}
-
- key := ek[pos : pos+keyLen]
- pos += keyLen
-
- if ek[pos] != hashStartSep {
- return table, nil, nil, errSetKey
- }
-
- pos++
- member := ek[pos:]
return table, key, member, nil
}
@@ -118,56 +62,70 @@ func sEncodeStartKey(table []byte, key []byte) []byte {
func sEncodeStopKey(table []byte, key []byte) []byte {
k := sEncodeSetKey(table, key, nil)
-
- k[len(k)-1] = setStopSep
-
+ k[len(k)-1] = collStopSep
return k
}
-func (db *RockDB) sDelete(key []byte, wb *gorocksdb.WriteBatch) int64 {
- table, rk, err := extractTableFromRedisKey(key)
- if len(table) == 0 {
- return 0
- }
-
+func (db *RockDB) sDelete(tn int64, key []byte, wb engine.WriteBatch) (int64, error) {
sk := sEncodeSizeKey(key)
- start := sEncodeStartKey(table, rk)
- stop := sEncodeStopKey(table, rk)
-
- num, err := db.SCard(key)
+ keyInfo, err := db.getCollVerKeyForRange(tn, SetType, key, false)
+ if err != nil {
+ return 0, err
+ }
+ // no need to delete if expired
+ if keyInfo.IsNotExistOrExpired() {
+ return 0, nil
+ }
+ num, err := db.sGetSize(tn, key, false)
if err != nil {
- return 0
+ return 0, err
}
+ if num == 0 {
+ return 0, nil
+ }
+ if num > 0 {
+ db.IncrTableKeyCount(keyInfo.Table, -1, wb)
+ }
+ wb.Delete(sk)
+
+ db.topLargeCollKeys.Update(key, int(0))
+ if db.cfg.ExpirationPolicy == common.WaitCompact {
+ // for compact ttl, we can just delete the meta; the remaining members are reclaimed during compaction
+ return num, nil
+ }
+ start := keyInfo.RangeStart
+ stop := keyInfo.RangeEnd
+
if num > RangeDeleteNum {
wb.DeleteRange(start, stop)
} else {
- it, err := NewDBRangeIterator(db.eng, start, stop, common.RangeROpen, false)
+ opts := engine.IteratorOpts{
+ Range: engine.Range{Min: start, Max: stop, Type: common.RangeROpen},
+ }
+ it, err := db.NewDBRangeIteratorWithOpts(opts)
if err != nil {
- return 0
+ return 0, err
}
for ; it.Valid(); it.Next() {
wb.Delete(it.RefKey())
}
it.Close()
}
- if num > 0 {
- db.IncrTableKeyCount(table, -1, wb)
- db.delExpire(SetType, key, wb)
- }
- wb.Delete(sk)
- return num
+ _, err = db.delExpire(SetType, key, nil, false, wb)
+ if err != nil {
+ return 0, err
+ }
+ return num, nil
}
// size key includes the set size and the set modify timestamp
-func (db *RockDB) sIncrSize(ts int64, key []byte, delta int64, wb *gorocksdb.WriteBatch) (int64, error) {
+func (db *RockDB) sIncrSize(ts int64, key []byte, oldh *headerMetaValue, delta int64, wb engine.WriteBatch) (int64, error) {
sk := sEncodeSizeKey(key)
+ meta := oldh.UserData
var size int64
- meta, err := db.eng.GetBytesNoLock(db.defaultReadOpts, sk)
- if err != nil {
- return 0, err
- }
+ var err error
if len(meta) == 0 {
size = 0
} else if len(meta) < 8 {
@@ -185,113 +143,97 @@ func (db *RockDB) sIncrSize(ts int64, key []byte, delta int64, wb *gorocksdb.Wri
buf := make([]byte, 16)
binary.BigEndian.PutUint64(buf[0:8], uint64(size))
binary.BigEndian.PutUint64(buf[8:16], uint64(ts))
- wb.Put(sk, buf)
+ oldh.UserData = buf
+ nv := oldh.encodeWithData()
+ wb.Put(sk, nv)
}
return size, nil
}
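
sIncrSize keeps a 16-byte payload in the header's UserData: the set cardinality followed by the last-modify timestamp, both big-endian. A standalone round-trip sketch of that layout (illustrative, not the project's API):

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeSetSizeMeta lays out the 16-byte user data kept under the set size
// key: 8 bytes big-endian size followed by 8 bytes big-endian modify ts,
// matching what sIncrSize writes above.
func encodeSetSizeMeta(size, ts int64) []byte {
	buf := make([]byte, 16)
	binary.BigEndian.PutUint64(buf[0:8], uint64(size))
	binary.BigEndian.PutUint64(buf[8:16], uint64(ts))
	return buf
}

// decodeSetSizeMeta reads the size back like sGetSize and the timestamp
// back like sGetVer.
func decodeSetSizeMeta(buf []byte) (int64, int64, error) {
	if len(buf) < 16 {
		return 0, 0, fmt.Errorf("meta too short: %d", len(buf))
	}
	size := int64(binary.BigEndian.Uint64(buf[0:8]))
	ts := int64(binary.BigEndian.Uint64(buf[8:16]))
	return size, ts, nil
}

func main() {
	buf := encodeSetSizeMeta(3, 1600000000000000000)
	fmt.Println(decodeSetSizeMeta(buf)) // 3 1600000000000000000 <nil>
}
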
-func (db *RockDB) sGetSize(key []byte) (int64, error) {
+func (db *RockDB) sGetSize(tn int64, key []byte, useLock bool) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
- sk := sEncodeSizeKey(key)
- meta, err := db.eng.GetBytesNoLock(db.defaultReadOpts, sk)
+ oldh, expired, err := db.collHeaderMeta(tn, SetType, key, useLock)
if err != nil {
return 0, err
}
- if len(meta) == 0 {
+ if len(oldh.UserData) == 0 || expired {
return 0, nil
}
- if len(meta) < 8 {
+ if len(oldh.UserData) < 8 {
return 0, errIntNumber
}
- return Int64(meta[:8], err)
+ return Int64(oldh.UserData[:8], err)
}
func (db *RockDB) sGetVer(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
- sk := sEncodeSizeKey(key)
- meta, err := db.eng.GetBytesNoLock(db.defaultReadOpts, sk)
+ oldh, _, err := db.collHeaderMeta(time.Now().UnixNano(), SetType, key, true)
if err != nil {
return 0, err
}
- if len(meta) == 0 {
+ if len(oldh.UserData) == 0 {
return 0, nil
}
- if len(meta) < 16 {
+ if len(oldh.UserData) < 16 {
return 0, errIntNumber
}
- return Int64(meta[8:16], err)
-}
-
-func (db *RockDB) sSetItem(ts int64, key []byte, member []byte, wb *gorocksdb.WriteBatch) (int64, error) {
- table, _, err := extractTableFromRedisKey(key)
- if err != nil {
- return 0, err
- }
-
- ek, err := convertRedisKeyToDBSKey(key, member)
- if err != nil {
- return 0, err
- }
-
- var n int64 = 1
- if v, _ := db.eng.GetBytesNoLock(db.defaultReadOpts, ek); v != nil {
- n = 0
- } else {
- if newNum, err := db.sIncrSize(ts, key, 1, wb); err != nil {
- return 0, err
- } else if newNum == 1 {
- db.IncrTableKeyCount(table, 1, wb)
- }
- }
-
- wb.Put(ek, nil)
- return n, nil
+ return Int64(oldh.UserData[8:16], err)
}
func (db *RockDB) SAdd(ts int64, key []byte, args ...[]byte) (int64, error) {
- if len(args) >= MAX_BATCH_NUM {
+ if len(args) > MAX_BATCH_NUM {
return 0, errTooMuchBatchSize
}
- table, rk, _ := extractTableFromRedisKey(key)
- if len(table) == 0 {
- return 0, errTableName
- }
wb := db.wb
- wb.Clear()
+ defer wb.Clear()
+
+ keyInfo, err := db.prepareCollKeyForWrite(ts, SetType, key, nil)
+ if err != nil {
+ return 0, err
+ }
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
+ oldh := keyInfo.OldHeader
- var err error
var ek []byte
var num int64 = 0
for i := 0; i < len(args); i++ {
- if err := checkSetKMSize(key, args[i]); err != nil {
+ if err := checkCollKFSize(key, args[i]); err != nil {
return 0, err
}
ek = sEncodeSetKey(table, rk, args[i])
- // TODO: how to tell not found and nil value (member value is also nil)
- if v, err := db.eng.GetBytesNoLock(db.defaultReadOpts, ek); err != nil {
+ // must use Exist to tell the difference between not found and a nil value (the member value is also nil)
+ if vok, err := db.ExistNoLock(ek); err != nil {
return 0, err
- } else if v == nil {
+ } else if !vok {
num++
+ wb.Put(ek, nil)
}
- wb.Put(ek, nil)
}
- if newNum, err := db.sIncrSize(ts, key, num, wb); err != nil {
+ newNum, err := db.sIncrSize(ts, key, oldh, num, wb)
+ if err != nil {
return 0, err
- } else if newNum > 0 && newNum == num {
+ } else if newNum > 0 && newNum == num && !keyInfo.Expired {
db.IncrTableKeyCount(table, 1, wb)
}
+ db.topLargeCollKeys.Update(key, int(newNum))
+ slow.LogLargeCollection(int(newNum), slow.NewSlowLogInfo(string(table), string(key), "set"))
+ if newNum > collectionLengthForMetric {
+ metric.CollectionLenDist.With(ps.Labels{
+ "table": string(table),
+ }).Observe(float64(newNum))
+ }
- err = db.eng.Write(db.defaultWriteOpts, wb)
+ err = db.rockEng.Write(wb)
return num, err
-
}
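
The new metric hook in SAdd observes the collection length on a Prometheus histogram labeled by table once it crosses collectionLengthForMetric. A minimal standalone sketch of that pattern using the client_golang API directly (the histogram name and the 128 threshold are assumptions, not values taken from the project):

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

// collectionLenDist is a stand-in for the project's metric.CollectionLenDist:
// a histogram of collection lengths partitioned by table name.
var collectionLenDist = prometheus.NewHistogramVec(prometheus.HistogramOpts{
	Name:    "collection_len_dist",
	Help:    "distribution of collection lengths per table",
	Buckets: prometheus.ExponentialBuckets(128, 2, 12),
}, []string{"table"})

const collectionLengthForMetric = 128 // assumed threshold, not the real value

// observeCollectionLen mirrors the SAdd path: only large collections are
// worth recording, and the observation is tagged with the table name.
func observeCollectionLen(table string, newNum int64) {
	if newNum > collectionLengthForMetric {
		collectionLenDist.With(prometheus.Labels{"table": table}).Observe(float64(newNum))
	}
}

func main() {
	prometheus.MustRegister(collectionLenDist)
	observeCollectionLen("test", 4096)
}
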
func (db *RockDB) SGetVer(key []byte) (int64, error) {
@@ -299,59 +241,91 @@ func (db *RockDB) SGetVer(key []byte) (int64, error) {
}
func (db *RockDB) SCard(key []byte) (int64, error) {
- return db.sGetSize(key)
+ tn := time.Now().UnixNano()
+ return db.sGetSize(tn, key, true)
}
-func (db *RockDB) SKeyExists(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
+func (db *RockDB) SIsMember(key []byte, member []byte) (int64, error) {
+ tn := time.Now().UnixNano()
+ keyInfo, err := db.GetCollVersionKey(tn, SetType, key, true)
+ if err != nil {
return 0, err
}
- sk := sEncodeSizeKey(key)
- v, err := db.eng.GetBytes(db.defaultReadOpts, sk)
- if v != nil && err == nil {
- return 1, nil
+ if keyInfo.IsNotExistOrExpired() {
+ return 0, nil
}
- return 0, err
-}
-
-func (db *RockDB) SIsMember(key []byte, member []byte) (int64, error) {
- ek, err := convertRedisKeyToDBSKey(key, member)
- if err != nil {
+ if err := common.CheckSubKey(member); err != nil {
return 0, err
}
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
+ ek := sEncodeSetKey(table, rk, member)
var n int64 = 1
- if v, err := db.eng.GetBytes(db.defaultReadOpts, ek); err != nil {
+ if vok, err := db.Exist(ek); err != nil {
return 0, err
- } else if v == nil {
+ } else if !vok {
n = 0
}
return n, nil
}
func (db *RockDB) SMembers(key []byte) ([][]byte, error) {
- num, err := db.sGetSize(key)
+ tn := time.Now().UnixNano()
+ num, err := db.sGetSize(tn, key, true)
if err != nil {
return nil, err
}
- return db.sMembersN(key, int(num))
+ if num == 0 {
+ return nil, nil
+ }
+ return db.sMembersN(tn, key, int(num))
+}
+
+// we do not use rand here; the first members in iteration order are returned
+func (db *RockDB) SRandMembers(key []byte, count int64) ([][]byte, error) {
+ tn := time.Now().UnixNano()
+ return db.sMembersN(tn, key, int(count))
}
-func (db *RockDB) sMembersN(key []byte, num int) ([][]byte, error) {
+func (db *RockDB) sMembersN(tn int64, key []byte, num int) ([][]byte, error) {
if num > MAX_BATCH_NUM {
return nil, errTooMuchBatchSize
}
- table, rk, err := extractTableFromRedisKey(key)
+ if num <= 0 {
+ return nil, common.ErrInvalidArgs
+ }
+
+ keyInfo, err := db.getCollVerKeyForRange(tn, SetType, key, true)
if err != nil {
return nil, err
}
- start := sEncodeStartKey(table, rk)
- stop := sEncodeStopKey(table, rk)
+ if keyInfo.IsNotExistOrExpired() {
+ return [][]byte{}, nil
+ }
+ preAlloc := num
+ oldh := keyInfo.OldHeader
+ if len(oldh.UserData) < 8 {
+ return nil, errIntNumber
+ }
+ n, err := Int64(oldh.UserData[:8], nil)
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ return [][]byte{}, nil
+ }
+ if n > 0 && n < int64(preAlloc) {
+ preAlloc = int(n)
+ }
+ // TODO: use pool for large alloc
+ v := make([][]byte, 0, preAlloc)
- v := make([][]byte, 0, num)
+ start := keyInfo.RangeStart
+ stop := keyInfo.RangeEnd
- it, err := NewDBRangeIterator(db.eng, start, stop, common.RangeROpen, false)
+ it, err := db.NewDBRangeIterator(start, stop, common.RangeROpen, false)
if err != nil {
return nil, err
}
@@ -370,7 +344,7 @@ func (db *RockDB) sMembersN(key []byte, num int) ([][]byte, error) {
}
func (db *RockDB) SPop(ts int64, key []byte, count int) ([][]byte, error) {
- vals, err := db.sMembersN(key, count)
+ vals, err := db.sMembersN(ts, key, count)
if err != nil {
return nil, err
}
@@ -380,27 +354,36 @@ func (db *RockDB) SPop(ts int64, key []byte, count int) ([][]byte, error) {
}
func (db *RockDB) SRem(ts int64, key []byte, args ...[]byte) (int64, error) {
- table, rk, _ := extractTableFromRedisKey(key)
- if len(table) == 0 {
- return 0, errTableName
+ if len(args) == 0 {
+ return 0, nil
+ }
+ if len(args) > MAX_BATCH_NUM {
+ return 0, errTooMuchBatchSize
}
-
wb := db.wb
- wb.Clear()
+ defer wb.Clear()
+ keyInfo, err := db.GetCollVersionKey(ts, SetType, key, false)
+ if err != nil {
+ return 0, err
+ }
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
+ oldh := keyInfo.OldHeader
var ek []byte
- var v []byte
- var err error
var num int64 = 0
for i := 0; i < len(args); i++ {
- if err := checkSetKMSize(key, args[i]); err != nil {
+ if err := checkCollKFSize(key, args[i]); err != nil {
return 0, err
}
ek = sEncodeSetKey(table, rk, args[i])
- v, err = db.eng.GetBytesNoLock(db.defaultReadOpts, ek)
- if v == nil {
+ vok, err := db.ExistNoLock(ek)
+ if err != nil {
+ return 0, err
+ }
+ if !vok {
continue
} else {
num++
@@ -408,88 +391,94 @@ func (db *RockDB) SRem(ts int64, key []byte, args ...[]byte) (int64, error) {
}
}
- if newNum, err := db.sIncrSize(ts, key, -num, wb); err != nil {
+ newNum, err := db.sIncrSize(ts, key, oldh, -num, wb)
+ if err != nil {
return 0, err
- } else if num > 0 && newNum == 0 {
+ }
+
+ if num > 0 && newNum == 0 {
db.IncrTableKeyCount(table, -1, wb)
- db.delExpire(SetType, key, wb)
}
+ if newNum == 0 {
+ _, err := db.delExpire(SetType, key, nil, false, wb)
+ if err != nil {
+ return 0, err
+ }
+ }
+ db.topLargeCollKeys.Update(key, int(newNum))
- err = db.eng.Write(db.defaultWriteOpts, wb)
+ err = db.rockEng.Write(wb)
return num, err
}
-func (db *RockDB) SClear(key []byte) (int64, error) {
+func (db *RockDB) SClear(ts int64, key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
- wb := db.wb
- wb.Clear()
- num := db.sDelete(key, wb)
- err := db.eng.Write(db.defaultWriteOpts, wb)
- return num, err
+ num, err := db.sDelete(ts, key, db.wb)
+ if err != nil {
+ return 0, err
+ }
+ err = db.CommitBatchWrite()
+ if num > 0 {
+ return 1, err
+ }
+ return 0, err
}
func (db *RockDB) SMclear(keys ...[]byte) (int64, error) {
- if len(keys) >= MAX_BATCH_NUM {
+ if len(keys) > MAX_BATCH_NUM {
return 0, errTooMuchBatchSize
}
- wb := gorocksdb.NewWriteBatch()
+ wb := db.rockEng.NewWriteBatch()
defer wb.Destroy()
+ cnt := 0
for _, key := range keys {
if err := checkKeySize(key); err != nil {
return 0, err
}
- db.sDelete(key, wb)
+ n, err := db.sDelete(0, key, wb)
+ if err != nil {
+ return 0, err
+ }
+ if n > 0 {
+ cnt++
+ }
}
- err := db.eng.Write(db.defaultWriteOpts, wb)
- return int64(len(keys)), err
+ err := db.rockEng.Write(wb)
+ return int64(cnt), err
}
-func (db *RockDB) sMclearWithBatch(wb *gorocksdb.WriteBatch, keys ...[]byte) error {
- if len(keys) >= MAX_BATCH_NUM {
+func (db *RockDB) sMclearWithBatch(wb engine.WriteBatch, keys ...[]byte) error {
+ if len(keys) > MAX_BATCH_NUM {
return errTooMuchBatchSize
}
for _, key := range keys {
if err := checkKeySize(key); err != nil {
return err
}
- db.sDelete(key, wb)
+ _, err := db.sDelete(0, key, wb)
+ if err != nil {
+ return err
+ }
}
return nil
}
-func (db *RockDB) SExpire(key []byte, duration int64) (int64, error) {
- if exists, err := db.SKeyExists(key); err != nil || exists != 1 {
+func (db *RockDB) SKeyExists(key []byte) (int64, error) {
+ if err := checkKeySize(key); err != nil {
return 0, err
- } else {
- if err2 := db.expire(SetType, key, duration); err2 != nil {
- return 0, err2
- } else {
- return 1, nil
- }
}
-}
-func (db *RockDB) SPersist(key []byte) (int64, error) {
- if exists, err := db.SKeyExists(key); err != nil || exists != 1 {
- return 0, err
- }
+ return db.collKeyExists(SetType, key)
+}
- if ttl, err := db.ttl(SetType, key); err != nil || ttl < 0 {
- return 0, err
- }
+func (db *RockDB) SExpire(ts int64, key []byte, duration int64) (int64, error) {
+ return db.collExpire(ts, SetType, key, duration)
+}
- db.wb.Clear()
- if err := db.delExpire(SetType, key, db.wb); err != nil {
- return 0, err
- } else {
- if err2 := db.eng.Write(db.defaultWriteOpts, db.wb); err2 != nil {
- return 0, err2
- } else {
- return 1, nil
- }
- }
+func (db *RockDB) SPersist(ts int64, key []byte) (int64, error) {
+ return db.collPersist(ts, SetType, key)
}
diff --git a/rockredis/t_set_test.go b/rockredis/t_set_test.go
index d31e75d9..94432f83 100644
--- a/rockredis/t_set_test.go
+++ b/rockredis/t_set_test.go
@@ -2,9 +2,27 @@ package rockredis
import (
"os"
+ "path"
+ "strconv"
"testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
)
+func convertRedisKeyToDBSKey(key []byte, member []byte) ([]byte, error) {
+ table, rk, err := extractTableFromRedisKey(key)
+ if err != nil {
+ return nil, err
+ }
+ if err := checkCollKFSize(rk, member); err != nil {
+ return nil, err
+ }
+ dbKey := sEncodeSetKey(table, rk, member)
+ return dbKey, nil
+}
+
func TestSetCodec(t *testing.T) {
db := getTestDB(t)
defer os.RemoveAll(db.cfg.DataDir)
@@ -81,6 +99,9 @@ func TestDBSetWithEmptyMember(t *testing.T) {
} else if len(v) != 0 {
t.Fatal(string(v[0]))
}
+ n, err := db.SCard(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
}
func TestDBSet(t *testing.T) {
@@ -100,6 +121,12 @@ func TestDBSet(t *testing.T) {
} else if n != 1 {
t.Fatal(n)
}
+ tbcnt, err := db.GetTableKeyCount([]byte("test"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), tbcnt)
+ n, err := db.SAdd(0, key, member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
if cnt, err := db.SCard(key); err != nil {
t.Fatal(err)
@@ -127,25 +154,168 @@ func TestDBSet(t *testing.T) {
db.SAdd(0, key1, member1, member2)
- if n, err := db.SClear(key1); err != nil {
+ tbcnt, err = db.GetTableKeyCount([]byte("test"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), tbcnt)
+ if n, err := db.SClear(0, key1); err != nil {
t.Fatal(err)
- } else if n != 2 {
+ } else if n != 1 {
t.Fatal(n)
}
+ tbcnt, err = db.GetTableKeyCount([]byte("test"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), tbcnt)
db.SAdd(0, key1, member1, member2)
db.SAdd(0, key2, member1, member2, []byte("xxx"))
if n, _ := db.SCard(key2); n != 3 {
t.Fatal(n)
}
+
+ tbcnt, err = db.GetTableKeyCount([]byte("test"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), tbcnt)
if n, err := db.SMclear(key1, key2); err != nil {
t.Fatal(err)
} else if n != 2 {
t.Fatal(n)
}
+ n, err = db.SCard(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.SCard(key2)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ tbcnt, err = db.GetTableKeyCount([]byte("test"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), tbcnt)
db.SAdd(0, key2, member1, member2)
+
+ tbcnt, err = db.GetTableKeyCount([]byte("test"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), tbcnt)
+
+ n, err = db.SCard(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.SCard(key2)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+}
+
+func TestDBSetClearInCompactTTL(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key := []byte("test:testdb_set_clear_compact_a")
+ member := []byte("member")
+ memberNew := []byte("memberNew")
+ key1 := []byte("test:testdb_set_clear_compact_a1")
+ member1 := []byte("testdb_set_m1")
+ member2 := []byte("testdb_set_m2")
+
+ ts := time.Now().UnixNano()
+ db.SAdd(ts, key, member)
+ db.SAdd(ts, key, member)
+
+ n, err := db.SCard(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.SIsMember(key, member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ v, err := db.SMembers(key)
+ assert.Nil(t, err)
+ assert.Equal(t, member, v[0])
+
+ ts = time.Now().UnixNano()
+ n, err = db.SClear(ts, key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.SCard(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.SIsMember(key, member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ v, err = db.SMembers(key)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(v))
+
+ // renew
+ ts = time.Now().UnixNano()
+ db.SAdd(ts, key, memberNew)
+ n, err = db.SCard(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.SIsMember(key, member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.SIsMember(key, memberNew)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ v, err = db.SMembers(key)
+ assert.Nil(t, err)
+ assert.Equal(t, memberNew, v[0])
+
+ ts = time.Now().UnixNano()
+ db.SAdd(ts, key1, member1, member2)
+
+ ts = time.Now().UnixNano()
+ n, err = db.SClear(ts, key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.SCard(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.SIsMember(key1, member1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ v, err = db.SMembers(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(v))
+
+ n, err = db.SCard(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ ts = time.Now().UnixNano()
+ db.SAdd(ts, key1, member, memberNew)
+
+ n, err = db.SCard(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ n, err = db.SIsMember(key1, member1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.SIsMember(key1, member2)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.SIsMember(key1, member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.SIsMember(key1, memberNew)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ v, err = db.SMembers(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, 2, len(v))
}
func TestSKeyExists(t *testing.T) {
@@ -166,7 +336,6 @@ func TestSKeyExists(t *testing.T) {
} else if n != 1 {
t.Fatal("invalid value ", n)
}
-
}
func TestDBSPop(t *testing.T) {
@@ -202,4 +371,66 @@ func TestDBSPop(t *testing.T) {
if vals, _ := db.SMembers(key); len(vals) != 0 {
t.Errorf("should empty set")
}
+
+ n, err := db.SCard(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+}
+
+func BenchmarkSIsMember(b *testing.B) {
+ db := getTestDBForBench()
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+ key := []byte("test:sismember_bench")
+
+ for i := 0; i < b.N+1000; i++ {
+ if i%2 == 0 {
+ continue
+ }
+ db.SAdd(0, key, []byte("hello"+strconv.Itoa(i)))
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ db.SIsMember(key, []byte("hello"+strconv.Itoa(i)))
+ }
+ b.StopTimer()
+}
+
+func BenchmarkSAddAndSPop(b *testing.B) {
+ db := getTestDBForBench()
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+ key := []byte("test:spop_bench")
+
+ b.ResetTimer()
+
+ stopC := make(chan bool)
+ go func() {
+ tmp := path.Join(db.cfg.DataDir, "snapshot")
+ os.MkdirAll(tmp, common.DIR_PERM)
+ for i := 0; ; i++ {
+ db.SIsMember(key, []byte("hello"+strconv.Itoa(i%b.N)))
+ db.SMembers(key)
+ //ck, _ := db.rockEng.NewCheckpoint(false)
+ //ck.Save(tmp, nil)
+ select {
+ case <-stopC:
+ return
+ default:
+ }
+ }
+ }()
+ for i := 0; i < b.N+1000; i++ {
+ db.SAdd(0, key, []byte("hello"+strconv.Itoa(i)))
+ if i%121 == 0 {
+ db.SPop(0, key, 100)
+ }
+ }
+
+ for i := 0; i < b.N; i++ {
+ db.SPop(0, key, 100)
+ }
+ close(stopC)
+ b.StopTimer()
}
diff --git a/rockredis/t_table.go b/rockredis/t_table.go
index 3fd7a0cc..09844b85 100644
--- a/rockredis/t_table.go
+++ b/rockredis/t_table.go
@@ -5,8 +5,8 @@ import (
"encoding/binary"
"errors"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/gorocksdb"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
)
// Note: since different data structure has different prefix,
@@ -54,8 +54,7 @@ func extractTableFromRedisKey(key []byte) ([]byte, []byte, error) {
}
func packRedisKey(table, key []byte) []byte {
- var newKey []byte
-
+ newKey := make([]byte, 0, len(table)+len(key)+1)
newKey = append(newKey, table...)
newKey = append(newKey, tableStartSep)
newKey = append(newKey, key...)
@@ -168,7 +167,7 @@ func (db *RockDB) GetTables() [][]byte {
ch := make([][]byte, 0, 100)
s := encodeTableMetaStartKey()
e := encodeTableMetaStopKey()
- it, err := NewDBRangeIterator(db.eng, s, e, common.RangeOpen, false)
+ it, err := db.NewDBRangeIterator(s, e, common.RangeOpen, false)
if err != nil {
return nil
}
@@ -184,7 +183,7 @@ func (db *RockDB) GetTables() [][]byte {
return ch
}
-func (db *RockDB) DelTableKeyCount(table []byte, wb *gorocksdb.WriteBatch) error {
+func (db *RockDB) DelTableKeyCount(table []byte, wb engine.WriteBatch) error {
if !db.cfg.EnableTableCounter {
return nil
}
@@ -193,7 +192,7 @@ func (db *RockDB) DelTableKeyCount(table []byte, wb *gorocksdb.WriteBatch) error
return nil
}
-func (db *RockDB) IncrTableKeyCount(table []byte, delta int64, wb *gorocksdb.WriteBatch) error {
+func (db *RockDB) IncrTableKeyCount(table []byte, delta int64, wb engine.WriteBatch) error {
if !db.cfg.EnableTableCounter {
return nil
}
@@ -206,7 +205,7 @@ func (db *RockDB) GetTableKeyCount(table []byte) (int64, error) {
tm := encodeTableMetaKey(table)
var err error
var size uint64
- if size, err = GetRocksdbUint64(db.eng.GetBytes(db.defaultReadOpts, tm)); err != nil {
+ if size, err = GetRocksdbUint64(db.GetBytes(tm)); err != nil {
}
return int64(size), err
}
@@ -250,7 +249,7 @@ func (db *RockDB) GetHsetIndexTables() [][]byte {
ch := make([][]byte, 0, 100)
s := encodeTableIndexMetaStartKey(hsetIndexMeta)
e := encodeTableIndexMetaStopKey(hsetIndexMeta)
- it, err := NewDBRangeIterator(db.eng, s, e, common.RangeOpen, false)
+ it, err := db.NewDBRangeIterator(s, e, common.RangeOpen, false)
if err != nil {
return nil
}
@@ -270,15 +269,15 @@ func (db *RockDB) GetHsetIndexTables() [][]byte {
func (db *RockDB) GetTableHsetIndexValue(table []byte) ([]byte, error) {
key := encodeTableIndexMetaKey(table, hsetIndexMeta)
- return db.eng.GetBytes(db.defaultReadOpts, key)
+ return db.GetBytes(key)
}
func (db *RockDB) SetTableHsetIndexValue(table []byte, value []byte) error {
// this may not run in raft loop
// so we should use new db write batch here
key := encodeTableIndexMetaKey(table, hsetIndexMeta)
- wb := gorocksdb.NewWriteBatch()
+ wb := db.rockEng.NewWriteBatch()
defer wb.Destroy()
wb.Put(key, value)
- return db.eng.Write(db.defaultWriteOpts, wb)
+ return db.rockEng.Write(wb)
}
diff --git a/rockredis/t_ttl.go b/rockredis/t_ttl.go
index 82373b2e..9347e841 100644
--- a/rockredis/t_ttl.go
+++ b/rockredis/t_ttl.go
@@ -7,8 +7,8 @@ import (
"sync"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/gorocksdb"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
)
var (
@@ -97,38 +97,92 @@ type expiredMetaBuffer interface {
}
type expiration interface {
- rawExpireAt(byte, []byte, int64, *gorocksdb.WriteBatch) error
- expireAt(byte, []byte, int64) error
- ttl(byte, []byte) (int64, error)
- delExpire(byte, []byte, *gorocksdb.WriteBatch) error
+ // key here should be full key in ns:table:realK
+ getRawValueForHeader(ts int64, dt byte, key []byte) ([]byte, error)
+ ExpireAt(dt byte, key []byte, rawValue []byte, when int64) (int64, error)
+ rawExpireAt(dt byte, key []byte, rawValue []byte, when int64, wb engine.WriteBatch) ([]byte, error)
+ // should be called only in read operations
+ ttl(ts int64, dt byte, key []byte, rawValue []byte) (int64, error)
+ // if called in the raft write loop, the lock should be avoided; otherwise the lock should be used
+ // the key should be treated as not expired if the value does not exist
+ isExpired(ts int64, dt byte, key []byte, rawValue []byte, useLock bool) (bool, error)
+ decodeRawValue(dt byte, rawValue []byte) (*headerMetaValue, error)
+ encodeToRawValue(dt byte, h *headerMetaValue) []byte
+ delExpire(dt byte, key []byte, rawValue []byte, keepValue bool, wb engine.WriteBatch) ([]byte, error)
+ renewOnExpired(ts int64, dt byte, key []byte, oldh *headerMetaValue)
check(common.ExpiredDataBuffer, chan struct{}) error
Start()
Stop()
Destroy()
+
+ // key here should be key without namespace in table:realK
+ encodeToVersionKey(dt byte, h *headerMetaValue, key []byte) []byte
+ decodeFromVersionKey(dt byte, vk []byte) ([]byte, int64, error)
}
-func (db *RockDB) expire(dataType byte, key []byte, duration int64) error {
- return db.expiration.expireAt(dataType, key, time.Now().Unix()+duration)
+func (db *RockDB) expire(ts int64, dataType byte, key []byte, rawValue []byte, duration int64) (int64, error) {
+ var err error
+ if rawValue == nil {
+ rawValue, err = db.expiration.getRawValueForHeader(ts, dataType, key)
+ if err != nil {
+ return 0, err
+ }
+ }
+ return db.expiration.ExpireAt(dataType, key, rawValue, ts/int64(time.Second)+duration)
}
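
expire now works from an absolute nanosecond timestamp instead of the local clock, turning it into a Unix-seconds deadline with ts/int64(time.Second)+duration. A small worked sketch of that conversion (standalone, illustrative only):

package main

import (
	"fmt"
	"time"
)

// expireAtSeconds converts a nanosecond timestamp plus a duration in
// seconds into the absolute expire-at value, exactly as
// ts/int64(time.Second)+duration does in the patch.
func expireAtSeconds(tsNano, durationSec int64) int64 {
	return tsNano/int64(time.Second) + durationSec
}

func main() {
	ts := time.Date(2020, 9, 13, 12, 26, 40, 0, time.UTC).UnixNano()
	fmt.Println(ts / int64(time.Second)) // 1600000000
	fmt.Println(expireAtSeconds(ts, 60)) // 1600000060: expires one minute later
}
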
func (db *RockDB) KVTtl(key []byte) (t int64, err error) {
- return db.ttl(KVType, key)
+ tn := time.Now().UnixNano()
+ v, err := db.expiration.getRawValueForHeader(tn, KVType, key)
+ if err != nil {
+ return -1, err
+ }
+ return db.ttl(tn, KVType, key, v)
}
func (db *RockDB) HashTtl(key []byte) (t int64, err error) {
- return db.ttl(HashType, key)
+ tn := time.Now().UnixNano()
+ v, err := db.expiration.getRawValueForHeader(tn, HashType, key)
+ if err != nil {
+ return -1, err
+ }
+ return db.ttl(tn, HashType, key, v)
+}
+
+func (db *RockDB) BitTtl(key []byte) (t int64, err error) {
+ tn := time.Now().UnixNano()
+ v, err := db.expiration.getRawValueForHeader(tn, BitmapType, key)
+ if err != nil {
+ return -1, err
+ }
+ return db.ttl(tn, BitmapType, key, v)
}
func (db *RockDB) ListTtl(key []byte) (t int64, err error) {
- return db.ttl(ListType, key)
+ tn := time.Now().UnixNano()
+ v, err := db.expiration.getRawValueForHeader(tn, ListType, key)
+ if err != nil {
+ return -1, err
+ }
+ return db.ttl(tn, ListType, key, v)
}
func (db *RockDB) SetTtl(key []byte) (t int64, err error) {
- return db.ttl(SetType, key)
+ tn := time.Now().UnixNano()
+ v, err := db.expiration.getRawValueForHeader(tn, SetType, key)
+ if err != nil {
+ return -1, err
+ }
+ return db.ttl(tn, SetType, key, v)
}
func (db *RockDB) ZSetTtl(key []byte) (t int64, err error) {
- return db.ttl(ZSetType, key)
+ tn := time.Now().UnixNano()
+ v, err := db.expiration.getRawValueForHeader(tn, ZSetType, key)
+ if err != nil {
+ return -1, err
+ }
+ return db.ttl(tn, ZSetType, key, v)
}
type TTLChecker struct {
@@ -160,6 +214,11 @@ func (c *TTLChecker) setNextCheckTime(when int64, force bool) {
c.Unlock()
}
+// do not run while iterating or while holding another db read lock
+func (c *TTLChecker) compactTTLMeta() {
+ c.db.CompactOldExpireData()
+}
+
func (c *TTLChecker) check(expiredBuf expiredMetaBuffer, stop chan struct{}) (err error) {
defer func() {
if e := recover(); e != nil {
@@ -189,7 +248,7 @@ func (c *TTLChecker) check(expiredBuf expiredMetaBuffer, stop chan struct{}) (er
var scanned int64
checkStart := time.Now()
- it, err := NewDBRangeLimitIterator(c.db.eng, minKey, maxKey,
+ it, err := c.db.NewDBRangeLimitIterator(minKey, maxKey,
common.RangeROpen, 0, -1, false)
if err != nil {
c.setNextCheckTime(now, false)
@@ -200,6 +259,7 @@ func (c *TTLChecker) check(expiredBuf expiredMetaBuffer, stop chan struct{}) (er
}
defer it.Close()
+ tableStats := make(map[string]int, 100)
for ; it.Valid(); it.Next() {
if scanned%100 == 0 {
select {
@@ -243,13 +303,27 @@ func (c *TTLChecker) check(expiredBuf expiredMetaBuffer, stop chan struct{}) (er
nc = now
break
}
+ table, _, inerr := extractTableFromRedisKey(k)
+ if inerr == nil && len(tableStats) < 100 {
+ n, _ := tableStats[string(table)]
+ n++
+ tableStats[string(table)] = n
+ }
}
c.setNextCheckTime(nc, true)
- checkCost := time.Since(checkStart).Nanoseconds() / 1000
- if dbLog.Level() >= common.LOG_DEBUG || eCount > 10000 || checkCost >= time.Second.Nanoseconds() {
- dbLog.Infof("[%d/%d] keys have expired during ttl checking, cost:%d us", eCount, scanned, checkCost)
+ checkCost := time.Since(checkStart)
+ if dbLog.Level() >= common.LOG_DEBUG || eCount > 10000 || checkCost >= time.Second {
+ dbLog.Infof("[%d/%d] keys have expired during ttl checking, cost:%s, tables: %v", eCount, scanned, checkCost, len(tableStats))
+ // only log the stats when the expired data is concentrated in a few tables (which means most expired keys share the same tables)
+ if len(tableStats) < 10 || dbLog.Level() >= common.LOG_DEBUG {
+ dbLog.Infof("expired table stats %v", tableStats)
+ }
+ if checkCost > time.Minute/2 {
+ // the scan took too long; maybe compact to reduce the cpu cost
+ err = errTTLCheckTooLong
+ }
}
return err
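
The check loop above tallies expired keys per table but stops once 100 distinct tables have been seen, so the bookkeeping stays bounded for a long scan. A standalone sketch of that capped aggregation (countByTable is a hypothetical helper, not project code):

package main

import "fmt"

// countByTable aggregates expired keys per table; like the check loop in
// the patch, it stops updating once the map holds 100 entries so the
// per-scan bookkeeping cannot grow without bound.
func countByTable(tables []string) map[string]int {
	stats := make(map[string]int, 100)
	for _, table := range tables {
		if len(stats) < 100 {
			stats[table]++
		}
	}
	return stats
}

func main() {
	expired := []string{"orders", "orders", "sessions", "orders"}
	fmt.Println(countByTable(expired)) // map[orders:3 sessions:1]
}
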
diff --git a/rockredis/t_ttl_c.go b/rockredis/t_ttl_c.go
deleted file mode 100644
index 39d4ae3c..00000000
--- a/rockredis/t_ttl_c.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package rockredis
-
-import (
- "time"
-
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/gorocksdb"
-)
-
-type consistencyExpiration struct {
- *TTLChecker
- db *RockDB
-}
-
-func newConsistencyExpiration(db *RockDB) *consistencyExpiration {
- exp := &consistencyExpiration{
- db: db,
- TTLChecker: newTTLChecker(db),
- }
- return exp
-}
-
-func (exp *consistencyExpiration) expireAt(dataType byte, key []byte, when int64) error {
- mk := expEncodeMetaKey(dataType, key)
-
- wb := exp.db.wb
- wb.Clear()
-
- if t, err := Int64(exp.db.eng.GetBytes(exp.db.defaultReadOpts, mk)); err != nil {
- return err
- } else if t != 0 {
- wb.Delete(expEncodeTimeKey(dataType, key, t))
- }
-
- tk := expEncodeTimeKey(dataType, key, when)
-
- wb.Put(tk, mk)
- wb.Put(mk, PutInt64(when))
-
- if err := exp.db.eng.Write(exp.db.defaultWriteOpts, wb); err != nil {
- return err
- } else {
- exp.setNextCheckTime(when, false)
- return nil
- }
-}
-
-func (exp *consistencyExpiration) rawExpireAt(dataType byte, key []byte, when int64, wb *gorocksdb.WriteBatch) error {
- mk := expEncodeMetaKey(dataType, key)
-
- if t, err := Int64(exp.db.eng.GetBytes(exp.db.defaultReadOpts, mk)); err != nil {
- return err
- } else if t != 0 {
- wb.Delete(expEncodeTimeKey(dataType, key, t))
- }
-
- tk := expEncodeTimeKey(dataType, key, when)
-
- wb.Put(tk, mk)
- wb.Put(mk, PutInt64(when))
-
- exp.setNextCheckTime(when, false)
- return nil
-}
-
-func (exp *consistencyExpiration) ttl(dataType byte, key []byte) (int64, error) {
- mk := expEncodeMetaKey(dataType, key)
-
- t, err := Int64(exp.db.eng.GetBytes(exp.db.defaultReadOpts, mk))
- if err != nil || t == 0 {
- t = -1
- } else {
- t -= time.Now().Unix()
- if t <= 0 {
- t = -1
- }
- //TODO, if the key has expired, remove it right now
- // if t == -1 : to remove ????
- }
- return t, err
-}
-
-func (exp *consistencyExpiration) Start() {
-}
-
-func (exp *consistencyExpiration) Destroy() {
-}
-
-func (exp *consistencyExpiration) Stop() {
-}
-
-func (exp *consistencyExpiration) delExpire(dataType byte, key []byte, wb *gorocksdb.WriteBatch) error {
- mk := expEncodeMetaKey(dataType, key)
-
- if t, err := Int64(exp.db.eng.GetBytes(exp.db.defaultReadOpts, mk)); err != nil {
- return err
- } else if t == 0 {
- return nil
- } else {
- tk := expEncodeTimeKey(dataType, key, t)
- wb.Delete(tk)
- wb.Delete(mk)
- return nil
- }
-}
-
-type expiredBufferWrapper struct {
- internal common.ExpiredDataBuffer
-}
-
-func (wrapper *expiredBufferWrapper) Write(meta *expiredMeta) error {
- if dt, key, _, err := expDecodeTimeKey(meta.timeKey); err != nil {
- return err
- } else {
- return wrapper.internal.Write(dataType2CommonType(dt), key)
- }
-}
-
-func (exp *consistencyExpiration) check(buffer common.ExpiredDataBuffer, stop chan struct{}) error {
- wrapper := &expiredBufferWrapper{internal: buffer}
- return exp.TTLChecker.check(wrapper, stop)
-}
diff --git a/rockredis/t_ttl_c_test.go b/rockredis/t_ttl_c_test.go
deleted file mode 100644
index 106a5053..00000000
--- a/rockredis/t_ttl_c_test.go
+++ /dev/null
@@ -1,385 +0,0 @@
-package rockredis
-
-import (
- "bytes"
- "math/rand"
- "os"
- "strconv"
- "testing"
- "time"
-
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/gorocksdb"
-)
-
-func TestKVTTL_C(t *testing.T) {
- db := getTestDBWithExpirationPolicy(t, common.ConsistencyDeletion)
- defer os.RemoveAll(db.cfg.DataDir)
- defer db.Close()
-
- key1 := []byte("test:testdbTTL_kv_c")
- var ttl1 int64 = rand.Int63()
-
- if v, err := db.Expire(key1, ttl1); err != nil {
- t.Fatal(err)
- } else if v != 0 {
- t.Fatal("return value from expire of not exist key != 0")
- }
-
- if v, err := db.Persist(key1); err != nil {
- t.Fatal(err)
- } else if v != 0 {
- t.Fatal("return value from persist of not exist key != 0")
- }
-
- if err := db.KVSet(0, key1, []byte("hello world 1")); err != nil {
- t.Fatal(err)
- }
-
- if v, err := db.Expire(key1, ttl1); err != nil {
- t.Fatal(err)
- } else if v != 1 {
- t.Fatal("return value from expire != 1")
- }
-
- if v, err := db.KVTtl(key1); err != nil {
- t.Fatal(err)
- } else if v != ttl1 {
- t.Fatal("ttl != expire")
- }
-
- if v, err := db.Persist(key1); err != nil {
- t.Fatal(err)
- } else if v != 1 {
- t.Fatal("return value from persist != 1")
- }
-
- if v, err := db.KVTtl(key1); err != nil {
- t.Fatal(err)
- } else if v != -1 {
- t.Fatal("KVPersist do not clear the ttl")
- }
-
- testValue := []byte("test value for SetEx command")
- if err := db.SetEx(0, key1, ttl1, testValue); err != nil {
- t.Fatal(err)
- }
-
- if v, err := db.KVGet(key1); err != nil {
- t.Fatal(err)
- } else if !bytes.Equal(v, testValue) {
- t.Fatal("SetEx: gotten value != set value")
- }
-
- if v, err := db.KVTtl(key1); err != nil {
- t.Fatal(err)
- } else if v != ttl1 {
- t.Fatal("ttl != setex")
- }
-}
-
-func TestHashTTL_C(t *testing.T) {
- db := getTestDBWithExpirationPolicy(t, common.ConsistencyDeletion)
- defer os.RemoveAll(db.cfg.DataDir)
- defer db.Close()
-
- hashKey := []byte("test:testdbTTL_hash_c")
- var hashTTL int64 = rand.Int63()
-
- if v, err := db.HashTtl(hashKey); err != nil {
- t.Fatal(err)
- } else if v != -1 {
- t.Fatal("ttl of not exist hash key is not -1")
- }
-
- if v, err := db.HExpire(hashKey, hashTTL); err != nil {
- t.Fatal(err)
- } else if v != 0 {
- t.Fatal("return value from expire of not exist hash key != 0")
- }
-
- if v, err := db.HPersist(hashKey); err != nil {
- t.Fatal(err)
- } else if v != 0 {
- t.Fatal("return value from hpersist of not exist hash key != 0")
- }
-
- hash_val := []common.KVRecord{
- {Key: []byte("field0"), Value: []byte("value0")},
- {Key: []byte("field1"), Value: []byte("value1")},
- {Key: []byte("field2"), Value: []byte("value2")},
- }
-
- if err := db.HMset(0, hashKey, hash_val...); err != nil {
- t.Fatal(err)
- }
-
- if v, err := db.HExpire(hashKey, hashTTL); err != nil {
- t.Fatal(err)
- } else if v != 1 {
- t.Fatal("return value from hexpire != 1")
- }
-
- if v, err := db.HashTtl(hashKey); err != nil {
- t.Fatal(err)
- } else if v != hashTTL {
- t.Fatal("ttl != expire")
- }
-
- if v, err := db.HPersist(hashKey); err != nil {
- t.Fatal(err)
- } else if v != 1 {
- t.Fatal("return value from hpersist is != 1")
- }
-
- if v, err := db.HashTtl(hashKey); err != nil {
- t.Fatal(err)
- } else if v != -1 {
- t.Fatal("HashPersist do not clear the ttl")
- }
-}
-
-func TestListTTL_C(t *testing.T) {
- db := getTestDBWithExpirationPolicy(t, common.ConsistencyDeletion)
- defer os.RemoveAll(db.cfg.DataDir)
- defer db.Close()
-
- listKey := []byte("test:testdbTTL_list_c")
- var listTTL int64 = rand.Int63()
-
- if v, err := db.ListTtl(listKey); err != nil {
- t.Fatal(err)
- } else if v != -1 {
- t.Fatal("ttl of not exist list key is not -1")
- }
-
- if v, err := db.LExpire(listKey, listTTL); err != nil {
- t.Fatal(err)
- } else if v != 0 {
- t.Fatal("return value from expire of not exist list key != 0")
- }
-
- if v, err := db.LPersist(listKey); err != nil {
- t.Fatal(err)
- } else if v != 0 {
- t.Fatal("return value from lpersist of not exist list key != 0")
- }
-
- if _, err := db.LPush(0, listKey, []byte("this"), []byte("is"), []byte("list"),
- []byte("ttl"), []byte("test")); err != nil {
- t.Fatal(err)
- }
-
- if v, err := db.LExpire(listKey, listTTL); err != nil {
- t.Fatal(err)
- } else if v != 1 {
- t.Fatal("return value from lexpire != 1")
- }
-
- if v, err := db.ListTtl(listKey); err != nil {
- t.Fatal(err)
- } else if v != listTTL {
- t.Fatal("ttl != expire")
- }
-
- if v, err := db.LPersist(listKey); err != nil {
- t.Fatal(err)
- } else if v != 1 {
- t.Fatal("return value from lpersist != 1")
- }
-
- if v, err := db.ListTtl(listKey); err != nil {
- t.Fatal(err)
- } else if v != -1 {
- t.Fatal("ListPersist do not clear the ttl")
- }
-}
-
-func TestSetTTL_C(t *testing.T) {
- db := getTestDBWithExpirationPolicy(t, common.ConsistencyDeletion)
- defer os.RemoveAll(db.cfg.DataDir)
- defer db.Close()
-
- setKey := []byte("test:testdbTTL_set_c")
- var setTTL int64 = rand.Int63()
-
- if v, err := db.SetTtl(setKey); err != nil {
- t.Fatal(err)
- } else if v != -1 {
- t.Fatal("ttl of not exist set key is not -1")
- }
-
- if v, err := db.SExpire(setKey, setTTL); err != nil {
- t.Fatal(err)
- } else if v != 0 {
- t.Fatal("return value from expire of not exist set key != 0")
- }
-
- if v, err := db.SPersist(setKey); err != nil {
- t.Fatal(err)
- } else if v != 0 {
- t.Fatal("return value from spersist of not exist set key != 0")
- }
-
- if _, err := db.SAdd(0, setKey, []byte("this"), []byte("is"), []byte("set"),
- []byte("ttl"), []byte("test")); err != nil {
- t.Fatal(err)
- }
-
- if v, err := db.SExpire(setKey, setTTL); err != nil {
- t.Fatal(err)
- } else if v != 1 {
- t.Fatal("return value from sexpire != 1")
- }
-
- if v, err := db.SetTtl(setKey); err != nil {
- t.Fatal(err)
- } else if v != setTTL {
- t.Fatal("ttl != expire")
- }
-
- if v, err := db.SPersist(setKey); err != nil {
- t.Fatal(err)
- } else if v != 1 {
- t.Fatal("return value from spersist!= 1")
- }
-
- if v, err := db.SetTtl(setKey); err != nil {
- t.Fatal(err)
- } else if v != -1 {
- t.Fatal("SetPersist do not clear the ttl")
- }
-}
-
-func TestZSetTTL_C(t *testing.T) {
- db := getTestDBWithExpirationPolicy(t, common.ConsistencyDeletion)
- defer os.RemoveAll(db.cfg.DataDir)
- defer db.Close()
-
- zsetKey := []byte("test:testdbTTL_zset_c")
- var zsetTTL int64 = rand.Int63()
-
- if v, err := db.ZSetTtl(zsetKey); err != nil {
- t.Fatal(err)
- } else if v != -1 {
- t.Fatal("ttl of not exist zset key is not -1")
- }
-
- if v, err := db.ZExpire(zsetKey, zsetTTL); err != nil {
- t.Fatal(err)
- } else if v != 0 {
- t.Fatal("return value from expire of not exist zset key != 0")
- }
-
- if v, err := db.ZPersist(zsetKey); err != nil {
- t.Fatal(err)
- } else if v != 0 {
- t.Fatal("return value from zpersist of not exist zset key != 0")
- }
-
- members := []common.ScorePair{
- {Member: []byte("member1"), Score: 10},
- {Member: []byte("member2"), Score: 20},
- {Member: []byte("member3"), Score: 30},
- {Member: []byte("member4"), Score: 40},
- }
-
- if _, err := db.ZAdd(0, zsetKey, members...); err != nil {
- t.Fatal(err)
- }
-
- if v, err := db.ZExpire(zsetKey, zsetTTL); err != nil {
- t.Fatal(err)
- } else if v != 1 {
- t.Fatal("return value from zexpire != 1")
- }
-
- if v, err := db.ZSetTtl(zsetKey); err != nil {
- t.Fatal(err)
- } else if v != zsetTTL {
- t.Fatal("ttl != expire")
- }
-
- if v, err := db.ZPersist(zsetKey); err != nil {
- t.Fatal(err)
- } else if v != 1 {
- t.Fatal("return value from zpersist != 1")
- }
-
- if v, err := db.ZSetTtl(zsetKey); err != nil {
- t.Fatal(err)
- } else if v != -1 {
- t.Fatal("ZSetPersist do not clear the ttl")
- }
-}
-
-type TExpiredDataBuffer struct {
- db *RockDB
- wb *gorocksdb.WriteBatch
- kTypeMap map[string]byte
- expiredCount int
- t *testing.T
-}
-
-func (buff *TExpiredDataBuffer) Write(dt common.DataType, key []byte) error {
- buff.expiredCount += 1
- if kt, ok := buff.kTypeMap[string(key)]; !ok {
- buff.t.Fatalf("unknown expired key: %v", string(key))
- } else if dataType2CommonType(kt) != dt {
- buff.t.Fatalf("mismatched key-type, %s - %d, should be [%s - %d]", string(key), dt, string(key), dataType2CommonType(kt))
- } else {
- buff.wb.Clear()
- buff.db.delExpire(kt, key, buff.wb)
- buff.db.eng.Write(buff.db.defaultWriteOpts, buff.wb)
- delete(buff.kTypeMap, string(key))
- }
- return nil
-}
-
-func (buff *TExpiredDataBuffer) Full() bool {
- return false
-}
-
-func TestConsistencyTTLChecker(t *testing.T) {
- db := getTestDBWithExpirationPolicy(t, common.ConsistencyDeletion)
- defer os.RemoveAll(db.cfg.DataDir)
- defer db.Close()
-
- kTypeMap := make(map[string]byte)
- dataTypes := []byte{KVType, ListType, HashType, SetType, ZSetType}
-
- for i := 0; i < 10000*3+rand.Intn(10000); i++ {
- key := "test:ttl_checker_consistency:" + strconv.Itoa(i)
- dataType := dataTypes[rand.Int()%len(dataTypes)]
- kTypeMap[key] = dataType
- if err := db.expire(dataType, []byte(key), 2); err != nil {
- t.Fatal(err)
- }
- }
-
- time.Sleep(3 * time.Second)
- buffer := &TExpiredDataBuffer{
- t: t,
- db: db,
- wb: gorocksdb.NewWriteBatch(),
- kTypeMap: kTypeMap,
- }
-
- if err := db.CheckExpiredData(buffer, make(chan struct{})); err != nil {
- t.Fatal(err)
- }
-
- if len(kTypeMap) != 0 {
- t.Fatal("not all keys has expired")
- }
-
- buffer.expiredCount = 0
-
- if err := db.CheckExpiredData(buffer, make(chan struct{})); err != nil {
- t.Fatal(err)
- }
-
- if buffer.expiredCount != 0 {
- t.Fatal("find some keys expired after all the keys stored has expired and deleted")
- }
-}
diff --git a/rockredis/t_ttl_compact.go b/rockredis/t_ttl_compact.go
new file mode 100644
index 00000000..7409a44a
--- /dev/null
+++ b/rockredis/t_ttl_compact.go
@@ -0,0 +1,347 @@
+package rockredis
+
+import (
+ "encoding/binary"
+ "errors"
+ math "math"
+ "time"
+
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
+)
+
+var errHeaderMetaValue = errors.New("invalid header meta value")
+var errHeaderVersion = errors.New("invalid header version")
+var errExpOverflow = errors.New("expiration time overflow")
+var errInvalidVerKey = errors.New("invalid versioned key")
+
+const headerV1Len = 1 + 4 + 8
+
+type headerMetaValue struct {
+ Ver byte
+ ExpireAt uint32
+ ValueVersion int64
+ UserData []byte
+}
+
+// Note: a header with the default empty Ver (0) represents the old header format;
+// in that case only UserData (which contains the old header data) should be used.
+
+func newHeaderMetaV1() *headerMetaValue {
+ return &headerMetaValue{
+ Ver: byte(common.ValueHeaderV1),
+ }
+}
+
+func (h *headerMetaValue) hdlen() int {
+ if h.Ver == byte(common.ValueHeaderV1) {
+ return headerV1Len
+ }
+ return 0
+}
+
+// only useful for compact ttl policy
+func (h *headerMetaValue) ttl(ts int64) int64 {
+ if h.ExpireAt == 0 {
+ return -1
+ }
+ // do not use time.Now() to compute the ttl here, since the local clock may differ between nodes; use the caller-provided ts instead
+ ttl := int64(h.ExpireAt) - ts/int64(time.Second)
+ if ttl <= 0 {
+ ttl = -1
+ }
+ return ttl
+}
+
+// only useful for compact ttl policy
+func (h *headerMetaValue) isExpired(ts int64) bool {
+ if h.Ver != byte(common.ValueHeaderV1) {
+ return false
+ }
+ if h.ExpireAt == 0 || ts == 0 {
+ return false
+ }
+ // do not use time.Now() to check the ttl here, since the local clock may differ between nodes; use the caller-provided ts instead
+ ttl := int64(h.ExpireAt) - ts/int64(time.Second)
+ return ttl <= 0
+}
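+
+// Worked example of the arithmetic above (an illustrative sketch): with
+// ExpireAt == 1600000060 and a caller-provided ts of
+// 1600000000 * int64(time.Second) nanoseconds, ttl(ts) returns 60; once ts
+// reaches 1600000060 seconds the remaining ttl is <= 0, so ttl(ts) reports -1
+// and isExpired(ts) reports true (isExpired additionally treats ts == 0 or
+// ExpireAt == 0 as "never expires").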
+
+func (h *headerMetaValue) encodeTo(old []byte) (int, []byte) {
+ switch h.Ver {
+ case byte(0):
+ return 0, old
+ case byte(common.ValueHeaderV1):
+ b := old
+ if len(old) < headerV1Len {
+ b = make([]byte, headerV1Len)
+ }
+ b[0] = h.Ver
+ binary.BigEndian.PutUint32(b[1:], uint32(h.ExpireAt))
+ binary.BigEndian.PutUint64(b[1+4:], uint64(h.ValueVersion))
+ return headerV1Len, b
+ default:
+ panic("unknown value header")
+ }
+}
+
+func (h *headerMetaValue) encodeWithDataTo(old []byte) []byte {
+ var n int
+ n, old = h.encodeTo(old)
+ if h.UserData == nil {
+ return old
+ }
+ copy(old[n:], h.UserData)
+ return old
+}
+
+func (h *headerMetaValue) encodeWithData() []byte {
+ b := make([]byte, h.hdlen()+len(h.UserData))
+ return h.encodeWithDataTo(b)
+}
+
+func (h *headerMetaValue) encode() []byte {
+ b := make([]byte, h.hdlen())
+ _, b = h.encodeTo(b)
+ return b
+}
+
+func (h *headerMetaValue) decode(b []byte) (int, error) {
+ if len(b) < headerV1Len {
+ return 0, errHeaderMetaValue
+ }
+ h.Ver = b[0]
+ if h.Ver != byte(common.ValueHeaderV1) {
+ return 0, errHeaderVersion
+ }
+ h.ExpireAt = binary.BigEndian.Uint32(b[1:])
+ h.ValueVersion = int64(binary.BigEndian.Uint64(b[1+4:]))
+ h.UserData = b[headerV1Len:]
+ return headerV1Len, nil
+}
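+
+// Illustrative round trip of the V1 layout (a sketch; oldMetaValue stands in
+// for whatever meta bytes a data type already stores as its value):
+//
+//  h := newHeaderMetaV1()
+//  h.ExpireAt = uint32(time.Now().Unix() + 60)
+//  h.ValueVersion = time.Now().UnixNano()
+//  h.UserData = oldMetaValue
+//  buf := h.encodeWithData() // buf[0]=Ver, buf[1:5]=ExpireAt, buf[5:13]=ValueVersion, buf[13:]=oldMetaValue
+//  var decoded headerMetaValue
+//  _, err := decoded.decode(buf) // restores all fields; decoded.UserData aliases buf[13:]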
+
+type compactExpiration struct {
+ db *RockDB
+ localExp *localExpiration
+}
+
+func newCompactExpiration(db *RockDB) *compactExpiration {
+ exp := &compactExpiration{
+ db: db,
+ localExp: newLocalExpiration(db),
+ }
+ return exp
+}
+
+func encodeVerKey(h *headerMetaValue, key []byte) []byte {
+ var b []byte
+ b, _ = EncodeMemCmpKey(b, key, defaultSep, h.ValueVersion, defaultSep)
+ return b
+}
+
+func decodeVerKey(b []byte) ([]byte, int64, error) {
+ vals, err := Decode(b, len(b))
+ if err != nil {
+ return nil, 0, err
+ }
+ if len(vals) < 4 {
+ return nil, 0, errInvalidVerKey
+ }
+ // key, sep, value version, sep
+ key := vals[0].([]byte)
+ ver := vals[2].(int64)
+ return key, ver, nil
+}
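+
+// Illustrative round trip (a sketch; EncodeMemCmpKey and Decode are the
+// existing mem-comparable codec helpers of this package):
+//
+//  h := newHeaderMetaV1()
+//  h.ValueVersion = time.Now().UnixNano()
+//  vk := encodeVerKey(h, []byte("test:key"))
+//  key, ver, err := decodeVerKey(vk)
+//  // err == nil, key == []byte("test:key"), ver == h.ValueVersion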
+
+func (exp *compactExpiration) encodeToVersionKey(dt byte, h *headerMetaValue, key []byte) []byte {
+ switch dt {
+ case HashType, SetType, BitmapType, ListType, ZSetType:
+ return encodeVerKey(h, key)
+ default:
+ return exp.localExp.encodeToVersionKey(dt, h, key)
+ }
+}
+
+func (exp *compactExpiration) decodeFromVersionKey(dt byte, key []byte) ([]byte, int64, error) {
+ switch dt {
+ case HashType, SetType, BitmapType, ListType, ZSetType:
+ return decodeVerKey(key)
+ default:
+ return exp.localExp.decodeFromVersionKey(dt, key)
+ }
+}
+
+func (exp *compactExpiration) encodeToRawValue(dataType byte, h *headerMetaValue) []byte {
+ switch dataType {
+ case HashType, KVType, SetType, BitmapType, ListType, ZSetType:
+ if h == nil {
+ h = newHeaderMetaV1()
+ }
+ v := h.encodeWithData()
+ return v
+ default:
+ return exp.localExp.encodeToRawValue(dataType, h)
+ }
+}
+
+func (exp *compactExpiration) decodeRawValue(dataType byte, rawValue []byte) (*headerMetaValue, error) {
+ h := newHeaderMetaV1()
+ switch dataType {
+ case HashType, KVType, SetType, BitmapType, ListType, ZSetType:
+ if rawValue == nil {
+ return h, nil
+ }
+ _, err := h.decode(rawValue)
+ if err != nil {
+ return h, err
+ }
+ return h, nil
+ default:
+ return exp.localExp.decodeRawValue(dataType, rawValue)
+ }
+}
+
+func (exp *compactExpiration) getRawValueForHeader(ts int64, dataType byte, key []byte) ([]byte, error) {
+ switch dataType {
+ case KVType:
+ _, _, v, _, err := exp.db.getRawDBKVValue(ts, key, true)
+ if err != nil {
+ return nil, err
+ }
+ if v == nil {
+ return nil, nil
+ }
+ return v, nil
+ case HashType, SetType, BitmapType, ListType, ZSetType:
+ metaKey, _ := encodeMetaKey(dataType, key)
+ return exp.db.GetBytes(metaKey)
+ default:
+ return exp.localExp.getRawValueForHeader(ts, dataType, key)
+ }
+}
+
+func (exp *compactExpiration) isExpired(ts int64, dataType byte, key []byte, rawValue []byte, useLock bool) (bool, error) {
+ switch dataType {
+ case HashType, KVType, SetType, BitmapType, ListType, ZSetType:
+ if rawValue == nil {
+ return false, nil
+ }
+ var h headerMetaValue
+ _, err := h.decode(rawValue)
+ if err != nil {
+ return false, err
+ }
+ return h.isExpired(ts), nil
+ default:
+ return exp.localExp.isExpired(ts, dataType, key, rawValue, useLock)
+ }
+}
+
+func (exp *compactExpiration) ExpireAt(dataType byte, key []byte, rawValue []byte, when int64) (int64, error) {
+ switch dataType {
+ case HashType, KVType, SetType, BitmapType, ListType, ZSetType:
+ wb := exp.db.wb
+ defer wb.Clear()
+ if rawValue == nil {
+ // key not exist
+ return 0, nil
+ }
+ newValue, err := exp.rawExpireAt(dataType, key, rawValue, when, wb)
+ if err != nil {
+ return 0, err
+ }
+ key, err = encodeMetaKey(dataType, key)
+ if err != nil {
+ return 0, err
+ }
+ wb.Put(key, newValue)
+ if err := exp.db.rockEng.Write(wb); err != nil {
+ return 0, err
+ }
+ return 1, nil
+ default:
+ return exp.localExp.ExpireAt(dataType, key, rawValue, when)
+ }
+}
+
+func (exp *compactExpiration) rawExpireAt(dataType byte, key []byte, rawValue []byte, when int64, wb engine.WriteBatch) ([]byte, error) {
+ switch dataType {
+ case HashType, KVType, SetType, BitmapType, ListType, ZSetType:
+ h := newHeaderMetaV1()
+ if when >= int64(math.MaxUint32-1) {
+ return nil, errExpOverflow
+ }
+ _, err := h.decode(rawValue)
+ if err != nil {
+ return nil, err
+ }
+ h.ExpireAt = uint32(when)
+ _, v := h.encodeTo(rawValue)
+ return v, nil
+ default:
+ return exp.localExp.rawExpireAt(dataType, key, rawValue, when, wb)
+ }
+}
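+
+// For example (a sketch; raw and wb are assumed to come from the caller, and
+// when is an absolute unix-seconds deadline): rewriting the header in place
+// leaves the user payload untouched:
+//
+//  newRaw, err := exp.rawExpireAt(HashType, key, raw, time.Now().Unix()+3600, wb)
+//  // on success newRaw carries the same UserData with ExpireAt set to now+1h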
+
+func (exp *compactExpiration) ttl(ts int64, dataType byte, key []byte, rawValue []byte) (int64, error) {
+ switch dataType {
+ case KVType, HashType, SetType, BitmapType, ListType, ZSetType:
+ if rawValue == nil {
+ return -1, nil
+ }
+ var h headerMetaValue
+ _, err := h.decode(rawValue)
+ if err != nil {
+ return 0, err
+ }
+ return h.ttl(ts), nil
+ default:
+ return exp.localExp.ttl(ts, dataType, key, rawValue)
+ }
+}
+
+func (exp *compactExpiration) renewOnExpired(ts int64, dataType byte, key []byte, oldh *headerMetaValue) {
+ if oldh == nil {
+ return
+ }
+ switch dataType {
+ case KVType, HashType, SetType, BitmapType, ListType, ZSetType:
+ oldh.ExpireAt = 0
+ oldh.UserData = nil
+ oldh.ValueVersion = ts
+ return
+ default:
+ exp.localExp.renewOnExpired(ts, dataType, key, oldh)
+ }
+}
+
+func (exp *compactExpiration) Start() {
+ exp.localExp.Start()
+}
+
+func (exp *compactExpiration) Destroy() {
+ exp.localExp.Destroy()
+}
+
+func (exp *compactExpiration) Stop() {
+ exp.localExp.Stop()
+}
+
+func (exp *compactExpiration) delExpire(dataType byte, key []byte, rawValue []byte, keepValue bool, wb engine.WriteBatch) ([]byte, error) {
+ switch dataType {
+ case HashType, KVType, SetType, BitmapType, ListType, ZSetType:
+ if !keepValue {
+ return rawValue, nil
+ }
+ newValue, err := exp.rawExpireAt(dataType, key, rawValue, 0, wb)
+ if err != nil {
+ return newValue, err
+ }
+ return newValue, nil
+ default:
+ return exp.localExp.delExpire(dataType, key, rawValue, keepValue, wb)
+ }
+}
+
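+// check is a no-op under the compact ttl policy: expired data is reclaimed
+// lazily by the compaction filter (exercised in t_ttl_compact_test.go) rather
+// than by an active scan.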
+func (exp *compactExpiration) check(buffer common.ExpiredDataBuffer, stop chan struct{}) error {
+ return nil
+}
diff --git a/rockredis/t_ttl_compact_test.go b/rockredis/t_ttl_compact_test.go
new file mode 100644
index 00000000..67936d59
--- /dev/null
+++ b/rockredis/t_ttl_compact_test.go
@@ -0,0 +1,2097 @@
+package rockredis
+
+import (
+ "bytes"
+ math "math"
+ "math/rand"
+ "os"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
+)
+
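+// TestHeaderMetaV1RoundTrip is a minimal sketch of the V1 value header used by
+// the compact ttl policy: 1 byte version, 4 bytes expire-at (unix seconds),
+// 8 bytes value version, then the user meta data.
+func TestHeaderMetaV1RoundTrip(t *testing.T) {
+ h := newHeaderMetaV1()
+ h.ExpireAt = uint32(time.Now().Unix() + 100)
+ h.ValueVersion = time.Now().UnixNano()
+ h.UserData = []byte("user-meta")
+
+ buf := h.encodeWithData()
+ assert.Equal(t, headerV1Len+len(h.UserData), len(buf))
+
+ var decoded headerMetaValue
+ n, err := decoded.decode(buf)
+ assert.Nil(t, err)
+ assert.Equal(t, headerV1Len, n)
+ assert.Equal(t, h.ExpireAt, decoded.ExpireAt)
+ assert.Equal(t, h.ValueVersion, decoded.ValueVersion)
+ assert.Equal(t, []byte("user-meta"), decoded.UserData)
+ // the ttl computed against the current time should still be positive
+ assert.True(t, decoded.ttl(time.Now().UnixNano()) > 0)
+}
+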
+func TestCompactionFilterInWaitCompact(t *testing.T) {
+ // cases this test needs to cover:
+ // 1. expired in ttl
+ // 2. meta not exist (coll delete)
+ // 3. version changed (coll update)
+ // 4. lazy check should be ensured
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+ key1 := []byte("test:testdbTTL_compactfilter1")
+ key2 := []byte("test:testdbTTL_compactfilter2")
+ tn := time.Now().UnixNano()
+ err := db.KVSet(tn, key1, []byte("hello world 1"))
+ assert.Nil(t, err)
+ err = db.KVSet(tn, key2, []byte("hello world 2"))
+ assert.Nil(t, err)
+
+ hkeyKeep := []byte("test:testdbTTL_compactfilter_hashkeep")
+ hkeyDel := []byte("test:testdbTTL_compactfilter_hashdel")
+ hkeyUpdate := []byte("test:testdbTTL_compactfilter_hashupdate")
+ hkeyExpire := []byte("test:testdbTTL_compactfilter_hashexpire")
+ db.HSet(tn, false, hkeyKeep, []byte("test_hash_keep"), []byte("value_hash_keep"))
+ db.HSet(tn, false, hkeyDel, []byte("test_hash_del"), []byte("value_hash_del"))
+ db.HSet(tn, false, hkeyDel, []byte("test_hash_del2"), []byte("value_hash_del2"))
+ db.HSet(tn, false, hkeyUpdate, []byte("test_hash_update"), []byte("value_hash_update"))
+ db.HSet(tn, false, hkeyExpire, []byte("test_hash_expired"), []byte("value_hash_expire"))
+ db.HSet(tn, false, hkeyExpire, []byte("test_hash_expired2"), []byte("value_hash_expire2"))
+
+ db.Expire(tn, key1, 2)
+ db.Expire(tn, key2, 3)
+ db.HExpire(tn, hkeyExpire, 4)
+ // compact filter should hit all keys
+ db.CompactAllRange()
+ stats := db.GetCompactFilterStats()
+ assert.Equal(t, int64(0), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(0), stats.DelCleanCnt)
+ assert.Equal(t, int64(0), stats.VersionCleanCnt)
+
+ time.Sleep(time.Millisecond)
+ tn = time.Now().UnixNano()
+ db.DelKeys(key1)
+ db.HClear(tn, hkeyDel)
+ db.HClear(tn, hkeyUpdate)
+ db.HSet(tn, false, hkeyUpdate, []byte("test_hash_update2"), []byte("value_hash_update2"))
+
+ // compact filter should hit all undeleted keys
+ db.CompactAllRange()
+ db.CompactAllRange()
+ stats = db.GetCompactFilterStats()
+ // deleted data should not be cleaned until the lazy period has passed
+ assert.Equal(t, int64(0), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(0), stats.DelCleanCnt)
+ assert.Equal(t, int64(0), stats.VersionCleanCnt)
+ // wait ttl
+ time.Sleep(time.Second * 5)
+ db.CompactAllRange()
+ db.CompactAllRange()
+ // expired data should not be cleaned until the lazy period has passed
+ stats = db.GetCompactFilterStats()
+ assert.Equal(t, int64(0), stats.ExpiredCleanCnt)
+ // deleted data should be cleaned after its lazy period (3s in tests)
+ assert.Equal(t, int64(2), stats.DelCleanCnt)
+ assert.Equal(t, int64(1), stats.VersionCleanCnt)
+
+ // wait lazy check
+ time.Sleep(lazyCleanExpired * 2)
+ db.CompactAllRange()
+ stats = db.GetCompactFilterStats()
+ assert.Equal(t, int64(4), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(2), stats.DelCleanCnt)
+ assert.Equal(t, int64(1), stats.VersionCleanCnt)
+ hv, err := db.HGet(hkeyUpdate, []byte("test_hash_update2"))
+ assert.Nil(t, err)
+ assert.Equal(t, []byte("value_hash_update2"), hv)
+ hv, err = db.HGet(hkeyUpdate, []byte("test_hash_update"))
+ assert.Nil(t, err)
+ assert.Nil(t, hv)
+ db.CompactAllRange()
+ stats = db.GetCompactFilterStats()
+ assert.Equal(t, int64(4), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(2), stats.DelCleanCnt)
+ assert.Equal(t, int64(1), stats.VersionCleanCnt)
+}
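+
+// Timeline observed above (a summary of the assertions, not a spec): right
+// after the writes and deletions nothing is reclaimed by compaction; once the
+// delete lazy period (3s in tests) has passed, compaction cleans the deleted
+// and version-changed collections; expired data is only cleaned after the
+// longer lazyCleanExpired window, and further compactions do not change the
+// counters again.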
+
+func TestCompactionFilterInWaitCompactForZSet(t *testing.T) {
+ // cases this test needs to cover:
+ // 1. expired in ttl
+ // 2. meta not exist (coll delete)
+ // 3. version changed (coll update)
+ // 4. lazy check should be ensured
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ tn := time.Now().UnixNano()
+ keyKeep := []byte("test:testdbTTL_compactfilter_keep")
+ keyDel := []byte("test:testdbTTL_compactfilter_del")
+ keyUpdate := []byte("test:testdbTTL_compactfilter_update")
+ keyExpire := []byte("test:testdbTTL_compactfilter_expire")
+ db.ZAdd(tn, keyKeep, common.ScorePair{Member: []byte("test_keep"), Score: 1})
+ db.ZAdd(tn, keyDel, common.ScorePair{Member: []byte("test_del"), Score: 1})
+ db.ZAdd(tn, keyDel, common.ScorePair{Member: []byte("test_del2"), Score: 2})
+ db.ZAdd(tn, keyUpdate, common.ScorePair{Member: []byte("test_update"), Score: 1})
+ db.ZAdd(tn, keyExpire, common.ScorePair{Member: []byte("test_expired"), Score: 1})
+ db.ZAdd(tn, keyExpire, common.ScorePair{Member: []byte("test_expired2"), Score: 2})
+
+ db.ZExpire(tn, keyExpire, 4)
+ // compact filter should hit all keys
+ db.CompactAllRange()
+ stats := db.GetCompactFilterStats()
+ assert.Equal(t, int64(0), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(0), stats.DelCleanCnt)
+ assert.Equal(t, int64(0), stats.VersionCleanCnt)
+
+ time.Sleep(time.Millisecond)
+ tn = time.Now().UnixNano()
+ db.ZClear(tn, keyDel)
+ db.ZClear(tn, keyUpdate)
+ db.ZAdd(tn, keyUpdate, common.ScorePair{Member: []byte("test_update2"), Score: 2})
+
+ // compact filter should hit all undeleted keys
+ db.CompactAllRange()
+ db.CompactAllRange()
+ stats = db.GetCompactFilterStats()
+ // deleted data should not be cleaned until the lazy period has passed
+ assert.Equal(t, int64(0), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(0), stats.DelCleanCnt)
+ assert.Equal(t, int64(0), stats.VersionCleanCnt)
+ // wait ttl
+ time.Sleep(time.Second * 5)
+ db.CompactAllRange()
+ db.CompactAllRange()
+ // expired data should not be cleaned until the lazy period has passed
+ stats = db.GetCompactFilterStats()
+ assert.Equal(t, int64(0), stats.ExpiredCleanCnt)
+ // deleted data should be cleaned after its lazy period (3s in tests)
+ assert.Equal(t, int64(4), stats.DelCleanCnt)
+ assert.Equal(t, int64(2), stats.VersionCleanCnt)
+
+ // wait lazy check
+ time.Sleep(lazyCleanExpired * 2)
+ db.CompactAllRange()
+ stats = db.GetCompactFilterStats()
+ assert.Equal(t, int64(5), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(4), stats.DelCleanCnt)
+ assert.Equal(t, int64(2), stats.VersionCleanCnt)
+ hv, err := db.ZScore(keyUpdate, []byte("test_update2"))
+ assert.Nil(t, err)
+ assert.Equal(t, float64(2), hv)
+ hv, err = db.ZScore(keyUpdate, []byte("test_update"))
+ assert.Equal(t, errScoreMiss, err)
+ db.CompactAllRange()
+ stats = db.GetCompactFilterStats()
+ assert.Equal(t, int64(5), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(4), stats.DelCleanCnt)
+ assert.Equal(t, int64(2), stats.VersionCleanCnt)
+}
+
+func TestCompactionFilterInWaitCompactForList(t *testing.T) {
+ // cases this test needs to cover:
+ // 1. expired in ttl
+ // 2. meta not exist (coll delete)
+ // 3. version changed (coll update)
+ // 4. lazy check should be ensured
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+ tn := time.Now().UnixNano()
+
+ keyKeep := []byte("test:testdbTTL_compactfilter_keep")
+ keyDel := []byte("test:testdbTTL_compactfilter_del")
+ keyUpdate := []byte("test:testdbTTL_compactfilter_update")
+ keyExpire := []byte("test:testdbTTL_compactfilter_expire")
+ db.LPush(tn, keyKeep, []byte("test_keep"))
+ db.LPush(tn, keyDel, []byte("test_del"))
+ db.LPush(tn, keyDel, []byte("test_del2"))
+ db.LPush(tn, keyUpdate, []byte("test_update"))
+ db.LPush(tn, keyExpire, []byte("test_expired"))
+ db.LPush(tn, keyExpire, []byte("test_expired2"))
+
+ db.LExpire(tn, keyExpire, 4)
+ // compact filter should hit all keys
+ db.CompactAllRange()
+ stats := db.GetCompactFilterStats()
+ assert.Equal(t, int64(0), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(0), stats.DelCleanCnt)
+ assert.Equal(t, int64(0), stats.VersionCleanCnt)
+
+ time.Sleep(time.Millisecond)
+ tn = time.Now().UnixNano()
+ db.LClear(tn, keyDel)
+ db.LClear(tn, keyUpdate)
+ db.LPush(tn, keyUpdate, []byte("test_update2"))
+
+ // compact filter should hit all undeleted keys
+ db.CompactAllRange()
+ db.CompactAllRange()
+ stats = db.GetCompactFilterStats()
+ // deleted data should not be cleaned until the lazy period has passed
+ assert.Equal(t, int64(0), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(0), stats.DelCleanCnt)
+ assert.Equal(t, int64(0), stats.VersionCleanCnt)
+ // wait ttl
+ time.Sleep(time.Second * 5)
+ db.CompactAllRange()
+ db.CompactAllRange()
+ // expired data should not be cleaned until the lazy period has passed
+ stats = db.GetCompactFilterStats()
+ assert.Equal(t, int64(0), stats.ExpiredCleanCnt)
+ // deleted data should be cleaned after its lazy period (3s in tests)
+ assert.Equal(t, int64(2), stats.DelCleanCnt)
+ assert.Equal(t, int64(1), stats.VersionCleanCnt)
+
+ // wait lazy check
+ time.Sleep(lazyCleanExpired * 2)
+ db.CompactAllRange()
+ stats = db.GetCompactFilterStats()
+ assert.Equal(t, int64(3), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(2), stats.DelCleanCnt)
+ assert.Equal(t, int64(1), stats.VersionCleanCnt)
+ hv, err := db.LIndex(keyUpdate, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, []byte("test_update2"), hv)
+ db.CompactAllRange()
+ stats = db.GetCompactFilterStats()
+ assert.Equal(t, int64(3), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(2), stats.DelCleanCnt)
+ assert.Equal(t, int64(1), stats.VersionCleanCnt)
+}
+
+func TestCompactionFilterInWaitCompactForBitmap(t *testing.T) {
+ // cases this test needs to cover:
+ // 1. expired in ttl
+ // 2. meta not exist (coll delete)
+ // 3. version changed (coll update)
+ // 4. lazy check should be ensured
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+ tn := time.Now().UnixNano()
+
+ keyKeep := []byte("test:testdbTTL_compactfilter_keep")
+ keyDel := []byte("test:testdbTTL_compactfilter_del")
+ keyUpdate := []byte("test:testdbTTL_compactfilter_update")
+ keyExpire := []byte("test:testdbTTL_compactfilter_expire")
+ db.BitSetV2(tn, keyKeep, 1, 1)
+ db.BitSetV2(tn, keyDel, 1, 1)
+ db.BitSetV2(tn, keyDel, 1024000, 1)
+ db.BitSetV2(tn, keyUpdate, 1, 1)
+ db.BitSetV2(tn, keyExpire, 1, 1)
+ db.BitSetV2(tn, keyExpire, 1024000, 1)
+
+ db.BitExpire(tn, keyExpire, 4)
+ // compact filter should hit all keys
+ db.CompactAllRange()
+ stats := db.GetCompactFilterStats()
+ assert.Equal(t, int64(0), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(0), stats.DelCleanCnt)
+ assert.Equal(t, int64(0), stats.VersionCleanCnt)
+
+ time.Sleep(time.Millisecond)
+ tn = time.Now().UnixNano()
+ db.BitClear(tn, keyDel)
+ db.BitClear(tn, keyUpdate)
+ db.BitSetV2(tn, keyUpdate, 2, 1)
+
+ // compact filter should hit all undeleted keys
+ db.CompactAllRange()
+ db.CompactAllRange()
+ stats = db.GetCompactFilterStats()
+ // deleted data should not be cleaned until the lazy period has passed
+ assert.Equal(t, int64(0), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(0), stats.DelCleanCnt)
+ assert.Equal(t, int64(0), stats.VersionCleanCnt)
+ // wait ttl
+ time.Sleep(time.Second * 5)
+ db.CompactAllRange()
+ db.CompactAllRange()
+ // expired data should not be cleaned until the lazy period has passed
+ stats = db.GetCompactFilterStats()
+ assert.Equal(t, int64(0), stats.ExpiredCleanCnt)
+ // deleted data should be cleaned after its lazy period (3s in tests)
+ assert.Equal(t, int64(2), stats.DelCleanCnt)
+ assert.Equal(t, int64(1), stats.VersionCleanCnt)
+
+ // wait lazy check
+ time.Sleep(lazyCleanExpired * 2)
+ db.CompactAllRange()
+ stats = db.GetCompactFilterStats()
+ assert.Equal(t, int64(3), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(2), stats.DelCleanCnt)
+ assert.Equal(t, int64(1), stats.VersionCleanCnt)
+ v, err := db.BitGetV2(keyUpdate, 2)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), v)
+ v, err = db.BitGetV2(keyUpdate, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), v)
+ db.CompactAllRange()
+ stats = db.GetCompactFilterStats()
+ assert.Equal(t, int64(3), stats.ExpiredCleanCnt)
+ assert.Equal(t, int64(2), stats.DelCleanCnt)
+ assert.Equal(t, int64(1), stats.VersionCleanCnt)
+}
+
+func TestKVTTL_Compact(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key1 := []byte("test:testdbTTL_kv_compact")
+ var ttl1 int64 = int64(rand.Int31() - 10)
+ ttl2 := ttl1 + 10
+ tn := time.Now().UnixNano()
+ if v, err := db.Expire(tn, key1, ttl1); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Fatal("return value from expire of not exist key != 0")
+ }
+
+ if v, err := db.Persist(tn, key1); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Fatal("return value from persist of not exist key != 0")
+ }
+
+ if err := db.KVSet(0, key1, []byte("hello world 1")); err != nil {
+ t.Fatal(err)
+ }
+
+ if v, err := db.Expire(tn, key1, ttl1); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal("return value from expire != 1")
+ }
+
+ if v, err := db.KVTtl(key1); err != nil {
+ t.Fatal(err)
+ } else if v != ttl1 {
+ t.Fatal("ttl != expire")
+ }
+
+ // test change ttl
+ n, err := db.Expire(tn, key1, ttl2)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ if v, err := db.KVTtl(key1); err != nil {
+ t.Fatal(err)
+ } else if v != ttl2 {
+ t.Fatal("ttl != expire")
+ }
+
+ if v, err := db.Persist(tn, key1); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal("return value from persist != 1")
+ }
+
+ if v, err := db.KVTtl(key1); err != nil {
+ t.Fatal(err)
+ } else if v != -1 {
+ t.Fatal("KVPersist do not clear the ttl")
+ }
+
+ tn = time.Now().UnixNano()
+ testValue := []byte("test value for SetEx command")
+ if err := db.SetEx(tn, key1, ttl1, testValue); err != nil {
+ t.Fatal(err)
+ }
+
+ if v, err := db.KVGet(key1); err != nil {
+ t.Fatal(err)
+ } else if !bytes.Equal(v, testValue) {
+ t.Fatal("SetEx: gotten value != set value")
+ }
+
+ if v, err := db.KVTtl(key1); err != nil {
+ t.Fatal(err)
+ } else if v != ttl1 {
+ t.Fatalf("ttl != setex: %v vs %v", v, ttl1)
+ }
+ n, err = db.KVGetVer(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, tn, n)
+ v, err := db.KVGetExpired(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, testValue, v)
+
+ err = db.SetEx(tn, key1, ttl2, testValue)
+ assert.Nil(t, err)
+
+ n, err = db.KVTtl(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, ttl2, n)
+ // set, setnx, mset should clean ttl
+ err = db.KVSet(0, key1, testValue)
+ assert.Nil(t, err)
+ n, err = db.KVTtl(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ err = db.SetEx(tn, key1, ttl2, testValue)
+ assert.Nil(t, err)
+ n, err = db.KVTtl(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, ttl2, n)
+
+ // a setnx that does not write should keep the ttl
+ changed, err := db.SetNX(0, key1, testValue)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), changed)
+ n, err = db.KVTtl(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, ttl2, n)
+
+ db.MSet(0, common.KVRecord{Key: key1, Value: testValue})
+ n, err = db.KVTtl(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ err = db.SetEx(tn, key1, 1, testValue)
+ assert.Nil(t, err)
+ n, err = db.KVTtl(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ time.Sleep(time.Second * 2)
+ // a successful setnx should clean the ttl
+ tn = time.Now().UnixNano()
+ changed, err = db.SetNX(tn, key1, testValue)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), changed)
+ n, err = db.KVTtl(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+ // compact should clean all expired data
+ db.CompactAllRange()
+}
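+
+// TTL behavior asserted above and in the KeepTTL/TTLExpired tests below (a
+// summary of the assertions, not normative docs):
+//
+//  KVSet / MSet              -> clear any existing ttl
+//  SetEx                     -> write the value and the ttl together
+//  SetNX that does not write -> keep the existing ttl
+//  SetNX that writes         -> store the value with no ttl
+//  Incr / Append / SetRange  -> keep the existing ttl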
+
+func TestKVTTL_CompactKeepTTL(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key1 := []byte("test:testdbTTL_kv_compact_keepttl")
+ var ttl1 int64 = int64(rand.Int31())
+ tn := time.Now().UnixNano()
+ if c, err := db.Incr(0, key1); err != nil {
+ t.Fatal(err)
+ } else {
+ assert.Equal(t, int64(1), c)
+ }
+
+ if v, err := db.Expire(tn, key1, ttl1); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal("return value from expire != 1")
+ }
+ n, err := db.KVTtl(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(ttl1), n)
+ // incr should keep ttl
+ if c, err := db.Incr(0, key1); err != nil {
+ t.Fatal(err)
+ } else {
+ assert.Equal(t, int64(2), c)
+ }
+ n, err = db.KVTtl(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(ttl1), n)
+
+ // append
+ _, err = db.Append(0, key1, []byte("append"))
+ assert.Nil(t, err)
+ if v, err := db.KVTtl(key1); err != nil {
+ t.Fatal(err)
+ } else if v != ttl1 {
+ t.Fatal("ttl != expire")
+ }
+ // setrange
+ _, err = db.SetRange(0, key1, 1, []byte("range"))
+ assert.Nil(t, err)
+ if v, err := db.KVTtl(key1); err != nil {
+ t.Fatal(err)
+ } else if v != ttl1 {
+ t.Fatal("ttl != expire")
+ }
+ v, err := db.KVGet(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, "2ranged", string(v))
+ db.DelKeys(key1)
+ // bitset
+ db.BitSetOld(0, key1, 1, 1)
+ if v, err := db.Expire(tn, key1, ttl1); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatalf("return value from expire %v != 1", v)
+ }
+ db.BitSetOld(0, key1, 2, 1)
+ db.BitSetOld(0, key1, 1, 0)
+ if v, err := db.KVTtl(key1); err != nil {
+ t.Fatal(err)
+ } else if v != ttl1 {
+ t.Fatal("ttl != expire")
+ }
+ bitV, err := db.BitGetV2(key1, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), bitV)
+ bitV, err = db.BitGetV2(key1, 2)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), bitV)
+}
+
+func TestKVTTL_Compact_TTLExpired(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key1 := []byte("test:testdbTTL_kv_compact_ttlexpire")
+ var ttl1 int64 = int64(2)
+ tn := time.Now().UnixNano()
+ if c, err := db.Incr(tn, key1); err != nil {
+ t.Fatal(err)
+ } else {
+ assert.Equal(t, int64(1), c)
+ }
+
+ if v, err := db.Expire(tn, key1, ttl1); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal("return value from expire != 1")
+ }
+ if v, err := db.KVTtl(key1); err != nil {
+ t.Fatal(err)
+ } else if v != ttl1 {
+ t.Fatal("ttl != expire")
+ }
+ // incr should keep ttl
+ if c, err := db.Incr(tn, key1); err != nil {
+ t.Fatal(err)
+ } else {
+ assert.Equal(t, int64(2), c)
+ }
+ if v, err := db.KVTtl(key1); err != nil {
+ t.Fatal(err)
+ } else if v != ttl1 {
+ t.Fatal("ttl != expire")
+ }
+ // a setnx that does not write should keep the ttl
+ n, err := db.SetNX(tn, key1, []byte("new"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.KVTtl(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(ttl1), n)
+
+ n, err = db.KVGetVer(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, tn, n)
+ v, err := db.KVGetExpired(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, []byte("2"), v)
+
+ time.Sleep(time.Second * time.Duration(ttl1+1))
+ if v, err := db.KVTtl(key1); err != nil {
+ t.Fatal(err)
+ } else if v != -1 {
+ t.Fatalf("should expired: %v", v)
+ }
+ exist, err := db.KVExists(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), exist)
+ v, err = db.KVGet(key1)
+ assert.Nil(t, err)
+ assert.Nil(t, v)
+
+ n, err = db.KVGetVer(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, tn, n)
+ v, err = db.KVGetExpired(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, []byte("2"), v)
+
+ vlist, errs := db.MGet(key1)
+ assert.Nil(t, errs[0])
+ assert.Nil(t, vlist[0])
+
+ // after expiry, setnx should succeed and write the value with no ttl
+ tn = time.Now().UnixNano()
+ n, err = db.SetNX(tn, key1, []byte("new1"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.KVTtl(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+ v, err = db.KVGet(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, []byte("new1"), v)
+
+ n, err = db.KVGetVer(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, tn, n)
+ v, err = db.KVGetExpired(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, []byte("new1"), v)
+
+ n, err = db.Expire(tn, key1, ttl1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.KVTtl(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(ttl1), n)
+
+ time.Sleep(time.Second * time.Duration(ttl1+1))
+ exist, err = db.KVExists(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), exist)
+ v, err = db.KVGet(key1)
+ assert.Nil(t, err)
+ assert.Nil(t, v)
+ vlist, errs = db.MGet(key1)
+ assert.Nil(t, errs[0])
+ assert.Nil(t, vlist[0])
+ n, err = db.KVTtl(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ tn = time.Now().UnixNano()
+ v, err = db.KVGetSet(tn, key1, []byte("new2"))
+ assert.Nil(t, err)
+ assert.Nil(t, v)
+ n, err = db.KVTtl(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ v, err = db.KVGet(key1)
+ assert.Nil(t, err)
+ assert.Equal(t, []byte("new2"), v)
+}
+
+func TestKVTTL_CompactOverflow(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key1 := []byte("test:testdbTTL_kv_compact")
+ var ttl1 int64 = math.MaxUint32
+ tn := time.Now().UnixNano()
+ err := db.KVSet(0, key1, []byte("hello world 1"))
+ assert.Nil(t, err)
+
+ _, err = db.Expire(tn, key1, ttl1)
+ assert.NotNil(t, err)
+
+ if v, err := db.KVTtl(key1); err != nil {
+ t.Fatal(err)
+ } else if v != -1 {
+ t.Fatal("ttl != -1")
+ }
+
+ tn = time.Now().UnixNano()
+ testValue := []byte("test value for SetEx command")
+ err = db.SetEx(tn, key1, ttl1, testValue)
+ assert.NotNil(t, err)
+
+ if v, err := db.KVTtl(key1); err != nil {
+ t.Fatal(err)
+ } else if v != -1 {
+ t.Fatalf("ttl != setex: %v vs %v", v, ttl1)
+ }
+}
+
+func TestHashTTL_Compact(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ hashKey := []byte("test:testdbTTL_hash_compact")
+ var hashTTL int64 = int64(rand.Int31()) - 10
+
+ if v, err := db.HashTtl(hashKey); err != nil {
+ t.Fatal(err)
+ } else if v != -1 {
+ t.Fatal("ttl of not exist hash key is not -1")
+ }
+
+ tn := time.Now().UnixNano()
+ if v, err := db.HExpire(tn, hashKey, hashTTL); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Fatal("return value from expire of not exist hash key != 0")
+ }
+
+ if v, err := db.HPersist(tn, hashKey); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Fatal("return value from hpersist of not exist hash key != 0")
+ }
+
+ hash_val := []common.KVRecord{
+ {Key: []byte("field0"), Value: []byte("value0")},
+ {Key: []byte("field1"), Value: []byte("value1")},
+ {Key: []byte("field2"), Value: []byte("value2")},
+ }
+
+ if err := db.HMset(tn, hashKey, hash_val...); err != nil {
+ t.Fatal(err)
+ }
+
+ if v, err := db.HExpire(tn, hashKey, hashTTL); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal("return value from hexpire != 1")
+ }
+
+ v, err := db.HashTtl(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, hashTTL, v)
+
+ if v, err := db.HPersist(tn, hashKey); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal("return value from hpersist is != 1")
+ }
+
+ if v, err := db.HashTtl(hashKey); err != nil {
+ t.Fatal(err)
+ } else if v != -1 {
+ t.Fatal("HashPersist do not clear the ttl")
+ }
+
+ // compact should clean all expired data
+ db.CompactAllRange()
+}
+
+func TestHashTTL_Compact_KeepTTL(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ hashKey := []byte("test:testdbTTL_hash_compact_keepttl")
+ var hashTTL int64 = int64(rand.Int31() - 10)
+ tn := time.Now().UnixNano()
+ hash_val := []common.KVRecord{
+ {Key: []byte("field0"), Value: []byte("0")},
+ {Key: []byte("field1"), Value: []byte("value1")},
+ {Key: []byte("field2"), Value: []byte("value2")},
+ }
+
+ err := db.HMset(tn, hashKey, hash_val...)
+ assert.Nil(t, err)
+ n, err := db.HExpire(tn, hashKey, hashTTL)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ // should keep ttl
+ n, err = db.HSet(tn, false, hashKey, hash_val[0].Key, hash_val[1].Value)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.HashTtl(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, hashTTL, n)
+
+ // hmset
+ err = db.HMset(tn, hashKey, hash_val...)
+ assert.Nil(t, err)
+ n, err = db.HashTtl(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, hashTTL, n)
+ // hdel
+ n, err = db.HDel(0, hashKey, hash_val[0].Key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.HashTtl(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, hashTTL, n)
+ // hincrby
+ n, err = db.HIncrBy(tn, hashKey, hash_val[0].Key, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.HashTtl(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, hashTTL, n)
+}
+
+func TestHashTTL_Compact_TTLExpired(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ hashKey := []byte("test:testdbTTL_hash_compact_expired")
+ var hashTTL int64 = int64(3)
+ tn := time.Now().UnixNano()
+ hash_val := []common.KVRecord{
+ {Key: []byte("field0"), Value: []byte("0")},
+ {Key: []byte("field1"), Value: []byte("value1")},
+ {Key: []byte("field2"), Value: []byte("value2")},
+ }
+
+ err := db.HMset(tn, hashKey, hash_val...)
+ assert.Nil(t, err)
+ n, err := db.HExpire(tn, hashKey, hashTTL)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.HashTtl(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, hashTTL, n)
+
+ // check the version and expired-read accessors before the ttl elapses
+ n, err = db.HGetVer(hashKey, hash_val[0].Key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(tn), n)
+ tn1 := tn
+
+ n, vals, err := db.HGetAll(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(3), n)
+ assert.Equal(t, 3, len(vals))
+ n, vals, err = db.HGetAllExpired(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(3), n)
+ assert.Equal(t, 3, len(vals))
+
+ time.Sleep(time.Second * time.Duration(hashTTL+1))
+ dbLog.Infof("wait expired done")
+ n, err = db.HashTtl(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ tn = time.Now().UnixNano()
+ v, err := db.HGet(hashKey, hash_val[0].Key)
+ assert.Nil(t, err)
+ assert.Nil(t, v)
+ n, err = db.HKeyExists(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ vlist, err := db.HMget(hashKey, hash_val[0].Key, hash_val[1].Key)
+ assert.Nil(t, err)
+ assert.Nil(t, vlist[0])
+ assert.Nil(t, vlist[1])
+ vlist, err = db.HMgetExpired(hashKey, hash_val[0].Key, hash_val[1].Key)
+ assert.Nil(t, err)
+ assert.Equal(t, hash_val[0].Value, vlist[0])
+ assert.Equal(t, hash_val[1].Value, vlist[1])
+
+ n, recs, err := db.HGetAll(hashKey)
+ assert.Equal(t, int64(0), n)
+ assert.Equal(t, 0, len(recs))
+ n, recs, err = db.HKeys(hashKey)
+ assert.Equal(t, int64(0), n)
+ assert.Equal(t, 0, len(recs))
+ n, recs, err = db.HValues(hashKey)
+ assert.Equal(t, int64(0), n)
+ assert.Equal(t, 0, len(recs))
+ n, err = db.HLen(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ // the versioned and expired-read accessors should still see the old data after expiry
+ n, err = db.HGetVer(hashKey, hash_val[0].Key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(tn1), n)
+
+ n, vals2, err := db.HGetAllExpired(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(3), n)
+ assert.Equal(t, 3, len(vals2))
+ assert.Equal(t, vals, vals2)
+
+ // renew hash
+ hash_val2 := []common.KVRecord{
+ {Key: []byte("field0"), Value: []byte("new")},
+ {Key: []byte("field1"), Value: []byte("value1_new")},
+ }
+ time.Sleep(time.Second)
+ tn = time.Now().UnixNano()
+ err = db.HMset(tn, hashKey, hash_val2...)
+ assert.Nil(t, err)
+
+ n, err = db.HashTtl(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ n, err = db.HLen(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(hash_val2)), n)
+
+ n, recs, err = db.HGetAll(hashKey)
+ assert.Equal(t, int64(len(hash_val2)), n)
+ assert.Equal(t, len(hash_val2), len(recs))
+
+ v, err = db.HGet(hashKey, hash_val2[0].Key)
+ assert.Nil(t, err)
+ assert.Equal(t, hash_val2[0].Value, v)
+ n, err = db.HKeyExists(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.HGetVer(hashKey, hash_val2[0].Key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(tn), n)
+
+ n, vals2, err = db.HGetAllExpired(hashKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+ assert.Equal(t, 2, len(vals2))
+ assert.NotEqual(t, vals, vals2)
+
+ vlist, err = db.HMget(hashKey, hash_val2[0].Key, hash_val2[1].Key)
+ assert.Nil(t, err)
+ assert.Equal(t, hash_val2[0].Value, vlist[0])
+ assert.Equal(t, hash_val2[1].Value, vlist[1])
+
+ n, recs, err = db.HKeys(hashKey)
+ assert.Equal(t, int64(len(hash_val2)), n)
+ assert.Equal(t, len(hash_val2), len(recs))
+ n, recs, err = db.HValues(hashKey)
+ assert.Equal(t, int64(len(hash_val2)), n)
+ assert.Equal(t, len(hash_val2), len(recs))
+}
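+
+// Note on the assertions above: under the compact ttl policy an expired hash
+// disappears from the normal read path (HGet/HGetAll/HLen return empty), but
+// the old data stays physically present until compaction reclaims it, which is
+// why HGetVer, HMgetExpired and HGetAllExpired can still return the pre-expiry
+// values and version.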
+
+func TestBitmapTTL_Compact(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ setKey := []byte("test:testdbTTL_bitmap_compact")
+ var setTTL int64 = int64(rand.Int31() - 10)
+
+ if v, err := db.BitTtl(setKey); err != nil {
+ t.Fatal(err)
+ } else if v != -1 {
+ t.Fatal("ttl of not exist key is not -1")
+ }
+ tn := time.Now().UnixNano()
+
+ if v, err := db.BitExpire(tn, setKey, setTTL); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Fatal("return value from expire of not exist key != 0")
+ }
+
+ if v, err := db.BitPersist(tn, setKey); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Fatal("return value from persist of not exist set key != 0")
+ }
+
+ if _, err := db.BitSetV2(tn, setKey, 1, 1); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.BitSetV2(tn, setKey, 2, 1); err != nil {
+ t.Fatal(err)
+ }
+
+ if v, err := db.BitExpire(tn, setKey, setTTL); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal("return value from expire != 1")
+ }
+
+ if v, err := db.BitTtl(setKey); err != nil {
+ t.Fatal(err)
+ } else if v != setTTL {
+ t.Errorf("ttl != expire, %v, %v", v, setTTL)
+ }
+
+ if v, err := db.BitPersist(tn, setKey); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal("return value from persist!= 1")
+ }
+
+ if v, err := db.BitTtl(setKey); err != nil {
+ t.Fatal(err)
+ } else if v != -1 {
+ t.Fatal("Persist do not clear the ttl")
+ }
+}
+
+func TestBitmapTTL_Compact_KeepTTL(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ setKey := []byte("test:testdbTTL_bitmap_compact_keepttl")
+ var ttl int64 = int64(rand.Int31() - 10)
+ tn := time.Now().UnixNano()
+
+ n, err := db.BitSetV2(tn, setKey, 1, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.BitSetV2(tn, setKey, 2, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.BitExpire(tn, setKey, ttl)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.BitTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, ttl, n)
+
+ // should keep ttl
+ n, err = db.BitSetV2(tn, setKey, 3, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.BitTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, ttl, n)
+
+ n, err = db.BitSetV2(tn, setKey, 1, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.BitTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, ttl, n)
+ _, err = db.BitSetV2(tn, setKey, 2, 0)
+ assert.Nil(t, err)
+ n, err = db.BitTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, ttl, n)
+}
+
+func TestBitmapTTL_Compact_TTLExpired(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ setKey := []byte("test:testdbTTL_bitmap_compact_expired")
+ var ttl int64 = int64(3)
+ tn := time.Now().UnixNano()
+
+ n, err := db.BitSetV2(tn, setKey, 1, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.BitSetV2(tn, setKey, 2, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.BitExpire(tn, setKey, ttl)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.BitTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, ttl, n)
+
+ n, err = db.BitGetV2(setKey, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.BitCountV2(setKey, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+ n, err = db.BitKeyExist(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ time.Sleep(time.Second * time.Duration(ttl+1))
+ dbLog.Infof("wait expired done")
+ n, err = db.BitTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ tn = time.Now().UnixNano()
+ n, err = db.BitGetV2(setKey, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.BitKeyExist(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.BitCountV2(setKey, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ // renew
+ time.Sleep(time.Second)
+ tn = time.Now().UnixNano()
+ n, err = db.BitSetV2(tn, setKey, 3, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.BitSetV2(tn, setKey, 4, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.BitTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ n, err = db.BitCountV2(setKey, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ n, err = db.BitGetV2(setKey, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.BitGetV2(setKey, 3)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.BitKeyExist(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+}
+
+func TestListTTL_Compact(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ listKey := []byte("test:testdbTTL_list_c")
+ var listTTL int64 = 1000
+ tn := time.Now().UnixNano()
+
+ if v, err := db.ListTtl(listKey); err != nil {
+ t.Fatal(err)
+ } else if v != -1 {
+ t.Fatal("ttl of not exist list key is not -1")
+ }
+
+ if v, err := db.LExpire(tn, listKey, listTTL); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Fatal("return value from expire of not exist list key != 0")
+ }
+
+ if v, err := db.LPersist(tn, listKey); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Fatal("return value from lpersist of not exist list key != 0")
+ }
+
+ if _, err := db.LPush(0, listKey, []byte("this"), []byte("is"), []byte("list"),
+ []byte("ttl"), []byte("test")); err != nil {
+ t.Fatal(err)
+ }
+
+ if v, err := db.LExpire(tn, listKey, listTTL); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal("return value from lexpire != 1")
+ }
+
+ if v, err := db.ListTtl(listKey); err != nil {
+ t.Fatal(err)
+ } else if v != listTTL {
+ t.Fatal("ttl != expire")
+ }
+
+ if v, err := db.LPersist(tn, listKey); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal("return value from lpersist != 1")
+ }
+
+ if v, err := db.ListTtl(listKey); err != nil {
+ t.Fatal(err)
+ } else if v != -1 {
+ t.Fatal("ListPersist do not clear the ttl")
+ }
+}
+func TestListTTL_Compact_KeepTTL(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ setKey := []byte("test:testdbTTL_list_compact_keepttl")
+ var ttl int64 = int64(rand.Int31() - 10)
+ tn := time.Now().UnixNano()
+ mems := [][]byte{
+ []byte("m1"), []byte("m2"), []byte("m3"),
+ }
+
+ n, err := db.RPush(tn, setKey, mems...)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems)), n)
+ n, err = db.LExpire(tn, setKey, ttl)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.ListTtl(setKey)
+ assert.Nil(t, err)
+ assert.InDelta(t, ttl, n, 1)
+
+ // should keep ttl
+ n, err = db.RPush(tn, setKey, []byte("newm1"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(4), n)
+ n, err = db.ListTtl(setKey)
+ assert.Nil(t, err)
+ assert.InDelta(t, ttl, n, 1)
+
+ elem, err := db.LPop(tn, setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, mems[0], elem)
+ n, err = db.ListTtl(setKey)
+ assert.Nil(t, err)
+ assert.InDelta(t, ttl, n, 1)
+}
+
+func TestListTTL_Compact_TTLExpired(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ setKey := []byte("test:testdbTTL_list_compact_expired")
+ var ttl int64 = int64(3)
+ tn := time.Now().UnixNano()
+ mems := [][]byte{
+ []byte("mem1"), []byte("mem2"), []byte("mem3"),
+ }
+
+ n, err := db.RPush(tn, setKey, mems...)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems)), n)
+ n, err = db.LExpire(tn, setKey, ttl)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.ListTtl(setKey)
+ assert.Nil(t, err)
+ assert.InDelta(t, ttl, n, 1)
+
+ elem, err := db.LIndex(setKey, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, mems[0], elem)
+ vlist, err := db.LRange(setKey, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, len(mems), len(vlist))
+ n, err = db.LLen(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems)), n)
+
+ err = db.LSet(tn, setKey, 0, []byte("newv"))
+ assert.Nil(t, err)
+ elem, err = db.LIndex(setKey, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, []byte("newv"), elem)
+
+ time.Sleep(time.Second * time.Duration(ttl+1))
+ dbLog.Infof("wait expired done")
+ n, err = db.ListTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ tn = time.Now().UnixNano()
+ elem, err = db.LIndex(setKey, 0)
+ assert.Nil(t, err)
+ assert.Nil(t, elem)
+ err = db.LSet(tn, setKey, 0, []byte("newv2"))
+ assert.NotNil(t, err)
+ n, err = db.LKeyExists(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ vlist, err = db.LRange(setKey, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vlist))
+ n, err = db.LLen(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ elem, err = db.LPop(tn, setKey)
+ assert.Nil(t, err)
+ assert.Nil(t, elem)
+
+ // renew
+ mems2 := [][]byte{
+ []byte("newmem1"), []byte("newmem2"),
+ }
+ time.Sleep(time.Second)
+ tn = time.Now().UnixNano()
+ n, err = db.RPush(tn, setKey, mems2...)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems2)), n)
+
+ n, err = db.ListTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ n, err = db.LLen(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems2)), n)
+
+ vlist, err = db.LRange(setKey, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, len(mems2), len(vlist))
+ assert.Equal(t, mems2[0], vlist[0])
+
+ elem, err = db.LIndex(setKey, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, mems2[0], elem)
+ err = db.LSet(tn, setKey, 0, []byte("newv3"))
+ assert.Nil(t, err)
+ elem, err = db.LIndex(setKey, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, []byte("newv3"), elem)
+
+ n, err = db.LKeyExists(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+}
+
+func TestSetTTL_Compact(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ setKey := []byte("test:testdbTTL_set_compact")
+ var setTTL int64 = int64(rand.Int31() - 10)
+
+ if v, err := db.SetTtl(setKey); err != nil {
+ t.Fatal(err)
+ } else if v != -1 {
+ t.Fatal("ttl of not exist set key is not -1")
+ }
+ tn := time.Now().UnixNano()
+
+ if v, err := db.SExpire(tn, setKey, setTTL); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Fatal("return value from expire of not exist set key != 0")
+ }
+
+ if v, err := db.SPersist(tn, setKey); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Fatal("return value from spersist of not exist set key != 0")
+ }
+
+ if _, err := db.SAdd(0, setKey, []byte("this"), []byte("is"), []byte("set"),
+ []byte("ttl"), []byte("test")); err != nil {
+ t.Fatal(err)
+ }
+
+ if v, err := db.SExpire(tn, setKey, setTTL); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal("return value from sexpire != 1")
+ }
+
+ if v, err := db.SetTtl(setKey); err != nil {
+ t.Fatal(err)
+ } else if v != setTTL {
+ t.Fatal("ttl != expire")
+ }
+
+ if v, err := db.SPersist(tn, setKey); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal("return value from spersist!= 1")
+ }
+
+ if v, err := db.SetTtl(setKey); err != nil {
+ t.Fatal(err)
+ } else if v != -1 {
+ t.Fatal("SetPersist do not clear the ttl")
+ }
+}
+
+func TestSetTTL_Compact_KeepTTL(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ setKey := []byte("test:testdbTTL_set_compact_keepttl")
+ var ttl int64 = int64(rand.Int31() - 10)
+ tn := time.Now().UnixNano()
+ mems := [][]byte{
+ []byte("m1"), []byte("m2"), []byte("m3"),
+ }
+
+ n, err := db.SAdd(tn, setKey, mems...)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems)), n)
+ n, err = db.SExpire(tn, setKey, ttl)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.SetTtl(setKey)
+ assert.Nil(t, err)
+ assert.InDelta(t, ttl, n, 1)
+
+ // should keep ttl
+ n, err = db.SAdd(tn, setKey, []byte("newm1"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.SetTtl(setKey)
+ assert.Nil(t, err)
+ assert.InDelta(t, ttl, n, 1)
+
+ n, err = db.SRem(tn, setKey, mems[0])
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.SetTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, ttl, n)
+ _, err = db.SPop(tn, setKey, 1)
+ assert.Nil(t, err)
+ n, err = db.SetTtl(setKey)
+ assert.Nil(t, err)
+ assert.InDelta(t, ttl, n, 1)
+}
+
+func TestSetTTL_Compact_TTLExpired(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ setKey := []byte("test:testdbTTL_set_compact_expired")
+ var ttl int64 = int64(3)
+ tn := time.Now().UnixNano()
+ mems := [][]byte{
+ []byte("mem1"), []byte("mem2"), []byte("mem3"),
+ }
+
+ n, err := db.SAdd(tn, setKey, mems...)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems)), n)
+ n, err = db.SExpire(tn, setKey, ttl)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.SetTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, ttl, n)
+
+ n, err = db.SIsMember(setKey, mems[0])
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ vlist, err := db.SMembers(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, len(mems), len(vlist))
+ n, err = db.SCard(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems)), n)
+
+ time.Sleep(time.Second * time.Duration(ttl+1))
+ dbLog.Infof("wait expired done")
+ n, err = db.SetTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ tn = time.Now().UnixNano()
+ n, err = db.SIsMember(setKey, mems[0])
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.SKeyExists(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ vlist, err = db.SMembers(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vlist))
+ vlist, err = db.SScan(setKey, []byte(""), -1, "", false)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vlist))
+ n, err = db.SCard(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ vlist, err = db.SPop(tn, setKey, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vlist))
+
+ // renew
+ mems2 := [][]byte{
+ []byte("newmem1"), []byte("newmem2"),
+ }
+ time.Sleep(time.Second)
+ tn = time.Now().UnixNano()
+ n, err = db.SAdd(tn, setKey, mems2...)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems2)), n)
+
+ n, err = db.SetTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ n, err = db.SCard(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems2)), n)
+
+ vlist, err = db.SMembers(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, len(mems2), len(vlist))
+ assert.Equal(t, mems2[0], vlist[0])
+
+ n, err = db.SIsMember(setKey, mems[0])
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.SIsMember(setKey, mems2[0])
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.SKeyExists(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ vlist, err = db.SScan(setKey, []byte(""), -1, "", false)
+ assert.Nil(t, err)
+ assert.Equal(t, len(mems2), len(vlist))
+ assert.Equal(t, mems2[0], vlist[0])
+
+ vlist, err = db.SPop(tn, setKey, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(vlist))
+ assert.Equal(t, mems2[0], vlist[0])
+}
+
+func TestZSetTTL_Compact(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ zsetKey := []byte("test:testdbTTL_zset_c")
+ var zsetTTL int64 = 1000
+
+ tn := time.Now().UnixNano()
+ if v, err := db.ZSetTtl(zsetKey); err != nil {
+ t.Fatal(err)
+ } else if v != -1 {
+ t.Fatal("ttl of not exist zset key is not -1")
+ }
+
+ if v, err := db.ZExpire(tn, zsetKey, zsetTTL); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Fatal("return value from expire of not exist zset key != 0")
+ }
+
+ if v, err := db.ZPersist(tn, zsetKey); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Fatal("return value from zpersist of not exist zset key != 0")
+ }
+
+ members := []common.ScorePair{
+ {Member: []byte("member1"), Score: 10},
+ {Member: []byte("member2"), Score: 20},
+ {Member: []byte("member3"), Score: 30},
+ {Member: []byte("member4"), Score: 40},
+ }
+
+ if _, err := db.ZAdd(0, zsetKey, members...); err != nil {
+ t.Fatal(err)
+ }
+
+ if v, err := db.ZExpire(tn, zsetKey, zsetTTL); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal("return value from zexpire != 1")
+ }
+
+ if v, err := db.ZSetTtl(zsetKey); err != nil {
+ t.Fatal(err)
+ } else if v != zsetTTL {
+ t.Fatal("ttl != expire")
+ }
+
+ if v, err := db.ZPersist(tn, zsetKey); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal("return value from zpersist != 1")
+ }
+
+ if v, err := db.ZSetTtl(zsetKey); err != nil {
+ t.Fatal(err)
+ } else if v != -1 {
+ t.Fatal("ZSetPersist do not clear the ttl")
+ }
+}
+
+func TestZSetTTL_Compact_KeepTTL(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ setKey := []byte("test:testdbTTL_zset_compact_keepttl")
+ var ttl int64 = int64(rand.Int31() - 10)
+ tn := time.Now().UnixNano()
+ mems := []common.ScorePair{
+ {Score: 1, Member: []byte("m1")},
+ {Score: 2, Member: []byte("m2")},
+ {Score: 3, Member: []byte("m3")},
+ }
+
+ n, err := db.ZAdd(tn, setKey, mems...)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems)), n)
+ n, err = db.ZExpire(tn, setKey, ttl)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.ZSetTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, ttl, n)
+
+ mems2 := []common.ScorePair{
+ {Score: 4, Member: []byte("newm4")},
+ {Score: 5, Member: []byte("newm5")},
+ }
+ // should keep ttl
+ n, err = db.ZAdd(tn, setKey, mems2[0])
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.ZSetTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, ttl, n)
+
+ n, err = db.ZRem(tn, setKey, mems[0].Member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ n, err = db.ZSetTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, ttl, n)
+}
+
+func TestZSetTTL_Compact_TTLExpired(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ setKey := []byte("test:testdbTTL_zset_compact_expired")
+ var ttl int64 = int64(3)
+ tn := time.Now().UnixNano()
+
+ mems := []common.ScorePair{
+ {Score: 1, Member: []byte("m1")},
+ {Score: 2, Member: []byte("m2")},
+ {Score: 3, Member: []byte("m3")},
+ }
+
+ n, err := db.ZAdd(tn, setKey, mems...)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems)), n)
+ n, err = db.ZExpire(tn, setKey, ttl)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.ZSetTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, ttl, n)
+
+ score, err := db.ZScore(setKey, mems[0].Member)
+ assert.Nil(t, err)
+ assert.Equal(t, mems[0].Score, score)
+ n, err = db.ZRank(setKey, mems[0].Member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.ZRevRank(setKey, mems[0].Member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ vlist, err := db.ZRange(setKey, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, len(mems), len(vlist))
+ mlist, err := db.ZRangeByLex(setKey, nil, nil, common.RangeClose, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, len(mems), len(mlist))
+ vlist, err = db.ZRangeByScore(setKey, 0, 100, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, len(mems), len(vlist))
+
+ n, err = db.ZCard(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems)), n)
+ n, err = db.ZCount(setKey, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems)), n)
+ n, err = db.ZLexCount(setKey, nil, nil, common.RangeClose)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems)), n)
+
+ time.Sleep(time.Second * time.Duration(ttl+1))
+ dbLog.Infof("wait expired done")
+ n, err = db.ZSetTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ tn = time.Now().UnixNano()
+ score, err = db.ZScore(setKey, mems[0].Member)
+ assert.Equal(t, errScoreMiss, err)
+ assert.Equal(t, float64(0), score)
+ n, err = db.ZRank(setKey, mems[0].Member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+ n, err = db.ZRevRank(setKey, mems[0].Member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ n, err = db.ZKeyExists(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ vlist, err = db.ZRange(setKey, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vlist))
+ mlist, err = db.ZRangeByLex(setKey, nil, nil, common.RangeClose, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(mlist))
+ vlist, err = db.ZRangeByScore(setKey, 0, 100, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vlist))
+ vlist, err = db.ZRevRange(setKey, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vlist))
+ vlist, err = db.ZScan(setKey, []byte(""), -1, "", false)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vlist))
+
+ n, err = db.ZCard(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.ZCount(setKey, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.ZLexCount(setKey, nil, nil, common.RangeClose)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ // renew the key after it expired; the zset should start empty again
+ mems2 := []common.ScorePair{
+ common.ScorePair{4, []byte("newm4")},
+ common.ScorePair{5, []byte("newm5")},
+ }
+ time.Sleep(time.Second)
+ tn = time.Now().UnixNano()
+ n, err = db.ZAdd(tn, setKey, mems2...)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems2)), n)
+
+ n, err = db.ZSetTtl(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ n, err = db.ZCard(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems2)), n)
+
+ vlist, err = db.ZRange(setKey, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, len(mems2), len(vlist))
+ assert.Equal(t, mems2[0], vlist[0])
+
+ score, err = db.ZScore(setKey, mems[0].Member)
+ assert.Equal(t, errScoreMiss, err)
+ assert.Equal(t, float64(0), score)
+ n, err = db.ZRank(setKey, mems[0].Member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+ n, err = db.ZRevRank(setKey, mems[0].Member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ score, err = db.ZScore(setKey, mems2[0].Member)
+ assert.Nil(t, err)
+ assert.Equal(t, float64(mems2[0].Score), score)
+ n, err = db.ZRank(setKey, mems2[0].Member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.ZRevRank(setKey, mems2[0].Member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.ZKeyExists(setKey)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ vlist, err = db.ZScan(setKey, []byte(""), -1, "", false)
+ assert.Nil(t, err)
+ assert.Equal(t, len(mems2), len(vlist))
+ assert.Equal(t, mems2[0], vlist[0])
+}
+
+func TestDBCompactTTL(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ kTypeMap := make(map[string]byte)
+ dataTypes := []byte{KVType, ListType, HashType, SetType, ZSetType, BitmapType}
+
+ for i := 0; i < 10000*3+rand.Intn(10000); i++ {
+ key := "test:ttl_checker_compact:" + strconv.Itoa(i)
+ dataType := dataTypes[rand.Int()%len(dataTypes)]
+ kTypeMap[key] = dataType
+
+ switch dataType {
+ case KVType:
+ db.KVSet(0, []byte(key), []byte("test_checker_local_kvValue"))
+
+ case ListType:
+ tListKey := []byte(key)
+ db.LPush(0, tListKey, []byte("this"), []byte("is"), []byte("list"),
+ []byte("local"), []byte("deletion"), []byte("ttl"), []byte("checker"), []byte("test"))
+
+ case HashType:
+ tHashKey := []byte(key)
+ tHashVal := []common.KVRecord{
+ {Key: []byte("field0"), Value: []byte("value0")},
+ {Key: []byte("field1"), Value: []byte("value1")},
+ {Key: []byte("field2"), Value: []byte("value2")},
+ }
+ db.HMset(0, tHashKey, tHashVal...)
+
+ case SetType:
+ tSetKey := []byte(key)
+ db.SAdd(0, tSetKey, []byte("this"), []byte("is"), []byte("set"),
+ []byte("local"), []byte("deletion"), []byte("ttl"), []byte("checker"), []byte("test"))
+
+ case ZSetType:
+ tZsetKey := []byte(key)
+ members := []common.ScorePair{
+ {Member: []byte("member1"), Score: 11},
+ {Member: []byte("member2"), Score: 22},
+ {Member: []byte("member3"), Score: 33},
+ {Member: []byte("member4"), Score: 44},
+ }
+
+ db.ZAdd(0, tZsetKey, members...)
+ case BitmapType:
+ tBitKey := []byte(key)
+ db.BitSetV2(0, tBitKey, 0, 1)
+ db.BitSetV2(0, tBitKey, 1, 1)
+ db.BitSetV2(0, tBitKey, bitmapSegBits, 1)
+ }
+
+ tn := time.Now().UnixNano()
+ if _, err := db.expire(tn, dataType, []byte(key), nil, 2); err != nil {
+ t.Fatalf("expire key %v , type %v, err: %v", key, dataType, err)
+ }
+ }
+
+ time.Sleep(3 * time.Second)
+
+ for k, v := range kTypeMap {
+ switch v {
+ case KVType:
+ if v, err := db.KVGet([]byte(k)); err != nil {
+ t.Fatal(err)
+ } else if v != nil {
+ t.Errorf("key:%s of KVType do not expired", string(k))
+ }
+ case HashType:
+ if v, err := db.HLen([]byte(k)); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Errorf("key:%s of HashType do not expired", string(k))
+ }
+ case ListType:
+ if v, err := db.LLen([]byte(k)); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Errorf("key:%s of ListType do not expired", string(k))
+ }
+ case SetType:
+ if v, err := db.SCard([]byte(k)); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Errorf("key:%s of SetType do not expired", string(k))
+ }
+ case ZSetType:
+ if v, err := db.ZCard([]byte(k)); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Errorf("key:%s of ZSetType do not expired", string(k))
+ }
+ case BitmapType:
+ if n, err := db.BitCountV2([]byte(k), 0, -1); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Errorf("key:%s of BitmapType do not expired", string(k))
+ }
+ }
+ }
+}
+
+func TestDBTableCounterWithExpired(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+ key := []byte("test-set:compact_ttl_tablecounter_test")
+ key2 := []byte("test-set:compact_ttl_tablecounter_test2")
+ ttl := 2
+
+ tn := time.Now().UnixNano()
+ db.SAdd(tn, key, []byte("hello"), []byte("world"))
+ db.SAdd(tn, key2, []byte("hello"), []byte("world"))
+
+ n, err := db.GetTableKeyCount([]byte("test-set"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ db.SExpire(tn, key, int64(ttl))
+ // wait for the key to expire; renewing it should keep the table counter
+ time.Sleep(time.Second * time.Duration(ttl+1))
+
+ tn = time.Now().UnixNano()
+ db.SAdd(tn, key, []byte("hello2"), []byte("world2"))
+
+ n, err = db.GetTableKeyCount([]byte("test-set"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ key = []byte("test-list:compact_ttl_tablecounter_test")
+ key2 = []byte("test-list:compact_ttl_tablecounter_test2")
+
+ tn = time.Now().UnixNano()
+ db.RPush(tn, key, []byte("hello"), []byte("world"))
+ db.RPush(tn, key2, []byte("hello"), []byte("world"))
+
+ n, err = db.GetTableKeyCount([]byte("test-list"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ n, err = db.LExpire(tn, key, int64(ttl))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ // wait for the key to expire; renewing it should keep the table counter
+ time.Sleep(time.Second * time.Duration(ttl+1))
+
+ n, err = db.ListTtl(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+ n, err = db.LLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ tn = time.Now().UnixNano()
+ n, err = db.RPush(tn, key, []byte("hello2"), []byte("world2"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ n, err = db.GetTableKeyCount([]byte("test-list"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ key = []byte("test-hash:compact_ttl_tablecounter_test")
+ key2 = []byte("test-hash:compact_ttl_tablecounter_test2")
+
+ tn = time.Now().UnixNano()
+ db.HSet(tn, false, key, []byte("hello"), []byte("world"))
+ db.HSet(tn, false, key2, []byte("hello"), []byte("world"))
+
+ n, err = db.GetTableKeyCount([]byte("test-hash"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ n, err = db.HExpire(tn, key, int64(ttl))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ // wait for the key to expire; renewing it should keep the table counter
+ time.Sleep(time.Second * time.Duration(ttl+1))
+
+ n, err = db.HashTtl(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+ n, err = db.HLen(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ tn = time.Now().UnixNano()
+ n, err = db.HSet(tn, false, key, []byte("hello2"), []byte("world2"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.GetTableKeyCount([]byte("test-hash"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ key = []byte("test-zset:compact_ttl_tablecounter_test")
+ key2 = []byte("test-zset:compact_ttl_tablecounter_test2")
+
+ tn = time.Now().UnixNano()
+ db.ZAdd(tn, key, common.ScorePair{1, []byte("hello")})
+ db.ZAdd(tn, key2, common.ScorePair{2, []byte("hello")})
+
+ n, err = db.GetTableKeyCount([]byte("test-zset"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ n, err = db.ZExpire(tn, key, int64(ttl))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ // wait for the key to expire; renewing it should keep the table counter
+ time.Sleep(time.Second * time.Duration(ttl+1))
+
+ n, err = db.ZSetTtl(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+ n, err = db.ZCard(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ tn = time.Now().UnixNano()
+ n, err = db.ZAdd(tn, key, common.ScorePair{3, []byte("hello")})
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.GetTableKeyCount([]byte("test-zset"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+}
+
+func TestDBTableCounterWithKVExpired(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+ key := []byte("test-kv:compact_ttl_tablecounter_test")
+ key2 := []byte("test-kv:compact_ttl_tablecounter_test2")
+ ttl := 2
+
+ tn := time.Now().UnixNano()
+ db.KVSet(tn, key, []byte("hello"))
+ db.KVSet(tn, key2, []byte("hello"))
+
+ n, err := db.GetTableKeyCount([]byte("test-kv"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ n, err = db.Expire(tn, key, int64(ttl))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ // wait for the key to expire; renewing it should keep the table counter
+ time.Sleep(time.Second * time.Duration(ttl+1))
+
+ n, err = db.KVTtl(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+ n, err = db.KVExists(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ tn = time.Now().UnixNano()
+ err = db.KVSet(tn, key, []byte("hello2"))
+ assert.Nil(t, err)
+
+ n, err = db.GetTableKeyCount([]byte("test-kv"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ n, err = db.Expire(tn, key, int64(ttl))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ time.Sleep(time.Second * time.Duration(ttl+1))
+ n, err = db.KVExists(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ tn = time.Now().UnixNano()
+ n, err = db.SetNX(tn, key, []byte("hello2"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.GetTableKeyCount([]byte("test-kv"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ n, err = db.Expire(tn, key, int64(ttl))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ time.Sleep(time.Second * time.Duration(ttl+1))
+ n, err = db.KVExists(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ tn = time.Now().UnixNano()
+ err = db.SetEx(tn, key, int64(ttl), []byte("hello2"))
+ assert.Nil(t, err)
+
+ n, err = db.GetTableKeyCount([]byte("test-kv"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+
+ time.Sleep(time.Second * time.Duration(ttl+1))
+ n, err = db.KVExists(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ tn = time.Now().UnixNano()
+ n, err = db.SetRange(tn, key, 10, []byte("hello2"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(16), n)
+
+ n, err = db.GetTableKeyCount([]byte("test-kv"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+}
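
As an aside (not part of this patch): the table-counter tests above repeat the same write / expire / sleep / renew / recount sequence for every data type. A minimal sketch of that shared shape is shown below, assuming it would live in the same rockredis test package and that the write/expire callbacks wrap the type-specific calls already used above (SAdd/SExpire, RPush/LExpire, HSet/HExpire, ZAdd/ZExpire); the helper name is hypothetical.

// sketch only: verify that a key expiring and being written again
// does not change the per-table key counter.
func checkCounterKeptAfterRenew(t *testing.T, db *RockDB, table []byte,
	write func(tn int64) error, expire func(tn int64, ttl int64) error, ttl int64) {
	tn := time.Now().UnixNano()
	// first write: the table counter should count the key once
	if err := write(tn); err != nil {
		t.Fatal(err)
	}
	before, err := db.GetTableKeyCount(table)
	assert.Nil(t, err)

	// let the key expire, then write it again; the counter should be unchanged
	if err := expire(tn, ttl); err != nil {
		t.Fatal(err)
	}
	time.Sleep(time.Second * time.Duration(ttl+1))
	if err := write(time.Now().UnixNano()); err != nil {
		t.Fatal(err)
	}
	after, err := db.GetTableKeyCount(table)
	assert.Nil(t, err)
	assert.Equal(t, before, after)
}
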
diff --git a/rockredis/t_ttl_l.go b/rockredis/t_ttl_l.go
index d82b178d..65d53cec 100644
--- a/rockredis/t_ttl_l.go
+++ b/rockredis/t_ttl_l.go
@@ -6,8 +6,8 @@ import (
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/gorocksdb"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
)
var localExpCheckInterval = 300
@@ -21,57 +21,89 @@ const (
var (
ErrLocalBatchFullToCommit = errors.New("batched is fully filled and should commit right now")
ErrLocalBatchedBuffFull = errors.New("the local batched buffer is fully filled")
+ errTTLCheckTooLong = errors.New("the local ttl scan take too long")
+ errChangeTTLNotSupported = errors.New("change ttl is not supported in current expire policy")
)
type localExpiration struct {
*TTLChecker
- db *RockDB
- stopCh chan struct{}
- wg sync.WaitGroup
- localBuffer *localBatchedBuffer
- running int32
+ db *RockDB
+ stopCh chan struct{}
+ wg sync.WaitGroup
+ running int32
}
func newLocalExpiration(db *RockDB) *localExpiration {
exp := &localExpiration{
- db: db,
- TTLChecker: newTTLChecker(db),
- localBuffer: newLocalBatchedBuffer(db, localBatchedBufSize),
+ db: db,
+ TTLChecker: newTTLChecker(db),
}
return exp
}
-func (exp *localExpiration) expireAt(dataType byte, key []byte, when int64) error {
- wb := exp.db.wb
- wb.Clear()
+func (exp *localExpiration) encodeToVersionKey(dt byte, h *headerMetaValue, key []byte) []byte {
+ return key
+}
- tk := expEncodeTimeKey(dataType, key, when)
- mk := expEncodeMetaKey(dataType, key)
+func (exp *localExpiration) decodeFromVersionKey(dt byte, key []byte) ([]byte, int64, error) {
+ return key, 0, nil
+}
- wb.Put(tk, mk)
+func (exp *localExpiration) encodeToRawValue(dataType byte, h *headerMetaValue) []byte {
+ return h.UserData
+}
- if err := exp.db.eng.Write(exp.db.defaultWriteOpts, wb); err != nil {
- return err
- } else {
- exp.setNextCheckTime(when, false)
- return nil
+func (exp *localExpiration) decodeRawValue(dataType byte, rawValue []byte) (*headerMetaValue, error) {
+ var h headerMetaValue
+ h.UserData = rawValue
+ return &h, nil
+}
+
+func (exp *localExpiration) getRawValueForHeader(ts int64, dataType byte, key []byte) ([]byte, error) {
+ return nil, nil
+}
+
+func (exp *localExpiration) isExpired(ts int64, dataType byte, key []byte, rawValue []byte, useLock bool) (bool, error) {
+ return false, nil
+}
+
+func (exp *localExpiration) ExpireAt(dataType byte, key []byte, rawValue []byte, when int64) (int64, error) {
+ if when == 0 {
+ return 0, errChangeTTLNotSupported
+ }
+ wb := exp.db.wb
+ defer wb.Clear()
+ _, err := exp.rawExpireAt(dataType, key, rawValue, when, wb)
+ if err != nil {
+ return 0, err
+ }
+ if err := exp.db.rockEng.Write(wb); err != nil {
+ return 0, err
}
+ return 1, nil
}
-func (exp *localExpiration) rawExpireAt(dataType byte, key []byte, when int64, wb *gorocksdb.WriteBatch) error {
+func (exp *localExpiration) rawExpireAt(dataType byte, key []byte, rawValue []byte, when int64, wb engine.WriteBatch) ([]byte, error) {
tk := expEncodeTimeKey(dataType, key, when)
mk := expEncodeMetaKey(dataType, key)
wb.Put(tk, mk)
- return nil
+ exp.setNextCheckTime(when, false)
+ return rawValue, nil
}
-func (exp *localExpiration) ttl(byte, []byte) (int64, error) {
+func (exp *localExpiration) ttl(int64, byte, []byte, []byte) (int64, error) {
return -1, nil
}
-func (exp *localExpiration) delExpire(byte, []byte, *gorocksdb.WriteBatch) error {
- return nil
+func (exp *localExpiration) renewOnExpired(ts int64, dataType byte, key []byte, oldh *headerMetaValue) {
+ // local expire should not renew expired data, since the expire handler will check it
+ // and clean the ttl together with all the sub data
+ return
+}
+
+func (exp *localExpiration) delExpire(dt byte, key []byte, rawv []byte, keepV bool, wb engine.WriteBatch) ([]byte, error) {
+ return rawv, nil
}
func (exp *localExpiration) check(buffer common.ExpiredDataBuffer, stop chan struct{}) error {
@@ -97,22 +129,28 @@ func (exp *localExpiration) applyExpiration(stop chan struct{}) {
defer t.Stop()
checker := exp.TTLChecker
-
+ localBuffer := newLocalBatchedBuffer(exp.db, localBatchedBufSize)
+ defer localBuffer.Destroy()
for {
select {
case <-t.C:
for {
- err := checker.check(exp.localBuffer, stop)
+ err := checker.check(localBuffer, stop)
select {
case <-stop:
- exp.localBuffer.Clear()
+ localBuffer.Clear()
return
default:
- exp.localBuffer.commit()
+ localBuffer.commit()
}
//start the next check immediately if the last check is stopped because of the buffer is fully filled
if err == ErrLocalBatchedBuffFull {
+ // avoid 100% cpu
+ time.Sleep(time.Millisecond)
continue
+ } else if err == errTTLCheckTooLong {
+ // do not trigger a manual ttl compaction here, since it may cause a write stall
+ //
} else if err != nil {
dbLog.Errorf("check expired data failed at applying expiration, err:%s", err.Error())
}
@@ -182,7 +220,10 @@ func (self *localBatchedBuffer) commit() {
for _, v := range self.buff {
dt, key, _, err := expDecodeTimeKey(v.timeKey)
if err != nil || dataType2CommonType(dt) == common.NONE {
- dbLog.Errorf("decode time-key failed, bad data encounter, err:%s", err.Error())
+ // currently the bitmap/json types are not supported
+ if err != nil {
+ dbLog.Errorf("decode time-key failed, bad data encounter, err:%s, %v", err, dt)
+ }
continue
}
@@ -214,18 +255,14 @@ func (exp *localExpiration) Stop() {
if atomic.CompareAndSwapInt32(&exp.running, 1, 0) {
close(exp.stopCh)
exp.wg.Wait()
- exp.localBuffer.Clear()
}
}
func (exp *localExpiration) Destroy() {
exp.Stop()
- if exp.localBuffer != nil {
- exp.localBuffer.Destroy()
- }
}
-func createLocalDelFunc(dt common.DataType, db *RockDB, wb *gorocksdb.WriteBatch) func(keys [][]byte) error {
+func createLocalDelFunc(dt common.DataType, db *RockDB, wb engine.WriteBatch) func(keys [][]byte) error {
switch dt {
case common.KV:
return func(keys [][]byte) error {
@@ -233,7 +270,7 @@ func createLocalDelFunc(dt common.DataType, db *RockDB, wb *gorocksdb.WriteBatch
for _, k := range keys {
db.KVDelWithBatch(k, wb)
}
- err := db.eng.Write(db.defaultWriteOpts, wb)
+ err := db.rockEng.Write(wb)
if err != nil {
return err
}
@@ -250,7 +287,7 @@ func createLocalDelFunc(dt common.DataType, db *RockDB, wb *gorocksdb.WriteBatch
return err
}
}
- return db.eng.Write(db.defaultWriteOpts, wb)
+ return db.rockEng.Write(wb)
}
case common.LIST:
return func(keys [][]byte) error {
@@ -258,7 +295,7 @@ func createLocalDelFunc(dt common.DataType, db *RockDB, wb *gorocksdb.WriteBatch
if err := db.lMclearWithBatch(wb, keys...); err != nil {
return err
}
- return db.eng.Write(db.defaultWriteOpts, wb)
+ return db.rockEng.Write(wb)
}
case common.SET:
return func(keys [][]byte) error {
@@ -266,7 +303,7 @@ func createLocalDelFunc(dt common.DataType, db *RockDB, wb *gorocksdb.WriteBatch
if err := db.sMclearWithBatch(wb, keys...); err != nil {
return err
}
- return db.eng.Write(db.defaultWriteOpts, wb)
+ return db.rockEng.Write(wb)
}
case common.ZSET:
return func(keys [][]byte) error {
@@ -274,9 +311,10 @@ func createLocalDelFunc(dt common.DataType, db *RockDB, wb *gorocksdb.WriteBatch
if err := db.zMclearWithBatch(wb, keys...); err != nil {
return err
}
- return db.eng.Write(db.defaultWriteOpts, wb)
+ return db.rockEng.Write(wb)
}
default:
+ // TODO: bitmap/json types are currently not handled
return nil
}
}
@@ -284,14 +322,14 @@ func createLocalDelFunc(dt common.DataType, db *RockDB, wb *gorocksdb.WriteBatch
type localBatch struct {
keys [][]byte
dt common.DataType
- wb *gorocksdb.WriteBatch
+ wb engine.WriteBatch
localDelFn func([][]byte) error
}
func newLocalBatch(db *RockDB, dt common.DataType) *localBatch {
batch := &localBatch{
dt: dt,
- wb: gorocksdb.NewWriteBatch(),
+ wb: db.rockEng.NewWriteBatch(),
keys: make([][]byte, 0, localBatchedMaxKeysNum),
}
batch.localDelFn = createLocalDelFunc(dt, db, batch.wb)
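
For orientation only (not project code): rawExpireAt above records an index entry, a time key pointing at the meta key, ordered by the expiration timestamp, and applyExpiration periodically asks the TTL checker for every entry that is already due, committing the actual deletions per data type through createLocalDelFunc. The standalone sketch below models that flow with plain in-memory structures; all names (expIndex, expireAt, check) are hypothetical.

// Standalone sketch of the local-expiration idea: record deadlines in order,
// and let a periodic checker drain everything that is already due.
package main

import (
	"fmt"
	"sort"
	"sync"
	"time"
)

// expEntry mirrors the idea of a time key: an expiration deadline plus the key it points at.
type expEntry struct {
	when int64 // unix seconds at which the key should expire
	key  string
}

type expIndex struct {
	mu      sync.Mutex
	entries []expEntry // kept ordered by deadline, like the time-key prefix ordering
}

// expireAt records the deadline, similar to rawExpireAt putting (timeKey, metaKey) into the batch.
func (idx *expIndex) expireAt(key string, when int64) {
	idx.mu.Lock()
	defer idx.mu.Unlock()
	idx.entries = append(idx.entries, expEntry{when: when, key: key})
	sort.Slice(idx.entries, func(i, j int) bool { return idx.entries[i].when < idx.entries[j].when })
}

// check drains everything that is already due, similar to the TTL checker filling the
// local batched buffer before the per-type delete functions commit the write batch.
func (idx *expIndex) check(now int64) []string {
	idx.mu.Lock()
	defer idx.mu.Unlock()
	i := 0
	for ; i < len(idx.entries) && idx.entries[i].when <= now; i++ {
	}
	due := make([]string, 0, i)
	for _, e := range idx.entries[:i] {
		due = append(due, e.key)
	}
	idx.entries = append([]expEntry{}, idx.entries[i:]...)
	return due
}

func main() {
	idx := &expIndex{}
	idx.expireAt("test:a", time.Now().Unix()+1)
	idx.expireAt("test:b", time.Now().Unix()+2)

	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	deadline := time.Now().Add(3 * time.Second)
	for time.Now().Before(deadline) {
		<-ticker.C
		for _, k := range idx.check(time.Now().Unix()) {
			fmt.Println("expired, would delete:", k) // the real code groups keys per data type first
		}
	}
}
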
diff --git a/rockredis/t_ttl_l_test.go b/rockredis/t_ttl_l_test.go
index a0a6da1f..a8922947 100644
--- a/rockredis/t_ttl_l_test.go
+++ b/rockredis/t_ttl_l_test.go
@@ -8,7 +8,8 @@ import (
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
+ "github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
)
func TestKVTTL_L(t *testing.T) {
@@ -18,8 +19,9 @@ func TestKVTTL_L(t *testing.T) {
key1 := []byte("test:testdbTTL_kv_l")
var ttl1 int64 = rand.Int63()
+ tn := time.Now().UnixNano()
- if v, err := db.Expire(key1, ttl1); err != nil {
+ if v, err := db.Expire(tn, key1, ttl1); err != nil {
t.Fatal(err)
} else if v != 0 {
t.Fatal("return value from expire of not exist key != 0")
@@ -29,15 +31,13 @@ func TestKVTTL_L(t *testing.T) {
t.Fatal(err)
}
- if v, err := db.Expire(key1, ttl1); err != nil {
+ if v, err := db.Expire(tn, key1, ttl1); err != nil {
t.Fatal(err)
} else if v != 1 {
t.Fatal("return value from expire != 1")
}
- if v, err := db.Persist(key1); err != nil {
- t.Fatal(err)
- } else if v != 0 {
+ if v, _ := db.Persist(tn, key1); v != 0 {
t.Fatal("return value from persist of LocalDeletion Policy != 0")
}
@@ -68,7 +68,8 @@ func TestHashTTL_L(t *testing.T) {
hashKey := []byte("test:testdbTTL_hash_l")
var hashTTL int64 = rand.Int63()
- if v, err := db.HExpire(hashKey, hashTTL); err != nil {
+ tn := time.Now().UnixNano()
+ if v, err := db.HExpire(tn, hashKey, hashTTL); err != nil {
t.Fatal(err)
} else if v != 0 {
t.Fatal("return value from expire of not exist hash key != 0")
@@ -84,7 +85,7 @@ func TestHashTTL_L(t *testing.T) {
t.Fatal(err)
}
- if v, err := db.HExpire(hashKey, hashTTL); err != nil {
+ if v, err := db.HExpire(tn, hashKey, hashTTL); err != nil {
t.Fatal(err)
} else if v != 1 {
t.Fatal("return value from hexpire != 1")
@@ -96,11 +97,45 @@ func TestHashTTL_L(t *testing.T) {
t.Fatal("return value from HashTtl of LocalDeletion Policy != -1")
}
- if v, err := db.HPersist(hashKey); err != nil {
+ v, err := db.HPersist(tn, hashKey)
+ assert.NotNil(t, err)
+ assert.Equal(t, int64(0), v)
+}
+
+func TestBitmapV2TTL_L(t *testing.T) {
+ db := getTestDBWithExpirationPolicy(t, common.LocalDeletion)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key := []byte("test:testdbTTL_bitmap_l")
+ var ttl int64 = rand.Int63()
+
+ tn := time.Now().UnixNano()
+ if v, err := db.BitExpire(tn, key, ttl); err != nil {
t.Fatal(err)
} else if v != 0 {
- t.Fatal("return value from HPersist of LocalDeletion Policy != 0")
+ t.Fatal("return value from expire of not exist key != 0")
+ }
+
+ if _, err := db.BitSetV2(0, key, 1, 1); err != nil {
+ t.Fatal(err)
+ }
+
+ if v, err := db.BitExpire(tn, key, ttl); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal("return value from expire != 1")
}
+
+ if v, err := db.BitTtl(key); err != nil {
+ t.Fatal(err)
+ } else if v != -1 {
+ t.Fatal("return value from BitTtl of LocalDeletion Policy != -1")
+ }
+
+ v, err := db.BitPersist(tn, key)
+ assert.NotNil(t, err)
+ assert.Equal(t, int64(0), v)
}
func TestListTTL_L(t *testing.T) {
@@ -111,7 +146,8 @@ func TestListTTL_L(t *testing.T) {
listKey := []byte("test:testdbTTL_list_l")
var listTTL int64 = rand.Int63()
- if v, err := db.LExpire(listKey, listTTL); err != nil {
+ tn := time.Now().UnixNano()
+ if v, err := db.LExpire(tn, listKey, listTTL); err != nil {
t.Fatal(err)
} else if v != 0 {
t.Fatal("return value from expire of not exist list key != 0")
@@ -122,7 +158,7 @@ func TestListTTL_L(t *testing.T) {
t.Fatal(err)
}
- if v, err := db.LExpire(listKey, listTTL); err != nil {
+ if v, err := db.LExpire(tn, listKey, listTTL); err != nil {
t.Fatal(err)
} else if v != 1 {
t.Fatal("return value from lexpire != 1")
@@ -134,11 +170,9 @@ func TestListTTL_L(t *testing.T) {
t.Fatal("return value from ListTtl of LocalDeletion Policy != -1")
}
- if v, err := db.LPersist(listKey); err != nil {
- t.Fatal(err)
- } else if v != 0 {
- t.Fatal("return value from LPersist of LocalDeletion Policy != 0")
- }
+ v, err := db.LPersist(tn, listKey)
+ assert.NotNil(t, err)
+ assert.Equal(t, int64(0), v)
}
func TestSetTTL_L(t *testing.T) {
@@ -149,7 +183,8 @@ func TestSetTTL_L(t *testing.T) {
setKey := []byte("test:testdbTTL_set_l")
var setTTL int64 = rand.Int63()
- if v, err := db.SExpire(setKey, setTTL); err != nil {
+ tn := time.Now().UnixNano()
+ if v, err := db.SExpire(tn, setKey, setTTL); err != nil {
t.Fatal(err)
} else if v != 0 {
t.Fatal("return value from expire of not exist set key != 0")
@@ -160,7 +195,7 @@ func TestSetTTL_L(t *testing.T) {
t.Fatal(err)
}
- if v, err := db.SExpire(setKey, setTTL); err != nil {
+ if v, err := db.SExpire(tn, setKey, setTTL); err != nil {
t.Fatal(err)
} else if v != 1 {
t.Fatal("return value from sexpire != 1")
@@ -172,11 +207,9 @@ func TestSetTTL_L(t *testing.T) {
t.Fatal("return value from SetTtl of LocalDeletion Policy != -1")
}
- if v, err := db.SPersist(setKey); err != nil {
- t.Fatal(err)
- } else if v != 0 {
- t.Fatal("return value from SPersist of LocalDeletion Policy != 0")
- }
+ v, err := db.SPersist(tn, setKey)
+ assert.NotNil(t, err)
+ assert.Equal(t, int64(0), v)
}
func TestZSetTTL_L(t *testing.T) {
@@ -187,7 +220,8 @@ func TestZSetTTL_L(t *testing.T) {
zsetKey := []byte("test:testdbTTL_zset_l")
var zsetTTL int64 = rand.Int63()
- if v, err := db.ZExpire(zsetKey, zsetTTL); err != nil {
+ tn := time.Now().UnixNano()
+ if v, err := db.ZExpire(tn, zsetKey, zsetTTL); err != nil {
t.Fatal(err)
} else if v != 0 {
t.Fatal("return value from expire of not exist zset key != 0")
@@ -204,7 +238,7 @@ func TestZSetTTL_L(t *testing.T) {
t.Fatal(err)
}
- if v, err := db.ZExpire(zsetKey, zsetTTL); err != nil {
+ if v, err := db.ZExpire(tn, zsetKey, zsetTTL); err != nil {
t.Fatal(err)
} else if v != 1 {
t.Fatal("return value from zexpire != 1")
@@ -216,17 +250,15 @@ func TestZSetTTL_L(t *testing.T) {
t.Fatal("return value from ZSetTtl of LocalDeletion Policy != -1")
}
- if v, err := db.ZPersist(zsetKey); err != nil {
- t.Fatal(err)
- } else if v != 0 {
- t.Fatal("return value from ZPersist of LocalDeletion Policy != 0")
- }
-
+ v, err := db.ZPersist(tn, zsetKey)
+ assert.NotNil(t, err)
+ assert.Equal(t, int64(0), v)
}
func TestLocalDeletionTTLChecker(t *testing.T) {
oldCheckInterval := localExpCheckInterval
localExpCheckInterval = 5
+
defer func() {
localExpCheckInterval = oldCheckInterval
}()
@@ -237,7 +269,7 @@ func TestLocalDeletionTTLChecker(t *testing.T) {
kTypeMap := make(map[string]byte)
- dataTypes := []byte{KVType, ListType, HashType, SetType, ZSetType}
+ dataTypes := []byte{KVType, ListType, HashType, SetType, ZSetType, BitmapType}
for i := 0; i < 1000*3+rand.Intn(1000); i++ {
key := "test:ttl_checker_local:" + strconv.Itoa(i)
@@ -245,15 +277,15 @@ func TestLocalDeletionTTLChecker(t *testing.T) {
kTypeMap[key] = dataType
switch dataType {
case KVType:
- db.KVSet(0, []byte("test_checker_local_kvKey"), []byte("test_checker_local_kvValue"))
+ db.KVSet(0, []byte(key), []byte("test_checker_local_kvValue"))
case ListType:
- tListKey := []byte("test_checker_local_listKey")
+ tListKey := []byte(key)
db.LPush(0, tListKey, []byte("this"), []byte("is"), []byte("list"),
[]byte("local"), []byte("deletion"), []byte("ttl"), []byte("checker"), []byte("test"))
case HashType:
- tHashKey := []byte("test_checker_local_hashKey")
+ tHashKey := []byte(key)
tHashVal := []common.KVRecord{
{Key: []byte("field0"), Value: []byte("value0")},
{Key: []byte("field1"), Value: []byte("value1")},
@@ -262,12 +294,12 @@ func TestLocalDeletionTTLChecker(t *testing.T) {
db.HMset(0, tHashKey, tHashVal...)
case SetType:
- tSetKey := []byte("test_checker_local_setKey")
+ tSetKey := []byte(key)
db.SAdd(0, tSetKey, []byte("this"), []byte("is"), []byte("set"),
[]byte("local"), []byte("deletion"), []byte("ttl"), []byte("checker"), []byte("test"))
case ZSetType:
- tZsetKey := []byte("test_checker_local_zsetKey")
+ tZsetKey := []byte(key)
members := []common.ScorePair{
{Member: []byte("member1"), Score: 11},
{Member: []byte("member2"), Score: 22},
@@ -276,9 +308,15 @@ func TestLocalDeletionTTLChecker(t *testing.T) {
}
db.ZAdd(0, tZsetKey, members...)
+ case BitmapType:
+ tBitKey := []byte(key)
+ db.BitSetV2(0, tBitKey, 0, 1)
+ db.BitSetV2(0, tBitKey, 1, 1)
+ db.BitSetV2(0, tBitKey, bitmapSegBits, 1)
}
- if err := db.expire(dataType, []byte(key), 8); err != nil {
+ tn := time.Now().UnixNano()
+ if _, err := db.expire(tn, dataType, []byte(key), nil, 1); err != nil {
t.Fatal(err)
}
}
@@ -291,31 +329,37 @@ func TestLocalDeletionTTLChecker(t *testing.T) {
if v, err := db.KVGet([]byte(k)); err != nil {
t.Fatal(err)
} else if v != nil {
- t.Fatalf("key:%s of KVType do not expired", string(k))
+ t.Errorf("key:%s of KVType do not expired", string(k))
}
case HashType:
if v, err := db.HLen([]byte(k)); err != nil {
t.Fatal(err)
} else if v != 0 {
- t.Fatalf("key:%s of HashType do not expired", string(k))
+ t.Errorf("key:%s of HashType do not expired", string(k))
}
case ListType:
if v, err := db.LLen([]byte(k)); err != nil {
t.Fatal(err)
} else if v != 0 {
- t.Fatalf("key:%s of ListType do not expired", string(k))
+ t.Errorf("key:%s of ListType do not expired", string(k))
}
case SetType:
if v, err := db.SCard([]byte(k)); err != nil {
t.Fatal(err)
} else if v != 0 {
- t.Fatalf("key:%s of SetType do not expired", string(k))
+ t.Errorf("key:%s of SetType do not expired", string(k))
}
case ZSetType:
if v, err := db.ZCard([]byte(k)); err != nil {
t.Fatal(err)
} else if v != 0 {
- t.Fatalf("key:%s of ZSetType do not expired", string(k))
+ t.Errorf("key:%s of ZSetType do not expired", string(k))
+ }
+ case BitmapType:
+ if n, err := db.BitCountV2([]byte(k), 0, -1); err != nil {
+ t.Fatal(err)
+ } else if n == 0 {
+ t.Errorf("key:%s of BitmapType should not expired", string(k))
}
}
}
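
A note on the bitmap expectations above, with a tiny standalone sketch (hypothetical names, not project code): under the LocalDeletion policy the per-type delete factory has no handler for bitmap/json, so those time keys are simply skipped and the data stays, which is why the test above treats an expired bitmap as an error, while the compact-TTL test earlier expects the bitmap to be gone. The dispatch shape is roughly:

package main

import "fmt"

type dataType int

const (
	kvType dataType = iota
	setType
	bitmapType // no local delete handler in this sketch, mirroring the unsupported case
)

// newDelFunc returns the batched delete function for a type, or nil when the
// type is not handled locally; the caller then leaves the data in place.
func newDelFunc(dt dataType) func(keys []string) error {
	switch dt {
	case kvType:
		return func(keys []string) error { fmt.Println("delete kv keys:", keys); return nil }
	case setType:
		return func(keys []string) error { fmt.Println("delete set keys:", keys); return nil }
	default:
		return nil
	}
}

func main() {
	for _, dt := range []dataType{kvType, setType, bitmapType} {
		if fn := newDelFunc(dt); fn != nil {
			fn([]string{"test:a", "test:b"})
		} else {
			fmt.Println("type", dt, "not handled by local deletion, keys are kept")
		}
	}
}
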
diff --git a/rockredis/t_ttl_test.go b/rockredis/t_ttl_test.go
index 73bb5cbc..05bd8a1a 100644
--- a/rockredis/t_ttl_test.go
+++ b/rockredis/t_ttl_test.go
@@ -7,11 +7,30 @@ import (
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/common"
)
+func getTestDBWithCompactTTL(t *testing.T) *RockDB {
+ cfg := NewRockRedisDBConfig()
+ cfg.ExpirationPolicy = common.WaitCompact
+ cfg.EnableTableCounter = true
+ cfg.DataVersion = common.ValueHeaderV1
+
+ var err error
+ cfg.DataDir, err = ioutil.TempDir("", fmt.Sprintf("rockredis-test-%d", time.Now().UnixNano()))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testDB, err := OpenRockDB(cfg)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+ return testDB
+}
+
func getTestDBWithExpirationPolicy(t *testing.T, ePolicy common.ExpirationPolicy) *RockDB {
- cfg := NewRockConfig()
+ cfg := NewRockRedisDBConfig()
cfg.ExpirationPolicy = ePolicy
cfg.EnableTableCounter = true
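
Before the zset changes below, a short standalone illustration (hypothetical names, not the project's API) of the meta layout that parseZMeta and encodeZMetaData in t_zset.go operate on: the collection header's user data is 16 bytes, a big-endian member count in bytes 0..8 followed by a big-endian modification timestamp in bytes 8..16, and shorter values are treated as size-only.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// encodeMeta packs the zset size and last-write timestamp the same way
// the 16-byte user-data buffer is filled in the patch.
func encodeMeta(size, ts int64) []byte {
	buf := make([]byte, 16)
	binary.BigEndian.PutUint64(buf[0:8], uint64(size))
	binary.BigEndian.PutUint64(buf[8:16], uint64(ts))
	return buf
}

// decodeMeta mirrors parseZMeta: empty means size 0, 8 bytes means size only,
// 16 bytes means size plus timestamp.
func decodeMeta(meta []byte) (size int64, ts int64, err error) {
	if len(meta) == 0 {
		return 0, 0, nil
	}
	if len(meta) < 8 {
		return 0, 0, errors.New("invalid meta: too short")
	}
	size = int64(binary.BigEndian.Uint64(meta[:8]))
	if len(meta) < 16 {
		return size, 0, nil
	}
	ts = int64(binary.BigEndian.Uint64(meta[8:16]))
	return size, ts, nil
}

func main() {
	meta := encodeMeta(3, 1700000000000000000)
	size, ts, err := decodeMeta(meta)
	fmt.Println(size, ts, err) // 3 1700000000000000000 <nil>
}
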
diff --git a/rockredis/t_zset.go b/rockredis/t_zset.go
index ce8f6c65..af44dfc1 100644
--- a/rockredis/t_zset.go
+++ b/rockredis/t_zset.go
@@ -4,9 +4,13 @@ import (
"bytes"
"encoding/binary"
"errors"
+ "time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/gorocksdb"
+ ps "github.com/prometheus/client_golang/prometheus"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
+ "github.com/youzan/ZanRedisDB/metric"
+ "github.com/youzan/ZanRedisDB/slow"
)
const (
@@ -35,13 +39,11 @@ const (
zsetMemSep byte = ':'
)
-func checkZSetKMSize(key []byte, member []byte) error {
- if len(key) > MaxKeySize || len(key) == 0 {
- return errKeySize
- } else if len(member) > MaxZSetMemberSize {
- return errZSetMemberSize
+func IsMemberNotExist(err error) bool {
+ if err == errScoreMiss {
+ return true
}
- return nil
+ return false
}
func zEncodeSizeKey(key []byte) []byte {
@@ -65,70 +67,18 @@ func zDecodeSizeKey(ek []byte) ([]byte, error) {
return ek[pos:], nil
}
-func convertRedisKeyToDBZSetKey(key []byte, member []byte) ([]byte, error) {
- table, rk, err := extractTableFromRedisKey(key)
- if err != nil {
- return nil, err
- }
- if err := checkZSetKMSize(rk, member); err != nil {
- return nil, err
- }
- return zEncodeSetKey(table, rk, member), nil
-}
-
-func convertRedisKeyToDBZScoreKey(key []byte, member []byte, score float64) ([]byte, error) {
- table, rk, err := extractTableFromRedisKey(key)
- if err != nil {
- return nil, err
- }
- if err := checkZSetKMSize(rk, member); err != nil {
- return nil, err
- }
- return zEncodeScoreKey(false, false, table, rk, member, score), nil
-}
-
func zEncodeSetKey(table []byte, key []byte, member []byte) []byte {
- buf := make([]byte, getDataTablePrefixBufLen(ZSetType, table)+len(key)+len(member)+3)
- pos := 0
- pos = encodeDataTablePrefixToBuf(buf, ZSetType, table)
-
- binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
- pos += 2
-
- copy(buf[pos:], key)
- pos += len(key)
-
- buf[pos] = zsetMemSep
- pos++
-
- copy(buf[pos:], member)
- return buf
+ return encodeCollSubKey(ZSetType, table, key, member)
}
func zDecodeSetKey(ek []byte) ([]byte, []byte, []byte, error) {
- table, pos, err := decodeDataTablePrefixFromBuf(ek, ZSetType)
+ dt, table, key, member, err := decodeCollSubKey(ek)
if err != nil {
return nil, nil, nil, err
}
-
- if pos+2 > len(ek) {
- return table, nil, nil, errZSetKey
- }
-
- keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
- if keyLen+pos > len(ek) {
- return table, nil, nil, errZSetKey
+ if dt != ZSetType {
+ return table, key, member, errCollTypeMismatch
}
-
- pos += 2
- key := ek[pos : pos+keyLen]
-
- if ek[pos+keyLen] != zsetMemSep {
- return table, nil, nil, errZSetKey
- }
- pos++
-
- member := ek[pos+keyLen:]
return table, key, member, nil
}
@@ -222,24 +172,90 @@ func zDecodeScoreKey(ek []byte) (table []byte, key []byte, member []byte, score
return
}
-func (db *RockDB) zSetItem(key []byte, score float64, member []byte, wb *gorocksdb.WriteBatch) (int64, error) {
+func parseZMeta(meta []byte) (int64, int64, error) {
+ num, err := parseZMetaSize(meta)
+ if err != nil {
+ return 0, 0, err
+ }
+ if len(meta) < 16 {
+ return num, 0, nil
+ }
+ ver, err := Int64(meta[8:16], nil)
+ return num, ver, err
+}
+
+func parseZMetaSize(meta []byte) (int64, error) {
+ if len(meta) == 0 {
+ return 0, nil
+ }
+ if len(meta) < 8 {
+ return 0, errIntNumber
+ }
+ num, err := Int64(meta[:8], nil)
+ if err != nil {
+ return 0, err
+ }
+ return num, nil
+}
+
+func encodeZMetaData(size int64, ts int64, oldh *headerMetaValue) []byte {
+ buf := make([]byte, 16)
+ binary.BigEndian.PutUint64(buf[0:8], uint64(size))
+ binary.BigEndian.PutUint64(buf[8:16], uint64(ts))
+ oldh.UserData = buf
+ nv := oldh.encodeWithData()
+ return nv
+}
+
+func (db *RockDB) getZSetForRangeWithMinMax(ts int64, key []byte, min []byte, max []byte, useLock bool) (collVerKeyInfo, error) {
+ info, err := db.GetCollVersionKey(ts, ZSetType, key, useLock)
+ if err != nil {
+ return info, err
+ }
+ rk := info.VerKey
+ table := info.Table
+ if min == nil {
+ info.RangeStart = zEncodeStartSetKey(table, rk)
+ } else {
+ info.RangeStart = zEncodeSetKey(table, rk, min)
+ }
+ if max == nil {
+ info.RangeEnd = zEncodeStopSetKey(table, rk)
+ } else {
+ info.RangeEnd = zEncodeSetKey(table, rk, max)
+ }
+ return info, err
+}
+
+func (db *RockDB) getZSetForRangeWithNum(ts int64, key []byte, min float64, max float64, useLock bool) (collVerKeyInfo, error) {
+ info, err := db.GetCollVersionKey(ts, ZSetType, key, useLock)
+ if err != nil {
+ return info, err
+ }
+ info.RangeStart = zEncodeStartScoreKey(info.Table, info.VerKey, min)
+ info.RangeEnd = zEncodeStopScoreKey(info.Table, info.VerKey, max)
+ return info, err
+}
+
+func (db *RockDB) zSetItem(table []byte, rk []byte, score float64, member []byte, wb engine.WriteBatch) (int64, error) {
// if score <= MinScore || score >= MaxScore {
// return 0, errScoreOverflow
// }
var exists int64
- ek, err := convertRedisKeyToDBZSetKey(key, member)
- if err != nil {
- return 0, err
- }
+ ek := zEncodeSetKey(table, rk, member)
- if v, err := db.eng.GetBytesNoLock(db.defaultReadOpts, ek); err != nil {
+ if v, err := db.GetBytesNoLock(ek); err != nil {
return 0, err
} else if v != nil {
exists = 1
if s, err := Float64(v, err); err != nil {
return 0, err
} else {
- sk, err := convertRedisKeyToDBZScoreKey(key, member, s)
+ if s == score {
+ return exists, nil
+ }
+ // delete old score key
+ sk := zEncodeScoreKey(false, false, table, rk, member, s)
if err != nil {
return 0, err
}
@@ -249,21 +265,15 @@ func (db *RockDB) zSetItem(key []byte, score float64, member []byte, wb *gorocks
wb.Put(ek, PutFloat64(score))
- sk, err := convertRedisKeyToDBZScoreKey(key, member, score)
- if err != nil {
- return 0, err
- }
+ sk := zEncodeScoreKey(false, false, table, rk, member, score)
wb.Put(sk, []byte{})
return exists, nil
}
-func (db *RockDB) zDelItem(key []byte, member []byte,
- wb *gorocksdb.WriteBatch) (int64, error) {
- ek, err := convertRedisKeyToDBZSetKey(key, member)
- if err != nil {
- return 0, err
- }
- if v, err := db.eng.GetBytesNoLock(db.defaultReadOpts, ek); err != nil {
+func (db *RockDB) zDelItem(table, rk, member []byte,
+ wb engine.WriteBatch) (int64, error) {
+ ek := zEncodeSetKey(table, rk, member)
+ if v, err := db.GetBytesNoLock(ek); err != nil {
return 0, err
} else if v == nil {
//not exists
@@ -274,10 +284,7 @@ func (db *RockDB) zDelItem(key []byte, member []byte,
if s, err := Float64(v, err); err != nil {
return 0, err
} else {
- sk, err := convertRedisKeyToDBZScoreKey(key, member, s)
- if err != nil {
- return 0, err
- }
+ sk := zEncodeScoreKey(false, false, table, rk, member, s)
wb.Delete(sk)
}
}
@@ -289,26 +296,27 @@ func (db *RockDB) ZAdd(ts int64, key []byte, args ...common.ScorePair) (int64, e
if len(args) == 0 {
return 0, nil
}
- if len(args) >= MAX_BATCH_NUM {
+ if len(args) > MAX_BATCH_NUM {
return 0, errTooMuchBatchSize
}
- table, _, err := extractTableFromRedisKey(key)
+ keyInfo, err := db.prepareCollKeyForWrite(ts, ZSetType, key, nil)
if err != nil {
return 0, err
}
+ table := keyInfo.Table
wb := db.wb
- wb.Clear()
+ defer wb.Clear()
var num int64
for i := 0; i < len(args); i++ {
score := args[i].Score
member := args[i].Member
- if err := checkZSetKMSize(key, member); err != nil {
+ if err := common.CheckKeySubKey(key, member); err != nil {
return 0, err
}
- if n, err := db.zSetItem(key, score, member, wb); err != nil {
+ if n, err := db.zSetItem(table, keyInfo.VerKey, score, member, wb); err != nil {
return 0, err
} else if n == 0 {
//add new
@@ -316,18 +324,26 @@ func (db *RockDB) ZAdd(ts int64, key []byte, args ...common.ScorePair) (int64, e
}
}
- if newNum, err := db.zIncrSize(ts, key, num, wb); err != nil {
+ newNum, err := db.zIncrSize(ts, key, keyInfo.OldHeader, num, wb)
+ if err != nil {
return 0, err
- } else if newNum > 0 && newNum == num {
+ } else if newNum > 0 && newNum == num && !keyInfo.Expired {
db.IncrTableKeyCount(table, 1, wb)
}
+ db.topLargeCollKeys.Update(key, int(newNum))
- err = db.eng.Write(db.defaultWriteOpts, wb)
+ slow.LogLargeCollection(int(newNum), slow.NewSlowLogInfo(string(table), string(key), "zset"))
+ if newNum > collectionLengthForMetric {
+ metric.CollectionLenDist.With(ps.Labels{
+ "table": string(table),
+ }).Observe(float64(newNum))
+ }
+ err = db.rockEng.Write(wb)
return num, err
}
func (db *RockDB) ZFixKey(ts int64, key []byte) error {
- n, err := db.ZCard(key)
+ oldh, n, err := db.zGetSize(ts, key, false)
if err != nil {
dbLog.Infof("get zset card failed: %v", err.Error())
return err
@@ -339,9 +355,8 @@ func (db *RockDB) ZFixKey(ts int64, key []byte) error {
}
if len(elems) != int(n) {
dbLog.Infof("unmatched length : %v, %v, detail: %v", n, len(elems), elems)
- db.wb.Clear()
- db.zSetSize(ts, key, int64(len(elems)), db.wb)
- err = db.eng.Write(db.defaultWriteOpts, db.wb)
+ db.zSetSize(ts, key, oldh, int64(len(elems)), db.wb)
+ err = db.CommitBatchWrite()
if err != nil {
return err
}
@@ -350,108 +365,87 @@ func (db *RockDB) ZFixKey(ts int64, key []byte) error {
}
// note: we should not batch incrsize, because we read the old and put the new.
-func (db *RockDB) zIncrSize(ts int64, key []byte, delta int64, wb *gorocksdb.WriteBatch) (int64, error) {
- sk := zEncodeSizeKey(key)
-
- var size int64
- meta, err := db.eng.GetBytesNoLock(db.defaultReadOpts, sk)
+func (db *RockDB) zIncrSize(ts int64, key []byte, oldh *headerMetaValue, delta int64, wb engine.WriteBatch) (int64, error) {
+ meta := oldh.UserData
+ size, err := parseZMetaSize(meta)
if err != nil {
return 0, err
}
- if len(meta) == 0 {
- size = 0
- } else if len(meta) < 8 {
- return 0, errIntNumber
- } else {
- if size, err = Int64(meta[:8], err); err != nil {
- return 0, err
- }
- }
size += delta
+ sk := zEncodeSizeKey(key)
if size <= 0 {
size = 0
wb.Delete(sk)
} else {
- buf := make([]byte, 16)
- binary.BigEndian.PutUint64(buf[0:8], uint64(size))
- binary.BigEndian.PutUint64(buf[8:16], uint64(ts))
- wb.Put(sk, buf)
+ wb.Put(sk, encodeZMetaData(size, ts, oldh))
}
return size, nil
}
-func (db *RockDB) zGetSize(key []byte) (int64, error) {
- sk := zEncodeSizeKey(key)
- meta, err := db.eng.GetBytesNoLock(db.defaultReadOpts, sk)
+func (db *RockDB) zGetSize(tn int64, key []byte, useLock bool) (*headerMetaValue, int64, error) {
+ oldh, expired, err := db.collHeaderMeta(tn, ZSetType, key, useLock)
if err != nil {
- return 0, err
- }
- if len(meta) == 0 {
- return 0, nil
+ return oldh, 0, err
}
- if len(meta) < 8 {
- return 0, errIntNumber
+ if expired {
+ return oldh, 0, nil
}
- return Int64(meta[:8], err)
+ s, err := parseZMetaSize(oldh.UserData)
+ return oldh, s, err
}
func (db *RockDB) zGetVer(key []byte) (int64, error) {
- sk := zEncodeSizeKey(key)
- meta, err := db.eng.GetBytesNoLock(db.defaultReadOpts, sk)
+ oldh, _, err := db.collHeaderMeta(time.Now().UnixNano(), ZSetType, key, true)
if err != nil {
return 0, err
}
- if len(meta) == 0 {
- return 0, nil
- }
- if len(meta) < 16 {
- return 0, errIntNumber
- }
- return Int64(meta[8:16], err)
+ _, ver, err := parseZMeta(oldh.UserData)
+ return ver, err
}
-func (db *RockDB) zSetSize(ts int64, key []byte, newSize int64, wb *gorocksdb.WriteBatch) {
+func (db *RockDB) zSetSize(ts int64, key []byte, oldh *headerMetaValue, newSize int64, wb engine.WriteBatch) {
sk := zEncodeSizeKey(key)
if newSize <= 0 {
wb.Delete(sk)
} else {
- buf := make([]byte, 16)
- binary.BigEndian.PutUint64(buf[0:8], uint64(newSize))
- binary.BigEndian.PutUint64(buf[8:16], uint64(ts))
- wb.Put(sk, buf)
+ wb.Put(sk, encodeZMetaData(newSize, ts, oldh))
}
}
func (db *RockDB) ZGetVer(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
return db.zGetVer(key)
}
func (db *RockDB) ZCard(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- return db.zGetSize(key)
+ ts := time.Now().UnixNano()
+ _, s, err := db.zGetSize(ts, key, true)
+ return s, err
}
func (db *RockDB) ZScore(key []byte, member []byte) (float64, error) {
+ ts := time.Now().UnixNano()
+ keyInfo, err := db.GetCollVersionKey(ts, ZSetType, key, true)
var score float64
-
- k, err := convertRedisKeyToDBZSetKey(key, member)
if err != nil {
return score, err
}
- if v, err := db.eng.GetBytes(db.defaultReadOpts, k); err != nil {
+ if keyInfo.IsNotExistOrExpired() {
+ return score, errScoreMiss
+ }
+ k := zEncodeSetKey(keyInfo.Table, keyInfo.VerKey, member)
+ refv, err := db.rockEng.GetRef(k)
+ if err != nil {
return score, err
- } else if v == nil {
+ }
+ if refv == nil {
return score, errScoreMiss
- } else {
- if score, err = Float64(v, nil); err != nil {
- return score, err
- }
+ }
+ defer refv.Free()
+ if refv.Data() == nil {
+ return score, errScoreMiss
+ }
+ if score, err = Float64(refv.Data(), nil); err != nil {
+ return score, err
}
return score, nil
}
@@ -460,66 +454,75 @@ func (db *RockDB) ZRem(ts int64, key []byte, members ...[]byte) (int64, error) {
if len(members) == 0 {
return 0, nil
}
- if len(members) >= MAX_BATCH_NUM {
+ if len(members) > MAX_BATCH_NUM {
return 0, errTooMuchBatchSize
}
- table, _, err := extractTableFromRedisKey(key)
+ keyInfo, err := db.GetCollVersionKey(ts, ZSetType, key, false)
if err != nil {
return 0, err
}
+ table := keyInfo.Table
wb := db.wb
- wb.Clear()
+ defer wb.Clear()
var num int64 = 0
for i := 0; i < len(members); i++ {
- if err := checkZSetKMSize(key, members[i]); err != nil {
+ if err := common.CheckKeySubKey(key, members[i]); err != nil {
return 0, err
}
- if n, err := db.zDelItem(key, members[i], wb); err != nil {
+ if n, err := db.zDelItem(table, keyInfo.VerKey, members[i], wb); err != nil {
return 0, err
} else if n == 1 {
num++
}
}
- if newNum, err := db.zIncrSize(ts, key, -num, wb); err != nil {
+ newNum, err := db.zIncrSize(ts, key, keyInfo.OldHeader, -num, wb)
+ if err != nil {
return 0, err
} else if num > 0 && newNum == 0 {
db.IncrTableKeyCount(table, -1, wb)
- db.delExpire(ZSetType, key, wb)
}
+ if newNum == 0 {
+ db.delExpire(ZSetType, key, nil, false, wb)
+ }
+ db.topLargeCollKeys.Update(key, int(newNum))
- err = db.eng.Write(db.defaultWriteOpts, wb)
+ err = db.rockEng.Write(wb)
return num, err
}
func (db *RockDB) ZIncrBy(ts int64, key []byte, delta float64, member []byte) (float64, error) {
var score float64
- if err := checkZSetKMSize(key, member); err != nil {
+ if err := common.CheckKeySubKey(key, member); err != nil {
return score, err
}
- table, rk, err := extractTableFromRedisKey(key)
+
+ keyInfo, err := db.prepareCollKeyForWrite(ts, ZSetType, key, nil)
if err != nil {
return score, err
}
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
wb := db.wb
- wb.Clear()
+ defer wb.Clear()
ek := zEncodeSetKey(table, rk, member)
var oldScore float64
- v, err := db.eng.GetBytesNoLock(db.defaultReadOpts, ek)
+ v, err := db.GetBytesNoLock(ek)
if err != nil {
return score, err
} else if v == nil {
- newNum, err := db.zIncrSize(ts, key, 1, wb)
+ newNum, err := db.zIncrSize(ts, key, keyInfo.OldHeader, 1, wb)
if err != nil {
return score, err
- } else if newNum == 1 {
+ } else if newNum == 1 && !keyInfo.Expired {
db.IncrTableKeyCount(table, 1, wb)
}
+ db.topLargeCollKeys.Update(key, int(newNum))
} else {
if oldScore, err = Float64(v, err); err != nil {
return score, err
@@ -538,7 +541,7 @@ func (db *RockDB) ZIncrBy(ts int64, key []byte, delta float64, member []byte) (f
wb.Delete(oldSk)
}
- err = db.eng.Write(db.defaultWriteOpts, wb)
+ err = db.rockEng.Write(wb)
return score, err
}
@@ -546,13 +549,17 @@ func (db *RockDB) ZCount(key []byte, min float64, max float64) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
- table, rk, err := extractTableFromRedisKey(key)
+ tn := time.Now().UnixNano()
+ keyInfo, err := db.getZSetForRangeWithNum(tn, key, min, max, true)
if err != nil {
return 0, err
}
- minKey := zEncodeStartScoreKey(table, rk, min)
- maxKey := zEncodeStopScoreKey(table, rk, max)
- it, err := NewDBRangeIterator(db.eng, minKey, maxKey, common.RangeClose, false)
+ if keyInfo.IsNotExistOrExpired() {
+ return 0, nil
+ }
+ minKey := keyInfo.RangeStart
+ maxKey := keyInfo.RangeEnd
+ it, err := db.NewDBRangeIterator(minKey, maxKey, common.RangeClose, false)
if err != nil {
return 0, err
}
@@ -567,17 +574,24 @@ func (db *RockDB) ZCount(key []byte, min float64, max float64) (int64, error) {
}
func (db *RockDB) zrank(key []byte, member []byte, reverse bool) (int64, error) {
- if err := checkZSetKMSize(key, member); err != nil {
+ if err := common.CheckKeySubKey(key, member); err != nil {
return 0, err
}
- table, rk, err := extractTableFromRedisKey(key)
+ tn := time.Now().UnixNano()
+ keyInfo, err := db.GetCollVersionKey(tn, ZSetType, key, true)
if err != nil {
return 0, err
}
+ if keyInfo.IsNotExistOrExpired() {
+ return -1, nil
+ }
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
+
k := zEncodeSetKey(table, rk, member)
- v, _ := db.eng.GetBytes(db.defaultReadOpts, k)
+ v, _ := db.GetBytes(k)
if v == nil {
return -1, nil
} else {
@@ -585,16 +599,16 @@ func (db *RockDB) zrank(key []byte, member []byte, reverse bool) (int64, error)
return 0, err
} else {
sk := zEncodeScoreKey(false, false, table, rk, member, s)
- var rit *RangeLimitedIterator
+ var rit *engine.RangeLimitedIterator
if !reverse {
minKey := zEncodeStartKey(table, rk)
- rit, err = NewDBRangeIterator(db.eng, minKey, sk, common.RangeClose, reverse)
+ rit, err = db.NewDBRangeIterator(minKey, sk, common.RangeClose, reverse)
if err != nil {
return 0, err
}
} else {
maxKey := zEncodeStopKey(table, rk)
- rit, err = NewDBRangeIterator(db.eng, sk, maxKey, common.RangeClose, reverse)
+ rit, err = db.NewDBRangeIterator(sk, maxKey, common.RangeClose, reverse)
if err != nil {
return 0, err
}
@@ -605,7 +619,7 @@ func (db *RockDB) zrank(key []byte, member []byte, reverse bool) (int64, error)
var n int64 = 0
for ; rit.Valid(); rit.Next() {
- rawk := rit.RefKey()
+ rawk := rit.Key()
n++
lastKey = lastKey[0:0]
lastKey = append(lastKey, rawk...)
@@ -622,18 +636,37 @@ func (db *RockDB) zrank(key []byte, member []byte, reverse bool) (int64, error)
return -1, nil
}
-func (db *RockDB) zRemAll(ts int64, key []byte, wb *gorocksdb.WriteBatch) (int64, error) {
- num, err := db.ZCard(key)
+func (db *RockDB) zRemAll(ts int64, key []byte, wb engine.WriteBatch) (int64, error) {
+ keyInfo, err := db.getCollVerKeyForRange(ts, ZSetType, key, false)
if err != nil {
return 0, err
}
- table, rk, err := extractTableFromRedisKey(key)
+ table := keyInfo.Table
+ rk := keyInfo.VerKey
+ num, err := parseZMetaSize(keyInfo.OldHeader.UserData)
if err != nil {
return 0, err
}
+ if num == 0 {
+ return 0, nil
+ }
+ // no need to delete if already expired
+ if keyInfo.IsNotExistOrExpired() {
+ return 0, nil
+ }
+ db.topLargeCollKeys.Update(key, int(0))
+ if db.cfg.ExpirationPolicy == common.WaitCompact {
+ // for compact ttl, we can just delete the meta
+ sk := zEncodeSizeKey(key)
+ wb.Delete(sk)
+ if num > 0 {
+ db.IncrTableKeyCount(table, -1, wb)
+ }
+ return num, nil
+ }
- minKey := zEncodeStartKey(table, rk)
- maxKey := zEncodeStopKey(table, rk)
+ minKey := keyInfo.RangeStart
+ maxKey := keyInfo.RangeEnd
if num > RangeDeleteNum {
sk := zEncodeSizeKey(key)
wb.DeleteRange(minKey, maxKey)
@@ -643,37 +676,51 @@ func (db *RockDB) zRemAll(ts int64, key []byte, wb *gorocksdb.WriteBatch) (int64
wb.DeleteRange(minSetKey, maxSetKey)
if num > 0 {
db.IncrTableKeyCount(table, -1, wb)
- db.delExpire(ZSetType, key, wb)
}
+ db.delExpire(ZSetType, key, nil, false, wb)
wb.Delete(sk)
} else {
- rmCnt, err := db.zRemRangeBytes(ts, key, minKey, maxKey, 0, -1, wb)
+ // the remove-all scan could skip already deleted entries to speed up the scan.
+ // update: skipping deleted entries is not needed, and it may be costly if too many entries were deleted
+ rmCnt, err := db.zRemRangeBytes(ts, key, keyInfo, 0, -1, wb)
return rmCnt, err
}
return num, nil
}
-func (db *RockDB) zRemRangeBytes(ts int64, key []byte, minKey []byte, maxKey []byte, offset int,
- count int, wb *gorocksdb.WriteBatch) (int64, error) {
- if len(key) > MaxKeySize {
- return 0, errKeySize
+func (db *RockDB) zRemRangeBytes(ts int64, key []byte, keyInfo collVerKeyInfo, offset int,
+ count int, wb engine.WriteBatch) (int64, error) {
+ err := common.CheckKey(key)
+ if err != nil {
+ return 0, err
+ }
+ total, err := parseZMetaSize(keyInfo.OldHeader.UserData)
+ if err != nil {
+ return 0, err
+ }
+ if total == 0 {
+ // no data to be deleted, avoid iterator data
+ return 0, nil
}
// if count >= total size , remove all
if offset == 0 {
- total, err := db.ZCard(key)
if err == nil && int64(count) >= total {
return db.zRemAll(ts, key, wb)
}
}
- if count >= MAX_BATCH_NUM {
+ if count > MAX_BATCH_NUM {
return 0, errTooMuchBatchSize
}
- table, _, err := extractTableFromRedisKey(key)
- if err != nil {
- return 0, err
- }
+ table := keyInfo.Table
+ minKey := keyInfo.RangeStart
+ maxKey := keyInfo.RangeEnd
- it, err := NewDBRangeLimitIterator(db.eng, minKey, maxKey, common.RangeClose, offset, count, false)
+ opts := engine.IteratorOpts{
+ Range: engine.Range{Min: minKey, Max: maxKey, Type: common.RangeClose},
+ Limit: engine.Limit{Offset: offset, Count: count},
+ Reverse: false,
+ }
+ it, err := db.NewDBRangeLimitIteratorWithOpts(opts)
if err != nil {
return 0, err
}
@@ -686,51 +733,54 @@ func (db *RockDB) zRemRangeBytes(ts int64, key []byte, minKey []byte, maxKey []b
continue
}
- if n, err := db.zDelItem(key, m, wb); err != nil {
+ if n, err := db.zDelItem(table, keyInfo.VerKey, m, wb); err != nil {
return 0, err
} else if n == 1 {
num++
}
}
- if newNum, err := db.zIncrSize(ts, key, -num, wb); err != nil {
+ newNum, err := db.zIncrSize(ts, key, keyInfo.OldHeader, -num, wb)
+ if err != nil {
return 0, err
} else if num > 0 && newNum == 0 {
db.IncrTableKeyCount(table, -1, wb)
- db.delExpire(ZSetType, key, wb)
}
+ if newNum == 0 {
+ db.delExpire(ZSetType, key, nil, false, wb)
+ }
+ db.topLargeCollKeys.Update(key, int(newNum))
return num, nil
}
func (db *RockDB) zRemRange(ts int64, key []byte, min float64, max float64, offset int,
- count int, wb *gorocksdb.WriteBatch) (int64, error) {
+ count int, wb engine.WriteBatch) (int64, error) {
- table, rk, err := extractTableFromRedisKey(key)
+ keyInfo, err := db.getZSetForRangeWithNum(ts, key, min, max, false)
if err != nil {
return 0, err
}
- minKey := zEncodeStartScoreKey(table, rk, min)
- maxKey := zEncodeStopScoreKey(table, rk, max)
- return db.zRemRangeBytes(ts, key, minKey, maxKey, offset, count, wb)
+ return db.zRemRangeBytes(ts, key, keyInfo, offset, count, wb)
}
-func (db *RockDB) zRangeBytes(key []byte, minKey []byte, maxKey []byte, offset int, count int, reverse bool) ([]common.ScorePair, error) {
- if len(key) > MaxKeySize {
- return nil, errKeySize
+func (db *RockDB) zRangeBytes(ts int64, preCheckCnt bool, key []byte, minKey []byte, maxKey []byte, offset int, count int, reverse bool) ([]common.ScorePair, error) {
+ err := common.CheckKey(key)
+ if err != nil {
+ return nil, err
}
if offset < 0 {
return []common.ScorePair{}, nil
}
- if count >= MAX_BATCH_NUM {
+ if count > MAX_BATCH_NUM {
return nil, errTooMuchBatchSize
}
// if count == -1, check if we may get too much data
- if count < 0 {
- total, _ := db.ZCard(key)
- if total >= MAX_BATCH_NUM {
+ if count < 0 && preCheckCnt {
+ _, total, _ := db.zGetSize(ts, key, true)
+ if total-int64(offset) > MAX_BATCH_NUM {
return nil, errTooMuchBatchSize
}
}
@@ -741,16 +791,23 @@ func (db *RockDB) zRangeBytes(key []byte, minKey []byte, maxKey []byte, offset i
nv = MAX_BATCH_NUM
}
- v := make([]common.ScorePair, 0, nv)
+ // TODO: use buf pool
+ // we can not use count for prealloc since it may be larger than what we actually have (total - offset),
+ // and because of the minkey~maxkey range the result may be much smaller than count
+ preAlloc := 16
+ if count > 0 && count < preAlloc {
+ preAlloc = count
+ }
+ // TODO: use pool for large alloc
+ v := make([]common.ScorePair, 0, preAlloc)
- var err error
- var it *RangeLimitedIterator
+ var it *engine.RangeLimitedIterator
//if reverse and offset is 0, count < 0, we may use forward iterator then reverse
//because store iterator prev is slower than next
if !reverse || (offset == 0 && count < 0) {
- it, err = NewDBRangeLimitIterator(db.eng, minKey, maxKey, common.RangeClose, offset, count, false)
+ it, err = db.NewDBRangeLimitIterator(minKey, maxKey, common.RangeClose, offset, count, false)
} else {
- it, err = NewDBRangeLimitIterator(db.eng, minKey, maxKey, common.RangeClose, offset, count, true)
+ it, err = db.NewDBRangeLimitIterator(minKey, maxKey, common.RangeClose, offset, count, true)
}
if err != nil {
return nil, err
@@ -784,24 +841,29 @@ func (db *RockDB) zRangeBytes(key []byte, minKey []byte, maxKey []byte, offset i
}
func (db *RockDB) zRange(key []byte, min float64, max float64, offset int, count int, reverse bool) ([]common.ScorePair, error) {
- table, rk, err := extractTableFromRedisKey(key)
+ tn := time.Now().UnixNano()
+ keyInfo, err := db.getZSetForRangeWithNum(tn, key, min, max, true)
if err != nil {
return nil, err
}
- minKey := zEncodeStartScoreKey(table, rk, min)
- maxKey := zEncodeStopScoreKey(table, rk, max)
- return db.zRangeBytes(key, minKey, maxKey, offset, count, reverse)
+ if keyInfo.IsNotExistOrExpired() {
+ return nil, nil
+ }
+
+ minKey := keyInfo.RangeStart
+ maxKey := keyInfo.RangeEnd
+ preCheckCnt := false
+ if min == common.MinScore && max == common.MaxScore {
+ preCheckCnt = true
+ }
+ return db.zRangeBytes(tn, preCheckCnt, key, minKey, maxKey, offset, count, reverse)
}
-func (db *RockDB) zParseLimit(key []byte, start int, stop int) (offset int, count int, err error) {
+func (db *RockDB) zParseLimit(total int64, start int, stop int) (offset int, count int, err error) {
if start < 0 || stop < 0 {
//refer redis implementation
var size int64
- size, err = db.ZCard(key)
- if err != nil {
- return
- }
-
+ size = total
llen := int(size)
if start < 0 {
@@ -831,14 +893,17 @@ func (db *RockDB) zParseLimit(key []byte, start int, stop int) (offset int, coun
return
}
-func (db *RockDB) ZClear(key []byte) (int64, error) {
- db.wb.Clear()
+func (db *RockDB) ZClear(ts int64, key []byte) (int64, error) {
+ defer db.wb.Clear()
- rmCnt, err := db.zRemAll(0, key, db.wb)
+ rmCnt, err := db.zRemAll(ts, key, db.wb)
if err == nil {
- err = db.eng.Write(db.defaultWriteOpts, db.wb)
+ err = db.rockEng.Write(db.wb)
}
- return rmCnt, err
+ if rmCnt > 0 {
+ return 1, err
+ }
+ return 0, err
}
func (db *RockDB) ZMclear(keys ...[]byte) (int64, error) {
@@ -849,11 +914,11 @@ func (db *RockDB) ZMclear(keys ...[]byte) (int64, error) {
for _, key := range keys {
// note: the zRemAll can not be batched, so we need clear and commit
// after each key.
- db.wb.Clear()
if _, err := db.zRemAll(0, key, db.wb); err != nil {
+ db.wb.Clear()
return deleted, err
}
- err := db.eng.Write(db.defaultWriteOpts, db.wb)
+ err := db.CommitBatchWrite()
if err != nil {
return deleted, err
}
@@ -863,7 +928,7 @@ func (db *RockDB) ZMclear(keys ...[]byte) (int64, error) {
return int64(len(keys)), nil
}
-func (db *RockDB) zMclearWithBatch(wb *gorocksdb.WriteBatch, keys ...[]byte) error {
+func (db *RockDB) zMclearWithBatch(wb engine.WriteBatch, keys ...[]byte) error {
if len(keys) > MAX_BATCH_NUM {
return errTooMuchBatchSize
}
@@ -892,34 +957,37 @@ func (db *RockDB) ZRank(key []byte, member []byte) (int64, error) {
}
func (db *RockDB) ZRemRangeByRank(ts int64, key []byte, start int, stop int) (int64, error) {
- offset, count, err := db.zParseLimit(key, start, stop)
+ keyInfo, err := db.getCollVerKeyForRange(ts, ZSetType, key, false)
+ if err != nil {
+ return 0, err
+ }
+ num, err := parseZMetaSize(keyInfo.OldHeader.UserData)
if err != nil {
return 0, err
}
- var rmCnt int64
-
- db.wb.Clear()
- table, rk, err := extractTableFromRedisKey(key)
+ offset, count, err := db.zParseLimit(num, start, stop)
if err != nil {
return 0, err
}
- minKey := zEncodeStartKey(table, rk)
- maxKey := zEncodeStopKey(table, rk)
- rmCnt, err = db.zRemRangeBytes(ts, key, minKey, maxKey, offset, count, db.wb)
+
+ var rmCnt int64
+ defer db.wb.Clear()
+
+ rmCnt, err = db.zRemRangeBytes(ts, key, keyInfo, offset, count, db.wb)
if err == nil {
- err = db.eng.Write(db.defaultWriteOpts, db.wb)
+ err = db.rockEng.Write(db.wb)
}
return rmCnt, err
}
//min and max must be inclusive
func (db *RockDB) ZRemRangeByScore(ts int64, key []byte, min float64, max float64) (int64, error) {
- db.wb.Clear()
+ defer db.wb.Clear()
rmCnt, err := db.zRemRange(ts, key, min, max, 0, -1, db.wb)
if err == nil {
- err = db.eng.Write(db.defaultWriteOpts, db.wb)
+ err = db.rockEng.Write(db.wb)
}
return rmCnt, err
@@ -940,17 +1008,23 @@ func (db *RockDB) ZRevRangeByScore(key []byte, min float64, max float64, offset
}
func (db *RockDB) ZRangeGeneric(key []byte, start int, stop int, reverse bool) ([]common.ScorePair, error) {
- offset, count, err := db.zParseLimit(key, start, stop)
+ tn := time.Now().UnixNano()
+ keyInfo, err := db.getCollVerKeyForRange(tn, ZSetType, key, true)
if err != nil {
return nil, err
}
- table, rk, err := extractTableFromRedisKey(key)
+ if keyInfo.IsNotExistOrExpired() {
+ return nil, nil
+ }
+ num, err := parseZMetaSize(keyInfo.OldHeader.UserData)
if err != nil {
return nil, err
}
- minKey := zEncodeStartKey(table, rk)
- maxKey := zEncodeStopKey(table, rk)
- return db.zRangeBytes(key, minKey, maxKey, offset, count, reverse)
+ offset, count, err := db.zParseLimit(num, start, stop)
+ if err != nil {
+ return nil, err
+ }
+ return db.zRangeBytes(tn, true, key, keyInfo.RangeStart, keyInfo.RangeEnd, offset, count, reverse)
}
//min and max must be inclusive
@@ -986,37 +1060,39 @@ func getAggregateFunc(aggregate byte) func(int64, int64) int64 {
}
func (db *RockDB) ZRangeByLex(key []byte, min []byte, max []byte, rangeType uint8, offset int, count int) ([][]byte, error) {
- table, rk, err := extractTableFromRedisKey(key)
- if err != nil {
- return nil, err
+ if count > MAX_BATCH_NUM {
+ return nil, errTooMuchBatchSize
}
- if min == nil {
- min = zEncodeStartSetKey(table, rk)
- } else {
- min = zEncodeSetKey(table, rk, min)
- }
- if max == nil {
- max = zEncodeStopSetKey(table, rk)
- } else {
- max = zEncodeSetKey(table, rk, max)
+ tn := time.Now().UnixNano()
+ keyInfo, err := db.getZSetForRangeWithMinMax(tn, key, min, max, true)
+ if err != nil {
+ return nil, err
}
- if count >= MAX_BATCH_NUM {
- return nil, errTooMuchBatchSize
+ if keyInfo.IsNotExistOrExpired() {
+ return nil, nil
}
- if count < 0 {
- total, _ := db.ZCard(key)
- if total >= MAX_BATCH_NUM {
+ min = keyInfo.RangeStart
+ max = keyInfo.RangeEnd
+
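+ // with no lex bounds and count < 0 the whole zset would be scanned, so reject early if the remaining size exceeds the batch limit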
+ if count < 0 && min == nil && max == nil {
+ total, _ := parseZMetaSize(keyInfo.OldHeader.UserData)
+ if total-int64(offset) > MAX_BATCH_NUM {
return nil, errTooMuchBatchSize
}
}
- it, err := NewDBRangeLimitIterator(db.eng, min, max, rangeType, offset, count, false)
+ it, err := db.NewDBRangeLimitIterator(min, max, rangeType, offset, count, false)
if err != nil {
return nil, err
}
defer it.Close()
- ay := make([][]byte, 0, 16)
+ preAlloc := 16
+ if count > 0 && count < preAlloc {
+ preAlloc = count
+ }
+ // TODO: use pool for large alloc
+ ay := make([][]byte, 0, preAlloc)
for ; it.Valid(); it.Next() {
rawk := it.Key()
if _, _, m, err := zDecodeSetKey(rawk); err == nil {
@@ -1025,7 +1101,6 @@ func (db *RockDB) ZRangeByLex(key []byte, min []byte, max []byte, rangeType uint
} else {
dbLog.Infof("key %v : error %v", rawk, err)
}
- // TODO: err for iterator step would match the final count?
if count >= 0 && len(ay) >= count {
break
}
@@ -1038,37 +1113,13 @@ func (db *RockDB) ZRangeByLex(key []byte, min []byte, max []byte, rangeType uint
return ay, nil
}
-func (db *RockDB) ZRemRangeByLex(ts int64, key []byte, min []byte, max []byte, rangeType uint8) (int64, error) {
- wb := db.wb
- wb.Clear()
- if min == nil && max == nil {
- cnt, err := db.zRemAll(ts, key, wb)
- if err != nil {
- return 0, err
- }
- if err := db.eng.Write(db.defaultWriteOpts, wb); err != nil {
- return 0, err
- }
- return cnt, nil
- }
-
- table, rk, err := extractTableFromRedisKey(key)
+func (db *RockDB) internalZRemRangeByLex(ts int64, key []byte, min []byte, max []byte, rangeType uint8, wb engine.WriteBatch) (int64, error) {
+ keyInfo, err := db.getZSetForRangeWithMinMax(ts, key, min, max, false)
if err != nil {
return 0, err
}
- if min == nil {
- min = zEncodeStartSetKey(table, rk)
- } else {
- min = zEncodeSetKey(table, rk, min)
- }
- if max == nil {
- max = zEncodeStopSetKey(table, rk)
- } else {
- max = zEncodeSetKey(table, rk, max)
- }
-
- it, err := NewDBRangeIterator(db.eng, min, max, rangeType, false)
+ it, err := db.NewDBRangeIterator(keyInfo.RangeStart, keyInfo.RangeEnd, rangeType, false)
if err != nil {
return 0, err
}
@@ -1080,21 +1131,47 @@ func (db *RockDB) ZRemRangeByLex(ts int64, key []byte, min []byte, max []byte, r
if err != nil {
continue
}
- if n, err := db.zDelItem(key, m, wb); err != nil {
+ if n, err := db.zDelItem(keyInfo.Table, keyInfo.VerKey, m, wb); err != nil {
return 0, err
} else if n == 1 {
num++
}
}
- if newNum, err := db.zIncrSize(ts, key, -num, wb); err != nil {
+ newNum, err := db.zIncrSize(ts, key, keyInfo.OldHeader, -num, wb)
+ if err != nil {
return 0, err
} else if num > 0 && newNum == 0 {
- db.IncrTableKeyCount(table, -1, wb)
- db.delExpire(ZSetType, key, wb)
+ db.IncrTableKeyCount(keyInfo.Table, -1, wb)
+ }
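+ // the zset is now empty, so any pending expire metadata can be dropped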
+ if newNum == 0 {
+ db.delExpire(ZSetType, key, nil, false, wb)
+ }
+
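+ // keep the large-collection tracking in sync with the new size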
+ db.topLargeCollKeys.Update(key, int(newNum))
+ return num, nil
+}
+
+func (db *RockDB) ZRemRangeByLex(ts int64, key []byte, min []byte, max []byte, rangeType uint8) (int64, error) {
+ wb := db.wb
+ defer wb.Clear()
+ if min == nil && max == nil {
+ cnt, err := db.zRemAll(ts, key, wb)
+ if err != nil {
+ return 0, err
+ }
+ if err := db.rockEng.Write(wb); err != nil {
+ return 0, err
+ }
+ return cnt, nil
}
- if err := db.eng.Write(db.defaultWriteOpts, wb); err != nil {
+ num, err := db.internalZRemRangeByLex(ts, key, min, max, rangeType, wb)
+ if err != nil {
+ return 0, err
+ }
+
+ if err := db.rockEng.Write(wb); err != nil {
return 0, err
}
@@ -1102,23 +1179,16 @@ func (db *RockDB) ZRemRangeByLex(ts int64, key []byte, min []byte, max []byte, r
}
func (db *RockDB) ZLexCount(key []byte, min []byte, max []byte, rangeType uint8) (int64, error) {
- table, rk, err := extractTableFromRedisKey(key)
+ tn := time.Now().UnixNano()
+ keyInfo, err := db.getZSetForRangeWithMinMax(tn, key, min, max, true)
if err != nil {
return 0, err
}
-
- if min == nil {
- min = zEncodeStartSetKey(table, rk)
- } else {
- min = zEncodeSetKey(table, rk, min)
- }
- if max == nil {
- max = zEncodeStopSetKey(table, rk)
- } else {
- max = zEncodeSetKey(table, rk, max)
+ if keyInfo.IsNotExistOrExpired() {
+ return 0, nil
}
- it, err := NewDBRangeIterator(db.eng, min, max, rangeType, false)
+ it, err := db.NewDBRangeIterator(keyInfo.RangeStart, keyInfo.RangeEnd, rangeType, false)
if err != nil {
return 0, err
}
@@ -1134,43 +1204,14 @@ func (db *RockDB) ZKeyExists(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
- sk := zEncodeSizeKey(key)
- v, err := db.eng.GetBytes(db.defaultReadOpts, sk)
- if v != nil && err == nil {
- return 1, nil
- }
- return 0, err
-}
-func (db *RockDB) ZExpire(key []byte, duration int64) (int64, error) {
- if exists, err := db.ZKeyExists(key); err != nil || exists != 1 {
- return 0, err
- } else {
- if err2 := db.expire(ZSetType, key, duration); err2 != nil {
- return 0, err2
- } else {
- return 1, nil
- }
- }
+ return db.collKeyExists(ZSetType, key)
}
-func (db *RockDB) ZPersist(key []byte) (int64, error) {
- if exists, err := db.ZKeyExists(key); err != nil || exists != 1 {
- return 0, err
- }
-
- if ttl, err := db.ttl(ZSetType, key); err != nil || ttl < 0 {
- return 0, err
- }
+func (db *RockDB) ZExpire(ts int64, key []byte, duration int64) (int64, error) {
+ return db.collExpire(ts, ZSetType, key, duration)
+}
- db.wb.Clear()
- if err := db.delExpire(ZSetType, key, db.wb); err != nil {
- return 0, err
- } else {
- if err2 := db.eng.Write(db.defaultWriteOpts, db.wb); err2 != nil {
- return 0, err2
- } else {
- return 1, nil
- }
- }
+func (db *RockDB) ZPersist(ts int64, key []byte) (int64, error) {
+ return db.collPersist(ts, ZSetType, key)
}
diff --git a/rockredis/t_zset_test.go b/rockredis/t_zset_test.go
index 430111af..9dec12ab 100644
--- a/rockredis/t_zset_test.go
+++ b/rockredis/t_zset_test.go
@@ -4,10 +4,12 @@ import (
"fmt"
"os"
"reflect"
+ "strconv"
"testing"
+ "time"
- "github.com/absolute8511/ZanRedisDB/common"
"github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
)
const (
@@ -37,7 +39,7 @@ func TestZSetCodec(t *testing.T) {
t.Fatal(string(k))
}
- ek, _ = convertRedisKeyToDBZSetKey(key, member)
+ ek = zEncodeSetKey([]byte("test"), []byte("key"), member)
if tb, k, m, err := zDecodeSetKey(ek); err != nil {
t.Fatal(err)
} else if string(k) != "key" {
@@ -48,7 +50,7 @@ func TestZSetCodec(t *testing.T) {
t.Fatal(string(tb))
}
- ek, _ = convertRedisKeyToDBZScoreKey(key, member, 100)
+ ek = zEncodeScoreKey(false, false, []byte("test"), []byte("key"), member, 100)
if tb, k, m, s, err := zDecodeScoreKey(ek); err != nil {
t.Fatal(err)
} else if string(k) != "key" {
@@ -68,55 +70,56 @@ func TestDBZSetWithEmptyMember(t *testing.T) {
defer os.RemoveAll(db.cfg.DataDir)
defer db.Close()
+ tn := time.Now().UnixNano()
key := bin("test:testdb_zset_empty")
- if n, err := db.ZAdd(0, key, pair("a", 0), pair("b", 1),
- pair("c", 2), pair("", 3)); err != nil {
- t.Fatal(err)
- } else if n != 4 {
- t.Fatal(n)
- }
+ n, err := db.ZAdd(tn, key, pair("a", 0), pair("b", 1),
+ pair("c", 2), pair("", 3))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(4), n)
- if n, err := db.ZCount(key, 0, 0XFF); err != nil {
- t.Fatal(err)
- } else if n != 4 {
- t.Fatal(n)
- }
+ s, err := db.ZScore(key, bin("b"))
+ assert.Nil(t, err)
+ assert.Equal(t, float64(1), s)
+ s, err = db.ZScore(key, bin(""))
+ assert.Nil(t, err)
+ assert.Equal(t, float64(3), s)
- if s, err := db.ZScore(key, bin("")); err != nil {
- t.Fatal(err)
- } else if s != 3 {
- t.Fatal(s)
- }
+ n, err = db.ZCount(key, 0, 0xFF)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(4), n)
- if n, err := db.ZRem(0, key, bin("a"), bin("b")); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
+ n, err = db.ZRem(tn, key, bin("a"), bin("b"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
- if n, err := db.ZRem(0, key, bin("a"), bin("b")); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
+ n, err = db.ZCount(key, 0, 0xFF)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
- if n, err := db.ZCard(key); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
+ n, err = db.ZRem(tn, key, bin("a"), bin("b"))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
- if n, err := db.ZClear(key); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
+ n, err = db.ZCard(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
- if n, err := db.ZCount(key, 0, 0XFF); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
+ n, err = db.ZClear(0, key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.ZCount(key, 0, 0xFF)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ // test zrange, zrank, zscore for empty
+ vals, err := db.ZRange(key, 0, 0xFF)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vals))
+ n, err = db.ZRank(key, []byte("a"))
+ assert.Nil(t, err)
+ assert.Equal(t, true, n < 0)
+ _, err = db.ZScore(key, []byte("a"))
+ assert.NotNil(t, err)
}
func TestDBZSet(t *testing.T) {
@@ -134,21 +137,25 @@ func TestDBZSet(t *testing.T) {
t.Fatal(n)
}
- if n, err := db.ZCount(key, 0, 0XFF); err != nil {
- t.Fatal(err)
- } else if n != 4 {
- t.Fatal(n)
- }
+ s, err := db.ZScore(key, bin("d"))
+ assert.Nil(t, err)
+ assert.Equal(t, float64(3), s)
+ _, err = db.ZScore(key, bin("zzz"))
+ assert.Equal(t, errScoreMiss, err)
- if s, err := db.ZScore(key, bin("d")); err != nil {
- t.Fatal(err)
- } else if s != 3 {
- t.Fatal(s)
- }
+ n, err := db.ZCount(key, 0, 0xFF)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(4), n)
- if s, err := db.ZScore(key, bin("zzz")); err != errScoreMiss {
- t.Fatal(fmt.Sprintf("s=[%v] err=[%s]", s, err))
- }
+ // re-add existing members, one with an updated score; no new members should be added
+ n, err = db.ZAdd(0, key, pair("a", 0), pair("b", 1),
+ pair("c", 2), pair("d", 4))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ s, err = db.ZScore(key, bin("d"))
+ assert.Nil(t, err)
+ assert.Equal(t, float64(4), s)
// {'c':2, 'd':3}
if n, err := db.ZRem(0, key, bin("a"), bin("b")); err != nil {
@@ -170,9 +177,9 @@ func TestDBZSet(t *testing.T) {
}
// {}
- if n, err := db.ZClear(key); err != nil {
+ if n, err := db.ZClear(0, key); err != nil {
t.Fatal(err)
- } else if n != 2 {
+ } else if n != 1 {
t.Fatal(n)
}
@@ -183,6 +190,34 @@ func TestDBZSet(t *testing.T) {
}
}
+func TestDBZSetIncrby(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key := bin("test:testdb_zset_incr")
+
+ // {'a':0, 'b':1}
+ if n, err := db.ZAdd(0, key, pair("a", 0), pair("b", 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ s, err := db.ZScore(key, bin("b"))
+ assert.Nil(t, err)
+ assert.Equal(t, float64(1), s)
+ _, err = db.ZScore(key, bin("c"))
+ assert.Equal(t, errScoreMiss, err)
+
+ s, err = db.ZIncrBy(0, key, 3, bin("b"))
+ assert.Nil(t, err)
+ assert.Equal(t, float64(4), s)
+ s, err = db.ZIncrBy(0, key, 1, bin("c"))
+ assert.Nil(t, err)
+ assert.Equal(t, float64(1), s)
+}
+
func TestZSetOrder(t *testing.T) {
db := getTestDB(t)
defer os.RemoveAll(db.cfg.DataDir)
@@ -278,7 +313,7 @@ func TestZSetOrder(t *testing.T) {
scores := []float64{0, 1, 2, 5, 6, 999}
for i := 0; i < len(datas); i++ {
if datas[i].Score != scores[i] {
- t.Fatal(fmt.Sprintf("[%d]=%d", i, datas[i]))
+ t.Fatal(fmt.Sprintf("[%d]=%v", i, datas[i]))
}
}
}
@@ -286,8 +321,137 @@ func TestZSetOrder(t *testing.T) {
return
}
+func TestZSetRange(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ // zrange and zrevrange test
+ key := []byte("test:zkey_zrange_test")
+ mems := []common.ScorePair{
+ common.ScorePair{1, []byte("a")},
+ common.ScorePair{2, []byte("b")},
+ common.ScorePair{3, []byte("c")},
+ common.ScorePair{4, []byte("d")},
+ }
+ n, err := db.ZAdd(0, key, mems...)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems)), n)
+
+ vlist, err := db.ZRange(key, 0, 3)
+ assert.Nil(t, err)
+ assert.Equal(t, mems, vlist)
+
+ vlist, err = db.ZRange(key, 1, 4)
+ assert.Nil(t, err)
+ assert.Equal(t, mems[1:], vlist)
+
+ vlist, err = db.ZRange(key, -2, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, mems[2:], vlist)
+
+ vlist, err = db.ZRange(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, mems, vlist)
+
+ vlist, err = db.ZRange(key, -1, -2)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vlist))
+
+ vlist, err = db.ZRevRange(key, 0, 4)
+ assert.Nil(t, err)
+ assert.Equal(t, len(mems), len(vlist))
+ assert.Equal(t, mems[0], vlist[len(vlist)-1])
+
+ vlist, err = db.ZRevRange(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, len(mems), len(vlist))
+ assert.Equal(t, mems[len(mems)-1], vlist[0])
+
+ vlist, err = db.ZRevRange(key, 2, 3)
+ assert.Nil(t, err)
+ assert.Equal(t, len(mems)-2, len(vlist))
+ assert.Equal(t, mems[len(mems)-1-2], vlist[0])
+
+ vlist, err = db.ZRevRange(key, -2, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, len(mems)-2, len(vlist))
+ assert.Equal(t, mems[1], vlist[0])
+}
+
func TestZRemRange(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
// all range remove test
+ key := []byte("test:zkey_range_rm_test")
+ mems := []common.ScorePair{
+ common.ScorePair{1, []byte("a")},
+ common.ScorePair{2, []byte("b")},
+ common.ScorePair{3, []byte("c")},
+ common.ScorePair{4, []byte("d")},
+ common.ScorePair{5, []byte("e")},
+ common.ScorePair{6, []byte("f")},
+ }
+ n, err := db.ZAdd(0, key, mems...)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(mems)), n)
+
+ vlist, err := db.ZRange(key, 0, len(mems))
+ assert.Nil(t, err)
+ assert.Equal(t, mems, vlist)
+
+ total := len(mems)
+ n, err = db.ZRemRangeByRank(0, key, 2, 3)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(2), n)
+ total -= 2
+ n, err = db.ZCard(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(total), n)
+ vlist, err = db.ZRange(key, 0, len(mems))
+ assert.Nil(t, err)
+ assert.Equal(t, mems[:2], vlist[:2])
+ assert.Equal(t, mems[4:], vlist[2:])
+
+ n, err = db.ZRem(0, key, mems[0].Member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ total--
+ n, err = db.ZCard(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(total), n)
+ vlist, err = db.ZRange(key, 0, len(mems))
+ assert.Nil(t, err)
+ assert.Equal(t, total, len(vlist))
+ assert.Equal(t, mems[1], vlist[0])
+ assert.Equal(t, mems[4:], vlist[1:])
+
+ n, err = db.ZRemRangeByScore(0, key, 0, 3)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ total--
+
+ n, err = db.ZRemRangeByLex(0, key, []byte("e"), []byte("e"), common.RangeClose)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+ total--
+ n, err = db.ZCard(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(total), n)
+ vlist, err = db.ZRange(key, 0, len(mems))
+ assert.Nil(t, err)
+ assert.Equal(t, total, len(vlist))
+ assert.Equal(t, mems[5:], vlist)
+
+ n, err = db.ZClear(0, key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.ZCard(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
}
func TestZRangeLimit(t *testing.T) {
@@ -333,6 +497,76 @@ func TestZRangeLimit(t *testing.T) {
assert.Equal(t, MAX_BATCH_NUM-1, len(ay))
}
+func TestZRangeLimitPreCheck(t *testing.T) {
+ db := getTestDB(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
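+ // fill just below MAX_BATCH_NUM so full-range reads still pass the pre-check before more members are added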
+ key := []byte("test:myzset_range_precheck")
+ for i := 0; i < MAX_BATCH_NUM-1; i++ {
+ m := fmt.Sprintf("%8d", i)
+ _, err := db.ZAdd(0, key, common.ScorePair{Score: float64(i), Member: []byte(m)})
+ assert.Nil(t, err)
+ }
+
+ maxMem := fmt.Sprintf("%8d", MAX_BATCH_NUM+1)
+ ay, err := db.ZRangeByLex(key, nil, nil, common.RangeClose, 0, -1)
+ assert.Nil(t, err)
+ for _, v := range ay {
+ t.Logf("zrange key: %v", v)
+ }
+ assert.Equal(t, MAX_BATCH_NUM-1, len(ay))
+ mems, err := db.ZRangeByScore(key, 0, float64(MAX_BATCH_NUM*2), 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, MAX_BATCH_NUM-1, len(mems))
+
+ elems, err := db.ZRange(key, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, MAX_BATCH_NUM-1, len(elems))
+
+ total := MAX_BATCH_NUM + 10
+ for i := MAX_BATCH_NUM; i < MAX_BATCH_NUM+10; i++ {
+ m := fmt.Sprintf("%8d", i)
+ _, err := db.ZAdd(0, key, common.ScorePair{Score: float64(i), Member: []byte(m)})
+ assert.Nil(t, err)
+ }
+ _, err = db.ZRange(key, 0, -1)
+ assert.Equal(t, errTooMuchBatchSize, err)
+ _, err = db.ZRange(key, total-10, -1)
+ assert.Nil(t, err)
+
+ _, err = db.ZRangeByLex(key, nil, []byte(maxMem), common.RangeClose, 0, -1)
+ assert.Equal(t, errTooMuchBatchSize, err)
+ _, err = db.ZRangeByLex(key, nil, nil, common.RangeClose, 0, -1)
+ assert.Equal(t, errTooMuchBatchSize, err)
+ ay, err = db.ZRangeByLex(key, nil, nil, common.RangeClose, total-10, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, 9, len(ay))
+
+ r1 := fmt.Sprintf("%8d", 1)
+ r2 := fmt.Sprintf("%8d", 2)
+ // count = -1, but a small explicit range should still return the results
+ ay, err = db.ZRangeByLex(key, []byte(r1), []byte(r2), common.RangeClose, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, 2, len(ay))
+
+ _, err = db.ZRangeByScore(key, common.MinScore, common.MaxScore, 0, -1)
+ assert.Equal(t, errTooMuchBatchSize, err)
+ _, err = db.ZRangeByScore(key, 0, MAX_BATCH_NUM+1, 0, -1)
+ assert.Equal(t, errTooMuchBatchSize, err)
+
+ elems, err = db.ZRangeByScore(key, common.MinScore, common.MaxScore, total-10, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, 9, len(elems))
+ elems, err = db.ZRangeByScore(key, 0, 1, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, 2, len(elems))
+
+ ay, err = db.ZRangeByLex(key, nil, []byte(maxMem), common.RangeClose, 0, MAX_BATCH_NUM-1)
+ assert.Nil(t, err)
+ assert.Equal(t, MAX_BATCH_NUM-1, len(ay))
+}
+
func TestZLex(t *testing.T) {
db := getTestDB(t)
defer os.RemoveAll(db.cfg.DataDir)
@@ -407,3 +641,142 @@ func TestZKeyExists(t *testing.T) {
t.Fatal("invalid value ", n)
}
}
+
+func TestDBZClearInCompactTTL(t *testing.T) {
+ db := getTestDBWithCompactTTL(t)
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ key := []byte("test:testdb_zset_clear_compact_a")
+ member := []byte("member")
+ memberNew := []byte("memberNew")
+
+ ts := time.Now().UnixNano()
+ db.ZAdd(ts, key, common.ScorePair{
+ Score: 1,
+ Member: member,
+ })
+
+ n, err := db.ZCard(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ ts = time.Now().UnixNano()
+ n, err = db.ZClear(ts, key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ n, err = db.ZCard(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ vlist, err := db.ZRange(key, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vlist))
+ vlist, err = db.ZRevRange(key, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vlist))
+ mlist, err := db.ZRangeByLex(key, nil, nil, common.RangeClose, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(mlist))
+ vlist, err = db.ZRangeByScore(key, 0, 100, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vlist))
+ score, err := db.ZScore(key, member)
+ assert.Equal(t, errScoreMiss, err)
+ assert.Equal(t, float64(0), score)
+
+ n, err = db.ZRank(key, member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+ n, err = db.ZRevRank(key, member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+
+ n, err = db.ZKeyExists(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ vlist, err = db.ZScan(key, []byte(""), -1, "", false)
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vlist))
+
+ // renew
+ ts = time.Now().UnixNano()
+ db.ZAdd(ts, key, common.ScorePair{
+ Score: 2,
+ Member: memberNew,
+ })
+
+ n, err = db.ZCard(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ vlist, err = db.ZRange(key, 0, 100)
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(vlist))
+ assert.Equal(t, float64(2), vlist[0].Score)
+ assert.Equal(t, memberNew, vlist[0].Member)
+ vlist, err = db.ZRevRange(key, 0, 100)
+ assert.Equal(t, 1, len(vlist))
+ assert.Equal(t, float64(2), vlist[0].Score)
+ assert.Equal(t, memberNew, vlist[0].Member)
+ mlist, err = db.ZRangeByLex(key, nil, nil, common.RangeClose, 0, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(mlist))
+ vlist, err = db.ZRangeByScore(key, 0, 100, 0, -1)
+ assert.Equal(t, 1, len(vlist))
+ assert.Equal(t, float64(2), vlist[0].Score)
+ assert.Equal(t, memberNew, vlist[0].Member)
+ score, err = db.ZScore(key, member)
+ assert.Equal(t, errScoreMiss, err)
+ assert.Equal(t, float64(0), score)
+ score, err = db.ZScore(key, memberNew)
+ assert.Nil(t, err)
+ assert.Equal(t, float64(2), score)
+
+ n, err = db.ZRank(key, member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+ n, err = db.ZRevRank(key, member)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(-1), n)
+ n, err = db.ZRank(key, memberNew)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ n, err = db.ZRevRank(key, memberNew)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+
+ n, err = db.ZKeyExists(key)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), n)
+
+ vlist, err = db.ZScan(key, []byte(""), -1, "", false)
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(vlist))
+}
+
+func BenchmarkZaddAndZRembyrange(b *testing.B) {
+ db := getTestDBForBench()
+ defer os.RemoveAll(db.cfg.DataDir)
+ defer db.Close()
+
+ // zadd and zrem-by-range benchmark
+ key := []byte("test:zkey_range_rm_bench")
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ member := common.ScorePair{
+ Score: float64(i),
+ Member: []byte(strconv.Itoa(i)),
+ }
+ db.ZAdd(0, key, member)
+ }
+
+ for i := 0; i < b.N; i++ {
+ db.ZRemRangeByRank(0, key, 1, 100)
+ db.ZRemRangeByScore(0, key, 101, 201)
+ db.ZRemRangeByLex(0, key, []byte("0"), []byte("100"), common.RangeClose)
+ }
+ b.StopTimer()
+}
diff --git a/rockredis/util.go b/rockredis/util.go
index 21fa0761..a66a4eca 100644
--- a/rockredis/util.go
+++ b/rockredis/util.go
@@ -6,7 +6,7 @@ import (
"math"
"strconv"
- "github.com/absolute8511/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/common"
)
var errIntNumber = errors.New("invalid integer")
@@ -33,6 +33,10 @@ func FormatInt64ToSlice(v int64) []byte {
return strconv.AppendInt(nil, int64(v), 10)
}
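+// PutInt64ToBuf writes v into b as a big-endian uint64; b must be at least 8 bytes long.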
+func PutInt64ToBuf(v int64, b []byte) {
+ binary.BigEndian.PutUint64(b, uint64(v))
+}
+
func PutInt64(v int64) []byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(v))
diff --git a/scripts/run_zankv.sh b/scripts/run_zankv.sh
new file mode 100755
index 00000000..e793cce8
--- /dev/null
+++ b/scripts/run_zankv.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+exec 2>&1
+sleep 1
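+# start the rsync daemon used for data sync, then keep zankv in the foreground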
+rsync --daemon --config=/opt/zankv/conf/rsyncd.conf
+exec /opt/zankv/bin/zankv -config /opt/zankv/conf/zankv.conf -stderrthreshold=1
\ No newline at end of file
diff --git a/scripts/run_zanpd.sh b/scripts/run_zanpd.sh
new file mode 100755
index 00000000..0b8d6a1e
--- /dev/null
+++ b/scripts/run_zanpd.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+exec 2>&1
+sleep 1
+exec /opt/zankv/bin/placedriver -config /opt/zankv/conf/placedriver.conf -stderrthreshold=1
\ No newline at end of file
diff --git a/server/config.go b/server/config.go
index 772c0e34..520b1b48 100644
--- a/server/config.go
+++ b/server/config.go
@@ -1,36 +1,49 @@
package server
import (
- "github.com/absolute8511/ZanRedisDB/rockredis"
+ "github.com/youzan/ZanRedisDB/engine"
)
type ServerConfig struct {
// this cluster id is used for server transport to tell
// different global cluster
- ClusterID string `json:"cluster_id"`
- EtcdClusterAddresses string `json:"etcd_cluster_addresses"`
- BroadcastInterface string `json:"broadcast_interface"`
- BroadcastAddr string `json:"broadcast_addr"`
- RedisAPIPort int `json:"redis_api_port"`
- HttpAPIPort int `json:"http_api_port"`
- GrpcAPIPort int `json:"grpc_api_port"`
- ProfilePort int `json:"profile_port"`
- DataDir string `json:"data_dir"`
- DataRsyncModule string `json:"data_rsync_module"`
- LocalRaftAddr string `json:"local_raft_addr"`
- Tags map[string]string `json:"tags"`
- SyncerWriteOnly bool `json:"syncer_write_only"`
- SyncerNormalInit bool `json:"syncer_normal_init"`
- LearnerRole string `json:"learner_role"`
- RemoteSyncCluster string `json:"remote_sync_cluster"`
- StateMachineType string `json:"state_machine_type"`
+ ClusterID string `json:"cluster_id"`
+ EtcdClusterAddresses string `json:"etcd_cluster_addresses"`
+ BroadcastInterface string `json:"broadcast_interface"`
+ BroadcastAddr string `json:"broadcast_addr"`
+ MetricAddr string `json:"metric_addr"`
+ RedisAPIPort int `json:"redis_api_port"`
+ HttpAPIPort int `json:"http_api_port"`
+ GrpcAPIPort int `json:"grpc_api_port"`
+ ProfilePort int `json:"profile_port"`
+ DataDir string `json:"data_dir"`
+ LogDir string `json:"log_dir"`
+ RemoteLogAddr string `json:"remote_log_addr"`
+ DataRsyncModule string `json:"data_rsync_module"`
+ LocalRaftAddr string `json:"local_raft_addr"`
+ Tags map[string]string `json:"tags"`
+ SyncerWriteOnly bool `json:"syncer_write_only"`
+ SyncerNormalInit bool `json:"syncer_normal_init"`
+ LearnerRole string `json:"learner_role"`
+ RemoteSyncCluster string `json:"remote_sync_cluster"`
+ StateMachineType string `json:"state_machine_type"`
+ RsyncLimit int64 `json:"rsync_limit"`
+ DefaultSnapCount int `json:"default_snap_count"`
+ DefaultSnapCatchup int `json:"default_snap_catchup"`
+ KeepBackup int `json:"keep_backup"`
+ KeepWAL int `json:"keep_wal"`
+ UseRocksWAL bool `json:"use_rocks_wal"`
+ SharedRocksWAL bool `json:"shared_rocks_wal"`
+ UseRedisV2 bool `json:"use_redis_v2"`
+ SlowLimiterRefuseCostMs int64 `json:"slow_limiter_refuse_cost_ms"`
ElectionTick int `json:"election_tick"`
TickMs int `json:"tick_ms"`
// default rocksdb options, can be override by namespace config
- RocksDBOpts rockredis.RockOptions `json:"rocksdb_opts"`
- Namespaces []NamespaceNodeConfig `json:"namespaces"`
- MaxScanJob int32 `json:"max_scan_job"`
+ RocksDBOpts engine.RockOptions `json:"rocksdb_opts"`
+ WALRocksDBOpts engine.RockOptions `json:"wal_rocksdb_opts"`
+ Namespaces []NamespaceNodeConfig `json:"namespaces"`
+ MaxScanJob int32 `json:"max_scan_job"`
}
type NamespaceNodeConfig struct {
diff --git a/server/grpc_api.go b/server/grpc_api.go
index ec67a0f0..06343e93 100644
--- a/server/grpc_api.go
+++ b/server/grpc_api.go
@@ -1,6 +1,7 @@
package server
import (
+ "errors"
"fmt"
"net"
"net/http"
@@ -9,21 +10,24 @@ import (
context "golang.org/x/net/context"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/syncerpb"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/metric"
+ "github.com/youzan/ZanRedisDB/node"
+ "github.com/youzan/ZanRedisDB/syncerpb"
"google.golang.org/grpc"
)
var (
- proposeTimeout = time.Second * 4
+ proposeTimeout = time.Second * 4
+ errRemoteSyncOutOfOrder = errors.New("remote sync index out of order")
)
-var syncClusterNetStats common.WriteStats
-var syncClusterTotalStats common.WriteStats
+var syncClusterNetStats metric.WriteStats
+var syncClusterTotalStats metric.WriteStats
var applyStatusMapping = map[int]syncerpb.RaftApplySnapStatus{
0: syncerpb.ApplyUnknown,
- 1: syncerpb.ApplyWaitingTransfer,
+ 1: syncerpb.ApplyWaitingBegin,
2: syncerpb.ApplyWaitingTransfer,
3: syncerpb.ApplyTransferSuccess,
4: syncerpb.ApplyWaiting,
@@ -47,6 +51,21 @@ func (s *Server) GetSyncedRaft(ctx context.Context, req *syncerpb.SyncedRaftReq)
func (s *Server) ApplyRaftReqs(ctx context.Context, reqs *syncerpb.RaftReqs) (*syncerpb.RpcErr, error) {
var rpcErr syncerpb.RpcErr
receivedTs := time.Now()
+ // TODO: to speed up, we could pipeline writes: propose all raft logs to the raft buffer and wait
+ // for all responses. However, that may break ordering if some of them fail and are retried. Maybe
+ // combine them into a single raft proposal.
+ futureList := make([]func() error, 0, len(reqs.RaftLog))
+ start := time.Now()
+ lastIndex := uint64(0)
+ defer func() {
+ if rpcErr.ErrMsg == "" && rpcErr.ErrCode == 0 {
+ return
+ }
+ // clean up the pending future responses when returning an error
+ for _, f := range futureList {
+ f()
+ }
+ }()
for _, r := range reqs.RaftLog {
if sLog.Level() >= common.LOG_DETAIL {
sLog.Debugf("applying raft log from remote cluster syncer: %v", r.String())
@@ -66,21 +85,76 @@ func (s *Server) ApplyRaftReqs(ctx context.Context, reqs *syncerpb.RaftReqs) (*s
r.RaftGroupName, r.Term, r.Index, term, index)
continue
}
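+ // remember the last applied index so gaps in the synced raft log can be detected below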
+ if lastIndex == 0 {
+ lastIndex = index
+ }
+ if r.Index > lastIndex+1 {
+ sLog.Warningf("%v raft log commit not continued: %v-%v, synced: %v-%v, last: %v",
+ r.RaftGroupName, r.Term, r.Index, term, index, lastIndex)
+ // TODO: for compatibility with the old syncer we only log here; once all nodes are upgraded we should check and return the error below
+ //rpcErr.ErrCode = http.StatusBadRequest
+ //rpcErr.ErrMsg = errRemoteSyncOutOfOrder.Error()
+ //return &rpcErr, nil
+ }
// raft timestamp should be the same with the real raft request in data
logStart := r.RaftTimestamp
syncNetLatency := receivedTs.UnixNano() - logStart
syncClusterNetStats.UpdateLatencyStats(syncNetLatency / time.Microsecond.Nanoseconds())
- err := kv.Node.ProposeRawAndWait(r.Data, r.Term, r.Index, r.RaftTimestamp)
+ var reqList node.BatchInternalRaftRequest
+ err := reqList.Unmarshal(r.Data)
+ if err != nil {
+ rpcErr.ErrCode = http.StatusBadRequest
+ rpcErr.ErrMsg = err.Error()
+ return &rpcErr, nil
+ }
+ if len(reqList.Reqs) == 0 {
+ // some events (such as leader transfer) have no reqs;
+ // however, we still need to send them to raft so the event is not lost if the leader changes
+ sLog.Infof("%v raft log commit synced without proposal: %v-%v, last: %v",
+ r.RaftGroupName, r.Term, r.Index, r.String())
+ }
+ fu, origReqs, err := kv.Node.ProposeRawAsyncFromSyncer(r.Data, &reqList, r.Term, r.Index, r.RaftTimestamp)
if err != nil {
sLog.Infof("propose failed: %v, err: %v", r.String(), err.Error())
rpcErr.ErrCode = http.StatusInternalServerError
rpcErr.ErrMsg = err.Error()
return &rpcErr, nil
}
- syncLatency := time.Now().UnixNano() - logStart
- syncClusterTotalStats.UpdateLatencyStats(syncLatency / time.Microsecond.Nanoseconds())
+ lastIndex = r.Index
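+ // the proposal result is not waited on here; all responses are checked after the loop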
+ fuFunc := func() error {
+ rsp, err := fu.WaitRsp()
+ if err != nil {
+ return err
+ }
+ var ok bool
+ if err, ok = rsp.(error); ok {
+ return err
+ }
+
+ reqList := origReqs
+ cost := time.Since(start).Nanoseconds()
+ for _, req := range reqList.Reqs {
+ if req.Header.DataType == int32(node.RedisReq) || req.Header.DataType == int32(node.RedisV2Req) {
+ kv.Node.UpdateWriteStats(int64(len(req.Data)), cost/1000)
+ }
+ }
+ syncLatency := time.Now().UnixNano() - logStart
+ syncClusterTotalStats.UpdateLatencyStats(syncLatency / time.Microsecond.Nanoseconds())
+ return nil
+ }
+ futureList = append(futureList, fuFunc)
+ }
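+ // wait for every proposal in order; any failure is recorded in rpcErr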
+ for _, f := range futureList {
+ err := f()
+ if err != nil {
+ rpcErr.ErrCode = http.StatusInternalServerError
+ rpcErr.ErrMsg = err.Error()
+ // we just record the error and continue waiting for the other future responses
+ }
}
+ // clear the list here so the deferred cleanup does not wait on these responses again
+ futureList = futureList[:0]
return &rpcErr, nil
}
@@ -98,7 +172,11 @@ func (s *Server) NotifyTransferSnap(ctx context.Context, req *syncerpb.RaftApply
return &rpcErr, nil
}
sLog.Infof("raft need transfer snapshot from remote: %v", req.String())
- kv.Node.BeginTransferRemoteSnap(req.ClusterName, req.Term, req.Index, req.SyncAddr, req.SyncPath)
+ err := kv.Node.BeginTransferRemoteSnap(req.ClusterName, req.Term, req.Index, req.SyncAddr, req.SyncPath)
+ if err != nil {
+ rpcErr.ErrCode = http.StatusInternalServerError
+ rpcErr.ErrMsg = err.Error()
+ }
return &rpcErr, nil
}
@@ -143,8 +221,13 @@ func (s *Server) GetApplySnapStatus(ctx context.Context, req *syncerpb.RaftApply
if !ok {
status.Status = syncerpb.ApplyMissing
} else {
- status.Status, _ = applyStatusMapping[ss.StatusCode]
- status.StatusMsg = ss.Status
+ if ss.SS.SyncedTerm != req.Term || ss.SS.SyncedIndex != req.Index {
+ // another snapshot is applying
+ status.Status = syncerpb.ApplyMissing
+ } else {
+ status.Status, _ = applyStatusMapping[ss.StatusCode]
+ status.StatusMsg = ss.Status
+ }
}
}
sLog.Infof("raft apply snapshot from remote %v , status: %v", req.String(), status)
diff --git a/server/httpapi.go b/server/httpapi.go
index 8a0669d0..963d217a 100644
--- a/server/httpapi.go
+++ b/server/httpapi.go
@@ -2,33 +2,83 @@ package server
import (
"encoding/json"
+ "fmt"
"io/ioutil"
"net/http"
_ "net/http/pprof"
"net/url"
"os"
+ "runtime"
"strconv"
"strings"
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/rockredis"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ "github.com/youzan/ZanRedisDB/metric"
+ "github.com/youzan/ZanRedisDB/rockredis"
+ "github.com/youzan/ZanRedisDB/slow"
- "github.com/absolute8511/ZanRedisDB/cluster"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/node"
- "github.com/absolute8511/ZanRedisDB/raft"
- "github.com/absolute8511/ZanRedisDB/transport/rafthttp"
"github.com/julienschmidt/httprouter"
+ "github.com/youzan/ZanRedisDB/cluster"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/node"
+ "github.com/youzan/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/transport/rafthttp"
)
var allowStaleRead int32
+type RaftProgress struct {
+ Match uint64 `json:"match"`
+ Next uint64 `json:"next"`
+ State string `json:"state"`
+}
+
+// raft.Status can not be marshaled/unmarshaled correctly, so we redefine it here
+type CustomRaftStatus struct {
+ ID uint64 `json:"id,omitempty"`
+ Term uint64 `json:"term,omitempty"`
+ Vote uint64 `json:"vote"`
+ Commit uint64 `json:"commit"`
+ Lead uint64 `json:"lead"`
+ RaftState string `json:"raft_state"`
+ Applied uint64 `json:"applied"`
+ Progress map[uint64]RaftProgress `json:"progress,omitempty"`
+ LeadTransferee uint64 `json:"lead_transferee"`
+}
+
+func (crs *CustomRaftStatus) Init(s raft.Status) {
+ crs.ID = s.ID
+ crs.Term = s.Term
+ crs.Vote = s.Vote
+ crs.Commit = s.Commit
+ crs.Lead = s.Lead
+ crs.RaftState = s.RaftState.String()
+ crs.Applied = s.Applied
+ crs.Progress = make(map[uint64]RaftProgress, len(s.Progress))
+ for i, pr := range s.Progress {
+ var cpr RaftProgress
+ cpr.Match = pr.Match
+ cpr.Next = pr.Next
+ cpr.State = pr.State.String()
+ crs.Progress[i] = cpr
+ }
+ crs.LeadTransferee = s.LeadTransferee
+}
+
type RaftStatus struct {
- LeaderInfo *common.MemberInfo
- Members []*common.MemberInfo
- Learners []*common.MemberInfo
- RaftStat raft.Status
+ LeaderInfo *common.MemberInfo `json:"leader_info,omitempty"`
+ Members []*common.MemberInfo `json:"members,omitempty"`
+ Learners []*common.MemberInfo `json:"learners,omitempty"`
+ RaftStat CustomRaftStatus `json:"raft_stat,omitempty"`
+}
+
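+// toBoolParam treats only the literal string "true" as true; any other value is false.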
+func toBoolParam(param string) bool {
+ if param == "true" {
+ return true
+ }
+ return false
}
func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
@@ -55,15 +105,84 @@ func (s *Server) getKey(w http.ResponseWriter, req *http.Request, ps httprouter.
}
}
-func (s *Server) doOptimize(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+func (s *Server) doOptimizeTable(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
ns := ps.ByName("namespace")
table := ps.ByName("table")
- s.OptimizeDB(ns, table)
+ s.nsMgr.OptimizeDB(ns, table)
+ return nil, nil
+}
+
+func (s *Server) doOptimizeNS(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ ns := ps.ByName("namespace")
+ s.nsMgr.OptimizeDB(ns, "")
+ return nil, nil
+}
+
+func (s *Server) doOptimizeExpire(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ ns := ps.ByName("namespace")
+ s.nsMgr.OptimizeDBExpire(ns)
+ return nil, nil
+}
+
+func (s *Server) doOptimizeAnyRange(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ ns := ps.ByName("namespace")
+ data, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: err.Error()}
+ }
+ sLog.Infof("got compact range: %v from remote: %v", string(data), req.RemoteAddr)
+ var anyRange node.CompactAPIRange
+ err = json.Unmarshal(data, &anyRange)
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: err.Error()}
+ }
+ if len(anyRange.StartFrom) == 0 && len(anyRange.EndTo) == 0 {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "compact range start and end can not both be empty"}
+ }
+ s.nsMgr.OptimizeDBAnyRange(ns, anyRange)
+ return nil, nil
+}
+
+func (s *Server) doBackup(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ ns := ps.ByName("namespace")
+ s.nsMgr.BackupDB(ns, false)
+ return nil, nil
+}
+
+func (s *Server) doBackupAll(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ s.nsMgr.BackupDB("", false)
return nil, nil
}
func (s *Server) doOptimizeAll(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
- s.OptimizeDB("", "")
+ s.nsMgr.OptimizeDB("", "")
+ return nil, nil
+}
+
+func (s *Server) disableOptimize(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ s.nsMgr.DisableOptimizeDB(true)
+ return nil, nil
+}
+func (s *Server) enableOptimize(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ s.nsMgr.DisableOptimizeDB(false)
+ return nil, nil
+}
+
+func (s *Server) doEnableTopn(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ ns := ps.ByName("namespace")
+ s.nsMgr.EnableTopn(ns, true)
+ return nil, nil
+}
+
+func (s *Server) doDisableTopn(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ ns := ps.ByName("namespace")
+ s.nsMgr.EnableTopn(ns, false)
+ return nil, nil
+}
+
+func (s *Server) doClearTopn(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ ns := ps.ByName("namespace")
+ s.nsMgr.ClearTopn(ns)
return nil, nil
}
@@ -283,6 +402,23 @@ func (s *Server) pingHandler(w http.ResponseWriter, req *http.Request, ps httpro
return "OK", nil
}
+func (s *Server) doChangeSlowLogLevel(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ reqParams, err := url.ParseQuery(req.URL.RawQuery)
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_REQUEST"}
+ }
+ levelStr := reqParams.Get("loglevel")
+ if levelStr == "" {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "MISSING_ARG_LEVEL"}
+ }
+ level, err := strconv.Atoi(levelStr)
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "BAD_LEVEL_STRING"}
+ }
+ slow.ChangeSlowLogLevel(level)
+ return nil, nil
+}
+
func (s *Server) doSetLogLevel(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
reqParams, err := url.ParseQuery(req.URL.RawQuery)
if err != nil {
@@ -337,6 +473,26 @@ func (s *Server) doSetCostLevel(w http.ResponseWriter, req *http.Request, ps htt
return nil, nil
}
+func (s *Server) doSetRsyncLimit(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ reqParams, err := url.ParseQuery(req.URL.RawQuery)
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_REQUEST"}
+ }
+ limitStr := reqParams.Get("limit")
+ if limitStr == "" {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "MISSING_ARG"}
+ }
+ limit, err := strconv.Atoi(limitStr)
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "BAD_ARG_STRING"}
+ }
+ if limit <= 0 {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "BAD_ARG_STRING"}
+ }
+ common.SetRsyncLimit(int64(limit))
+ return nil, nil
+}
+
func (s *Server) doSetStaleRead(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
reqParams, err := url.ParseQuery(req.URL.RawQuery)
if err != nil {
@@ -354,6 +510,49 @@ func (s *Server) doSetStaleRead(w http.ResponseWriter, req *http.Request, ps htt
return nil, nil
}
+func (s *Server) getSyncerRunnings(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ if s.dataCoord == nil {
+ return nil, nil
+ }
+ runnings, err := s.dataCoord.GetCurrentNsWithLearners()
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusInternalServerError, Text: err.Error()}
+ }
+ return struct {
+ Runnings []string `json:"runnings"`
+ }{
+ Runnings: runnings,
+ }, nil
+}
+
+func (s *Server) getSyncerNormalInit(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ return node.IsSyncerNormalInit(), nil
+}
+
+func (s *Server) doSetSyncerNormalInit(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ reqParams, err := url.ParseQuery(req.URL.RawQuery)
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_REQUEST"}
+ }
+ param := reqParams.Get("enable")
+ if param == "" {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "MISSING_ARG"}
+ }
+ boolParam := toBoolParam(param)
+ sLog.Infof("syncer normal init state changed to : %v", param)
+ err = s.updateSyncerNormalInitToRegister(boolParam)
+ if err != nil {
+ sLog.Warningf("failed to set state: %v", err.Error())
+ return nil, common.HttpErr{Code: http.StatusInternalServerError, Text: err.Error()}
+ }
+ node.SetSyncerNormalInit(boolParam)
+ return nil, nil
+}
+
+func (s *Server) getSyncerOnlyState(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ return node.IsSyncerOnly(), nil
+}
+
func (s *Server) doSetSyncerOnly(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
reqParams, err := url.ParseQuery(req.URL.RawQuery)
if err != nil {
@@ -363,10 +562,100 @@ func (s *Server) doSetSyncerOnly(w http.ResponseWriter, req *http.Request, ps ht
if param == "" {
return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "MISSING_ARG"}
}
- if param == "true" {
- node.SetSyncerOnly(true)
+ boolParam := toBoolParam(param)
+ sLog.Infof("syncer only state changed to : %v", param)
+ err = s.updateSyncerWriteOnlyToRegister(boolParam)
+ if err != nil {
+ sLog.Warningf("failed to set syncer only state: %v", err.Error())
+ return nil, common.HttpErr{Code: http.StatusInternalServerError, Text: err.Error()}
+ }
+ node.SetSyncerOnly(boolParam)
+ return nil, nil
+}
+
+func (s *Server) doSwitchDisableConflictLog(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ reqParams, err := url.ParseQuery(req.URL.RawQuery)
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_REQUEST"}
+ }
+ param := reqParams.Get("disable")
+ if param == "" {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "MISSING_ARG"}
+ }
+ boolParam := toBoolParam(param)
+ sLog.Infof("disable log conflict state changed to : %v", param)
+ node.SwitchDisableMaybeConflictLog(boolParam)
+ return nil, nil
+}
+
+func (s *Server) doSetDynamicConf(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ reqParams, err := url.ParseQuery(req.URL.RawQuery)
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_REQUEST"}
+ }
+ paramT := reqParams.Get("type")
+ paramKey := reqParams.Get("key")
+ paramV := reqParams.Get("value")
+ if paramT == "int" {
+ n, err := strconv.Atoi(paramV)
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_ARG"}
+ }
+ common.SetIntDynamicConf(paramKey, n)
+ } else if paramT == "str" {
+ common.SetStrDynamicConf(paramKey, paramV)
+ } else {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_ARG: param type should be int/str"}
+ }
+ sLog.Infof("conf %v changed to : %v", paramKey, paramV)
+ return nil, nil
+}
+
+func (s *Server) doGetDynamicConf(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ reqParams, err := url.ParseQuery(req.URL.RawQuery)
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_REQUEST"}
+ }
+ paramT := reqParams.Get("type")
+ paramKey := reqParams.Get("key")
+ if paramT == "int" {
+ v := common.GetIntDynamicConf(paramKey)
+ return struct {
+ Key string `json:"key"`
+ Value int `json:"value"`
+ }{
+ Key: paramKey,
+ Value: v,
+ }, nil
+ } else if paramT == "str" {
+ v := common.GetStrDynamicConf(paramKey)
+ return struct {
+ Key string `json:"key"`
+ Value string `json:"value"`
+ }{
+ Key: paramKey,
+ Value: v,
+ }, nil
} else {
- node.SetSyncerOnly(false)
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_ARG: param type should be int/str"}
+ }
+ return nil, nil
+}
+
+func (s *Server) doSetDBOptions(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ reqParams, err := url.ParseQuery(req.URL.RawQuery)
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_REQUEST"}
+ }
+ paramKey := reqParams.Get("key")
+ paramV := reqParams.Get("value")
+ if paramKey == "" || paramV == "" {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_ARG: option key empty"}
+ }
+ sLog.Infof("try set db option %v to : %v", paramKey, paramV)
+ err = s.nsMgr.SetDBOptions(paramKey, paramV)
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: err.Error()}
}
return nil, nil
}
@@ -380,7 +669,7 @@ func (s *Server) doSetSyncerIndex(w http.ResponseWriter, req *http.Request, ps h
if err != nil {
return nil, common.HttpErr{Code: http.StatusBadRequest, Text: err.Error()}
}
- var ss []common.LogSyncStats
+ var ss []metric.LogSyncStats
err = json.Unmarshal(data, &ss)
if err != nil {
return nil, common.HttpErr{Code: http.StatusBadRequest, Text: err.Error()}
@@ -425,6 +714,8 @@ func (s *Server) doRaftStats(w http.ResponseWriter, req *http.Request, ps httpro
return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_REQUEST"}
}
ns := reqParams.Get("namespace")
+ leaderOnlyStr := reqParams.Get("leader_only")
+ leaderOnly, _ := strconv.ParseBool(leaderOnlyStr)
nsList := s.nsMgr.GetNamespaces()
rstat := make([]*RaftStatus, 0)
for name, nsNode := range nsList {
@@ -434,11 +725,15 @@ func (s *Server) doRaftStats(w http.ResponseWriter, req *http.Request, ps httpro
if !nsNode.IsReady() {
continue
}
+ if leaderOnly && !nsNode.Node.IsLead() {
+ continue
+ }
var s RaftStatus
s.LeaderInfo = nsNode.Node.GetLeadMember()
s.Members = nsNode.Node.GetMembers()
s.Learners = nsNode.Node.GetLearners()
- s.RaftStat = nsNode.Node.GetRaftStatus()
+ rs := nsNode.Node.GetRaftStatus()
+ s.RaftStat.Init(rs)
rstat = append(rstat, &s)
}
return rstat, nil
@@ -455,7 +750,12 @@ func (s *Server) doStats(w http.ResponseWriter, req *http.Request, ps httprouter
if leaderOnlyStr == "" {
leaderOnly = true
}
- ss := s.GetStats(leaderOnly)
+ detailStr := reqParams.Get("table_detail")
+ detail, _ := strconv.ParseBool(detailStr)
+ if detailStr == "" {
+ detail = false
+ }
+ ss := s.GetStats(leaderOnly, detail)
startTime := s.startTime
uptime := time.Since(startTime)
@@ -463,20 +763,78 @@ func (s *Server) doStats(w http.ResponseWriter, req *http.Request, ps httprouter
return struct {
Version string `json:"version"`
UpTime int64 `json:"up_time"`
- Stats common.ServerStats `json:"stats"`
+ Stats metric.ServerStats `json:"stats"`
}{common.VerBinary, int64(uptime.Seconds()), ss}, nil
}
+func (s *Server) doTableStats(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ reqParams, err := url.ParseQuery(req.URL.RawQuery)
+ if err != nil {
+ sLog.Infof("failed to parse request params - %s", err)
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_REQUEST"}
+ }
+ leaderOnlyStr := reqParams.Get("leader_only")
+ leaderOnly, _ := strconv.ParseBool(leaderOnlyStr)
+ if leaderOnlyStr == "" {
+ leaderOnly = true
+ }
+ table := reqParams.Get("table")
+ if len(table) == 0 {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_REQUEST: table is needed"}
+ }
+ ss := s.GetTableStats(leaderOnly, table)
+
+ return struct {
+ TableStats map[string]metric.TableStats `json:"table_stats"`
+ }{ss}, nil
+}
+
func (s *Server) doLogSyncStats(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
- if s.conf.LearnerRole == common.LearnerRoleLogSyncer {
+ if common.IsRoleLogSyncer(s.conf.LearnerRole) {
+ allUrls := make(map[string]bool)
recvLatency, syncLatency := node.GetLogLatencyStats()
recvStats, syncStats := s.GetLogSyncStatsInSyncLearner()
+ // note: a namespace may still be waiting for init, so the sync stats of
+ // such an uninitialized namespace may be missing from any stats.
+ for _, stat := range recvStats {
+ ninfos, err := s.dataCoord.GetSnapshotSyncInfo(stat.Name)
+ if err != nil {
+ sLog.Infof("failed to get %v nodes info - %s", stat.Name, err)
+ continue
+ }
+ for _, n := range ninfos {
+ uri := fmt.Sprintf("http://%s:%s/raft/stats?leader_only=true",
+ n.RemoteAddr, n.HttpAPIPort)
+ allUrls[uri] = true
+ }
+ }
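+ // query each collected node for its leader-only raft status and index it by raft group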
+ allRaftStats := make(map[string]CustomRaftStatus)
+ for uri := range allUrls {
+ rstat := make([]*RaftStatus, 0)
+ sc, err := common.APIRequest("GET", uri, nil, time.Second*3, &rstat)
+ if err != nil {
+ sLog.Infof("request %v error: %v", uri, err)
+ continue
+ }
+ if sc != http.StatusOK {
+ sLog.Infof("request %v error: %v", uri, sc)
+ continue
+ }
+
+ for _, rs := range rstat {
+ if rs.LeaderInfo != nil && rs.RaftStat.RaftState == raft.StateLeader.String() {
+ allRaftStats[rs.LeaderInfo.GroupName] = rs.RaftStat
+ }
+ }
+ }
+ // return the received/synced stats together with the leader raft stats collected above
return struct {
- SyncRecvLatency *common.WriteStats `json:"sync_net_latency"`
- SyncAllLatency *common.WriteStats `json:"sync_all_latency"`
- LogReceived []common.LogSyncStats `json:"log_received,omitempty"`
- LogSynced []common.LogSyncStats `json:"log_synced,omitempty"`
- }{recvLatency, syncLatency, recvStats, syncStats}, nil
+ SyncRecvLatency *metric.WriteStats `json:"sync_net_latency"`
+ SyncAllLatency *metric.WriteStats `json:"sync_all_latency"`
+ LogReceived []metric.LogSyncStats `json:"log_received,omitempty"`
+ LogSynced []metric.LogSyncStats `json:"log_synced,omitempty"`
+ LeaderRaftStats map[string]CustomRaftStatus `json:"leader_raft_stats,omitempty"`
+ }{recvLatency, syncLatency, recvStats, syncStats, allRaftStats}, nil
}
netStat := syncClusterNetStats.Copy()
totalStat := syncClusterTotalStats.Copy()
@@ -493,9 +851,9 @@ func (s *Server) doLogSyncStats(w http.ResponseWriter, req *http.Request, ps htt
}
logSyncedStats := s.GetLogSyncStats(leaderOnly, reqParams.Get("cluster"))
return struct {
- SyncNetLatency *common.WriteStats `json:"sync_net_latency"`
- SyncAllLatency *common.WriteStats `json:"sync_all_latency"`
- LogSynced []common.LogSyncStats `json:"log_synced,omitempty"`
+ SyncNetLatency *metric.WriteStats `json:"sync_net_latency"`
+ SyncAllLatency *metric.WriteStats `json:"sync_all_latency"`
+ LogSynced []metric.LogSyncStats `json:"log_synced,omitempty"`
}{netStat, totalStat, logSyncedStats}, nil
}
@@ -515,19 +873,37 @@ func (s *Server) doDBStats(w http.ResponseWriter, req *http.Request, ps httprout
return ss, nil
}
-func (s *Server) doDBPerf(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+func (s *Server) doWALDBStats(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
reqParams, err := url.ParseQuery(req.URL.RawQuery)
if err != nil {
+ sLog.Infof("failed to parse request params - %s", err)
return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_REQUEST"}
}
- levelStr := reqParams.Get("level")
- level, err := strconv.Atoi(levelStr)
+ leaderOnlyStr := reqParams.Get("leader_only")
+ leaderOnly, _ := strconv.ParseBool(leaderOnlyStr)
+
+ if leaderOnlyStr == "" {
+ leaderOnly = true
+ }
+ ss := s.GetWALDBStats(leaderOnly)
+ return ss, nil
+}
+
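+// setBlockRateHandler exposes runtime.SetBlockProfileRate so block profiling can be tuned at runtime.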
+func setBlockRateHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ rate, err := strconv.Atoi(req.FormValue("rate"))
if err != nil {
- return nil, common.HttpErr{Code: http.StatusBadRequest, Text: "INVALID_REQUEST"}
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: fmt.Sprintf("invalid block rate : %s", err.Error())}
}
+ runtime.SetBlockProfileRate(rate)
+ return nil, nil
+}
- node.SetPerfLevel(level)
- sLog.Infof("perf level set to: %v", level)
+func setMutexProfileHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (interface{}, error) {
+ rate, err := strconv.Atoi(req.FormValue("rate"))
+ if err != nil {
+ return nil, common.HttpErr{Code: http.StatusBadRequest, Text: fmt.Sprintf("invalid mutex profile rate : %s", err.Error())}
+ }
+ runtime.SetMutexProfileFraction(rate)
return nil, nil
}
@@ -542,8 +918,15 @@ func (s *Server) initHttpHandler() {
router.Handle("GET", common.APICheckBackup+"/:namespace", common.Decorate(s.checkNodeBackup, log, common.V1))
router.Handle("GET", common.APIIsRaftSynced+"/:namespace", common.Decorate(s.isNsNodeFullReady, common.V1))
router.Handle("GET", "/kv/get/:namespace", common.Decorate(s.getKey, common.PlainText))
- router.Handle("POST", "/kv/optimize/:namespace/:table", common.Decorate(s.doOptimize, log, common.V1))
+ router.Handle("POST", "/kv/optimize/:namespace/:table", common.Decorate(s.doOptimizeTable, log, common.V1))
+ router.Handle("POST", "/kv/optimize/:namespace", common.Decorate(s.doOptimizeNS, log, common.V1))
+ router.Handle("POST", "/kv/optimize_expire/:namespace", common.Decorate(s.doOptimizeExpire, log, common.V1))
router.Handle("POST", "/kv/optimize", common.Decorate(s.doOptimizeAll, log, common.V1))
+ router.Handle("POST", "/kv/optimize_anyrange/:namespace", common.Decorate(s.doOptimizeAnyRange, log, common.V1))
+ router.Handle("POST", "/kv/disable_optimize", common.Decorate(s.disableOptimize, log, common.V1))
+ router.Handle("POST", "/kv/enable_optimize", common.Decorate(s.enableOptimize, log, common.V1))
+ router.Handle("POST", "/kv/backup/:namespace", common.Decorate(s.doBackup, log, common.V1))
+ router.Handle("POST", "/kv/backup/", common.Decorate(s.doBackupAll, log, common.V1))
router.Handle("POST", "/cluster/raft/forcenew/:namespace", common.Decorate(s.doForceNewCluster, log, common.V1))
router.Handle("POST", "/cluster/raft/forceclean/:namespace", common.Decorate(s.doForceCleanRaftNode, log, common.V1))
router.Handle("POST", common.APIAddNode, common.Decorate(s.doAddNode, log, common.V1))
@@ -554,26 +937,41 @@ func (s *Server) initHttpHandler() {
router.Handle("GET", "/ping", common.Decorate(s.pingHandler, common.PlainText))
router.Handle("POST", "/loglevel/set", common.Decorate(s.doSetLogLevel, log, common.V1))
+ router.Handle("POST", "/slowlog/set", common.Decorate(s.doChangeSlowLogLevel, log, common.V1))
router.Handle("POST", "/costlevel/set", common.Decorate(s.doSetCostLevel, log, common.V1))
+ router.Handle("POST", "/rsynclimit", common.Decorate(s.doSetRsyncLimit, log, common.V1))
router.Handle("POST", "/staleread", common.Decorate(s.doSetStaleRead, log, common.V1))
router.Handle("POST", "/synceronly", common.Decorate(s.doSetSyncerOnly, log, common.V1))
+ router.Handle("POST", "/disableconflictlog", common.Decorate(s.doSwitchDisableConflictLog, log, common.V1))
+ router.Handle("GET", "/synceronly", common.Decorate(s.getSyncerOnlyState, log, common.V1))
+ router.Handle("POST", "/conf/set", common.Decorate(s.doSetDynamicConf, log, common.V1))
+ router.Handle("GET", "/conf/get", common.Decorate(s.doGetDynamicConf, log, common.V1))
router.Handle("GET", "/info", common.Decorate(s.doInfo, common.V1))
router.Handle("POST", "/syncer/setindex/:clustername", common.Decorate(s.doSetSyncerIndex, log, common.V1))
+ router.Handle("POST", "/syncer/normalinit", common.Decorate(s.doSetSyncerNormalInit, log, common.V1))
+ router.Handle("GET", "/syncer/normalinit", common.Decorate(s.getSyncerNormalInit, log, common.V1))
+ router.Handle("GET", "/syncer/runnings", common.Decorate(s.getSyncerRunnings, log, common.V1))
+
+ router.Handle("POST", "/topn/enable/:namespace", common.Decorate(s.doEnableTopn, log, common.V1))
+ router.Handle("POST", "/topn/disable/:namespace", common.Decorate(s.doDisableTopn, log, common.V1))
+ router.Handle("POST", "/topn/clear/:namespace", common.Decorate(s.doClearTopn, log, common.V1))
router.Handle("GET", "/stats", common.Decorate(s.doStats, common.V1))
+ router.Handle("GET", common.APITableStats, common.Decorate(s.doTableStats, common.V1))
router.Handle("GET", "/logsync/stats", common.Decorate(s.doLogSyncStats, common.V1))
router.Handle("GET", "/db/stats", common.Decorate(s.doDBStats, common.V1))
- router.Handle("GET", "/db/perf", common.Decorate(s.doDBPerf, log, common.V1))
+ router.Handle("POST", "/db/options/set", common.Decorate(s.doSetDBOptions, log, common.V1))
+ router.Handle("GET", "/waldb/stats", common.Decorate(s.doWALDBStats, common.V1))
router.Handle("GET", "/raft/stats", common.Decorate(s.doRaftStats, debugLog, common.V1))
+ router.Handle("POST", "/debug/setblockrate", common.Decorate(setBlockRateHandler, log, common.V1))
+ router.Handle("POST", "/debug/setmutexrate", common.Decorate(setMutexProfileHandler, log, common.V1))
+ router.Handler("GET", "/metrics", promhttp.Handler())
s.router = router
}
// serveHttpKVAPI starts a key-value server with a GET/PUT API and listens.
func (s *Server) serveHttpAPI(port int, stopC <-chan struct{}) {
- if s.conf.ProfilePort >= 0 {
- go http.ListenAndServe(":"+strconv.Itoa(s.conf.ProfilePort), nil)
- }
s.initHttpHandler()
srv := http.Server{
Addr: ":" + strconv.Itoa(port),
diff --git a/server/httpapi_test.go b/server/httpapi_test.go
index f1173011..55fc4427 100644
--- a/server/httpapi_test.go
+++ b/server/httpapi_test.go
@@ -6,9 +6,12 @@ import (
"fmt"
"net/http"
"testing"
+ "time"
- "github.com/absolute8511/ZanRedisDB/node"
"github.com/siddontang/goredis"
+ "github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/node"
)
func insertData(t *testing.T, c *goredis.PoolConn, cnt int, cmd, prefixkey string, args ...interface{}) {
@@ -165,3 +168,109 @@ func TestDeleteRangeCrossTable(t *testing.T) {
checkScanKeys(t, c, prefixzset2, "zset", tableCnt/2)
checkFullScan(t, c, prefixzset2, "zset", tableCnt/2)
}
+
+func TestMarshalRaftStats(t *testing.T) {
+ c := getMergeTestConn(t)
+ defer c.Close()
+ uri := fmt.Sprintf("http://127.0.0.1:%v/raft/stats?leader_only=true",
+ redisportMerge+1)
+ rstat := make([]*RaftStatus, 0)
+ sc, err := common.APIRequest("GET", uri, nil, time.Second*3, &rstat)
+ if err != nil {
+ t.Errorf("request %v error: %v", uri, err)
+ return
+ }
+ if sc != http.StatusOK {
+ t.Errorf("request %v error: %v", uri, sc)
+ return
+ }
+ if len(rstat) == 0 {
+ t.Errorf("get raft stats %v empty !!!", rstat)
+ return
+ }
+ d, _ := json.Marshal(rstat)
+ t.Logf("%v =======", string(d))
+}
+
+func TestSetGetDynamicConf(t *testing.T) {
+ c := getMergeTestConn(t)
+ defer c.Close()
+ // empty str conf
+ uriGet := fmt.Sprintf("http://127.0.0.1:%v/conf/get?type=str&key=test_str",
+ redisportMerge+1)
+
+ type strConf struct {
+ Key string
+ Value string
+ }
+ resp := strConf{}
+ sc, err := common.APIRequest("GET", uriGet, nil, time.Second*3, &resp)
+ assert.Nil(t, err)
+ assert.Equal(t, http.StatusOK, sc)
+ assert.Equal(t, "test_str", resp.Key)
+ assert.Equal(t, "test_str", resp.Value)
+
+ uriSet := fmt.Sprintf("http://127.0.0.1:%v/conf/set?type=str&key=test_str&value=",
+ redisportMerge+1)
+
+ sc, err = common.APIRequest("POST", uriSet, nil, time.Second*3, nil)
+ assert.Nil(t, err)
+ assert.Equal(t, http.StatusOK, sc)
+
+ sc, err = common.APIRequest("GET", uriGet, nil, time.Second*3, &resp)
+ assert.Nil(t, err)
+ assert.Equal(t, http.StatusOK, sc)
+ assert.Equal(t, "test_str", resp.Key)
+ assert.Equal(t, "", resp.Value)
+
+ type intConf struct {
+ Key string `json:"key,omitempty"`
+ Value int `json:"value,omitempty"`
+ }
+ // change int conf
+ uriGet = fmt.Sprintf("http://127.0.0.1:%v/conf/get?type=int&key=empty_int",
+ redisportMerge+1)
+ respInt := intConf{}
+ sc, err = common.APIRequest("GET", uriGet, nil, time.Second*3, &respInt)
+ assert.Nil(t, err)
+ assert.Equal(t, http.StatusOK, sc)
+ assert.Equal(t, "empty_int", respInt.Key)
+ assert.Equal(t, 0, respInt.Value)
+ uriSet = fmt.Sprintf("http://127.0.0.1:%v/conf/set?type=int&key=empty_int&value=10",
+ redisportMerge+1)
+ sc, err = common.APIRequest("POST", uriSet, nil, time.Second*3, nil)
+ assert.Nil(t, err)
+ assert.Equal(t, http.StatusOK, sc)
+ sc, err = common.APIRequest("GET", uriGet, nil, time.Second*3, &respInt)
+ assert.Nil(t, err)
+ assert.Equal(t, http.StatusOK, sc)
+ assert.Equal(t, "empty_int", respInt.Key)
+ assert.Equal(t, 10, respInt.Value)
+
+ // set not exist int/str conf
+ uriSet = fmt.Sprintf("http://127.0.0.1:%v/conf/set?type=int&key=noexist_int&value=10",
+ redisportMerge+1)
+ sc, err = common.APIRequest("POST", uriSet, nil, time.Second*3, nil)
+ assert.Nil(t, err)
+ assert.Equal(t, http.StatusOK, sc)
+ uriGet = fmt.Sprintf("http://127.0.0.1:%v/conf/get?type=int&key=noexist_int",
+ redisportMerge+1)
+ sc, err = common.APIRequest("GET", uriGet, nil, time.Second*3, &respInt)
+ assert.Nil(t, err)
+ assert.Equal(t, http.StatusOK, sc)
+ assert.Equal(t, "noexist_int", respInt.Key)
+ assert.Equal(t, 0, respInt.Value)
+
+ uriSet = fmt.Sprintf("http://127.0.0.1:%v/conf/set?type=str&key=noexist_str&value=nostr",
+ redisportMerge+1)
+ sc, err = common.APIRequest("POST", uriSet, nil, time.Second*3, nil)
+ assert.Nil(t, err)
+ assert.Equal(t, http.StatusOK, sc)
+ uriGet = fmt.Sprintf("http://127.0.0.1:%v/conf/get?type=str&key=noexist_str",
+ redisportMerge+1)
+ sc, err = common.APIRequest("GET", uriGet, nil, time.Second*3, &resp)
+ assert.Nil(t, err)
+ assert.Equal(t, http.StatusOK, sc)
+ assert.Equal(t, "noexist_str", resp.Key)
+ assert.Equal(t, "nostr", resp.Value)
+}
diff --git a/server/merge.go b/server/merge.go
index 2b1bbfed..9fc0fdc5 100644
--- a/server/merge.go
+++ b/server/merge.go
@@ -7,9 +7,9 @@ import (
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/node"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/node"
)
func dispatchHandlersAndWait(cmdName string, handlers []common.MergeCommandFunc, cmds []redcon.Command, concurrent bool) []interface{} {
@@ -71,6 +71,7 @@ func (s *Server) doMergeCommand(conn redcon.Conn, cmd redcon.Command) {
}
}
+// getHandlersForKeys returns the merge handlers, the per-node commands, whether the command writes, and any error.
func (s *Server) getHandlersForKeys(cmdName string,
origArgs [][]byte) ([]common.MergeCommandFunc, []redcon.Command, bool, error) {
cmdArgMap := make(map[string][][]byte)
@@ -122,9 +123,11 @@ func (s *Server) getHandlersForKeys(cmdName string,
// never happen
return nil, nil, false, errInvalidCommand
}
+ if nsNode.Node.IsStopping() {
+ return nil, nil, false, common.ErrStopped
+ }
if !isWrite && !nsNode.Node.IsLead() && (atomic.LoadInt32(&allowStaleRead) == 0) {
// read only to leader to avoid stale read
- // TODO: also read command can request the raft read index if not leader
return nil, nil, hasWrite, node.ErrNamespaceNotLeader
}
handlerMap[nsNode.FullName()] = f
@@ -199,28 +202,34 @@ func (s *Server) dispatchAndWaitMergeCmd(cmd redcon.Command) ([]redcon.Command,
if len(cmd.Args) < 2 {
return nil, nil, fmt.Errorf("ERR wrong number of arguments for '%s' command", string(cmd.Args[0]))
}
- handlers, cmds, concurrent, err := s.GetMergeHandlers(cmd)
+ hasWrite, handlers, cmds, concurrent, err := s.GetMergeHandlers(cmd)
if err != nil {
return nil, nil, err
}
+ if hasWrite && node.IsSyncerOnly() {
+ err = fmt.Errorf("The cluster is only allowing syncer write : ERR handle command " + string(cmd.Args[0]))
+ return nil, nil, err
+ }
return cmds, dispatchHandlersAndWait(string(cmd.Args[0]), handlers, cmds, concurrent), nil
}
-func (s *Server) GetMergeHandlers(cmd redcon.Command) ([]common.MergeCommandFunc, []redcon.Command, bool, error) {
+// GetMergeHandlers returns whether the command writes, the merge handlers, the per-node commands, whether they can run concurrently, and any error.
+func (s *Server) GetMergeHandlers(cmd redcon.Command) (bool, []common.MergeCommandFunc, []redcon.Command, bool, error) {
+ hasWrite := false
if len(cmd.Args) < 2 {
- return nil, nil, false, fmt.Errorf("ERR wrong number of arguments for '%s' command", string(cmd.Args[0]))
+ return hasWrite, nil, nil, false, fmt.Errorf("ERR wrong number of arguments for '%s' command", string(cmd.Args[0]))
}
rawKey := cmd.Args[1]
namespace, realKey, err := common.ExtractNamesapce(rawKey)
if err != nil {
sLog.Infof("failed to get the namespace of the redis command:%v", string(rawKey))
- return nil, nil, false, err
+ return hasWrite, nil, nil, false, err
}
nodes, err := s.nsMgr.GetNamespaceNodes(namespace, true)
if err != nil {
- return nil, nil, false, err
+ return hasWrite, nil, nil, false, err
}
cmdName := strings.ToLower(string(cmd.Args[0]))
@@ -229,10 +238,14 @@ func (s *Server) GetMergeHandlers(cmd redcon.Command) ([]common.MergeCommandFunc
if common.IsMergeScanCommand(cmdName) {
cmds, err = s.doScanNodesFilter(realKey, namespace, cmd, nodes)
if err != nil {
- return nil, nil, false, err
+ return hasWrite, nil, nil, false, err
}
} else if common.IsMergeKeysCommand(cmdName) {
- return s.getHandlersForKeys(cmdName, cmd.Args[1:])
+ h, cmds, w, err := s.getHandlersForKeys(cmdName, cmd.Args[1:])
+ if w {
+ hasWrite = true
+ }
+ return hasWrite, h, cmds, true, err
} else {
cmds = make(map[string]redcon.Command)
for k := range nodes {
@@ -248,10 +261,15 @@ func (s *Server) GetMergeHandlers(cmd redcon.Command) ([]common.MergeCommandFunc
newCmd := cmds[k]
h, isWrite, ok := v.Node.GetMergeHandler(cmdName)
if ok {
+ if isWrite {
+ hasWrite = true
+ }
if !isWrite && !v.Node.IsLead() && (atomic.LoadInt32(&allowStaleRead) == 0) {
// read only to leader to avoid stale read
- // TODO: also read command can request the raft read index if not leader
- return nil, nil, needConcurrent, node.ErrNamespaceNotLeader
+ return hasWrite, nil, nil, needConcurrent, node.ErrNamespaceNotLeader
+ }
+ if v.Node.IsStopping() {
+ return hasWrite, nil, nil, needConcurrent, common.ErrStopped
}
handlers = append(handlers, h)
commands = append(commands, newCmd)
@@ -259,8 +277,8 @@ func (s *Server) GetMergeHandlers(cmd redcon.Command) ([]common.MergeCommandFunc
}
if len(handlers) <= 0 {
- return nil, nil, needConcurrent, common.ErrInvalidCommand
+ return hasWrite, nil, nil, needConcurrent, common.ErrInvalidCommand
}
- return handlers, commands, needConcurrent, nil
+ return hasWrite, handlers, commands, needConcurrent, nil
}
diff --git a/server/queue.go b/server/queue.go
new file mode 100644
index 00000000..080672e8
--- /dev/null
+++ b/server/queue.go
@@ -0,0 +1,236 @@
+// Copyright 2017-2019 Lei Ni (nilei81@gmail.com)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package server
+
+import (
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
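+// internalReq is a single queue element: the raw payload, an optional callback, and a sequence id.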
+type internalReq struct {
+ d []byte
+ f func()
+ id uint64
+}
+
+type elemT internalReq
+
+func newElemWithData(d []byte) elemT {
+ e := elemT{}
+ e.d = d
+ return e
+}
+
+func (e elemT) GetID() uint64 {
+ return e.id
+}
+
+func (e elemT) Func() func() {
+ return e.f
+}
+
+func (e elemT) GetData() []byte {
+ return e.d
+}
+
+func (e *elemT) ResetData() {
+ e.d = nil
+}
+
+func (e *elemT) SetID(index uint64) {
+ e.id = index
+}
+
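+// entryQueue is a bounded, double-buffered queue: producers append to the
+// active buffer via add/addWait while the consumer swaps buffers with get and
+// drains all pending entries at once. lazyFreeCycle controls how often the
+// drained buffer's payloads are released for GC.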
+type entryQueue struct {
+ size uint64
+ left []elemT
+ right []elemT
+ leftInWrite bool
+ stopped bool
+ paused bool
+ idx uint64
+ oldIdx uint64
+ cycle uint64
+ lazyFreeCycle uint64
+ mu sync.Mutex
+ waitC chan struct{}
+ waitCnt int64
+}
+
+func newEntryQueue(size uint64, lazyFreeCycle uint64) *entryQueue {
+ e := &entryQueue{
+ size: size,
+ lazyFreeCycle: lazyFreeCycle,
+ left: make([]elemT, size),
+ right: make([]elemT, size),
+ waitC: make(chan struct{}, 1),
+ }
+ return e
+}
+
+func (q *entryQueue) closed() bool {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+ return q.stopped
+}
+
+func (q *entryQueue) close() {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+ q.stopped = true
+ if q.waitC != nil {
+ close(q.waitC)
+ q.waitC = nil
+ }
+}
+
+func (q *entryQueue) targetQueue() []elemT {
+ var t []elemT
+ if q.leftInWrite {
+ t = q.left
+ } else {
+ t = q.right
+ }
+ return t
+}
+
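+// addWait tries to add the entry and, when the queue is full or paused, blocks
+// on the queue's wait channel until the consumer drains it or the timeout
+// expires. It gives up early once the number of waiters grows well beyond the
+// queue size, and returns (added, stopped).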
+func (q *entryQueue) addWait(ent elemT, to time.Duration) (bool, bool) {
+ if to <= 0 {
+ added, stopped, _ := q.add(ent)
+ return added, stopped
+ }
+ var t *time.Timer
+ for {
+ added, stopped, w := q.add(ent)
+ if added || stopped {
+ if t != nil {
+ t.Stop()
+ }
+ return added, stopped
+ }
+ // too full
+ if atomic.LoadInt64(&q.waitCnt) > int64(q.size)*5 {
+ if t != nil {
+ t.Stop()
+ }
+ return false, stopped
+ }
+ if t == nil {
+ t = time.NewTimer(to)
+ }
+ atomic.AddInt64(&q.waitCnt, 1)
+ select {
+ case <-t.C:
+ atomic.AddInt64(&q.waitCnt, -1)
+ t.Stop()
+ return false, false
+ case <-w:
+ }
+ atomic.AddInt64(&q.waitCnt, -1)
+ }
+}
+
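+// add appends the entry to the active buffer without blocking. It returns
+// whether the entry was added, whether the queue is stopped, and the channel
+// callers can wait on when the queue is currently full or paused.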
+func (q *entryQueue) add(ent elemT) (bool, bool, chan struct{}) {
+ q.mu.Lock()
+ wc := q.waitC
+ if q.paused || q.idx >= q.size {
+ q.mu.Unlock()
+ return false, q.stopped, wc
+ }
+ if q.stopped {
+ q.mu.Unlock()
+ return false, true, wc
+ }
+ w := q.targetQueue()
+ w[q.idx] = ent
+ q.idx++
+ q.mu.Unlock()
+ return true, false, wc
+}
+
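+// gc clears payload references in the buffer about to be reused for writes
+// (the one drained by the previous get): on every call when lazyFreeCycle is
+// 1, or the whole buffer on every lazyFreeCycle-th cycle otherwise.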
+func (q *entryQueue) gc() {
+ if q.lazyFreeCycle > 0 {
+ oldq := q.targetQueue()
+ if q.lazyFreeCycle == 1 {
+ for i := uint64(0); i < q.oldIdx; i++ {
+ oldq[i].ResetData()
+ }
+ } else if q.cycle%q.lazyFreeCycle == 0 {
+ for i := uint64(0); i < q.size; i++ {
+ oldq[i].ResetData()
+ }
+ }
+ }
+}
+
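+// get swaps the active buffer and returns all entries added since the last
+// call. It updates the paused state, runs the lazy-free cycle on the buffer
+// being recycled, and wakes any adders blocked on a previously full or paused
+// queue.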
+func (q *entryQueue) get(paused bool) []elemT {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+ needNotify := false
+ if q.paused || q.idx >= q.size {
+ needNotify = true
+ }
+ q.paused = paused
+ q.cycle++
+ sz := q.idx
+ q.idx = 0
+ t := q.targetQueue()
+ q.leftInWrite = !q.leftInWrite
+ q.gc()
+ q.oldIdx = sz
+ if needNotify {
+ if q.waitC != nil {
+ close(q.waitC)
+ }
+ q.waitC = make(chan struct{}, 1)
+ }
+ return t[:sz]
+}
+
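+// readyCluster tracks which cluster IDs have been marked ready since the last
+// call to getReadyClusters, alternating between two preallocated maps so the
+// ready set can be handed out without copying.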
+type readyCluster struct {
+ mu sync.Mutex
+ ready map[uint64]struct{}
+ maps [2]map[uint64]struct{}
+ index uint8
+}
+
+func newReadyCluster() *readyCluster {
+ r := &readyCluster{}
+ r.maps[0] = make(map[uint64]struct{})
+ r.maps[1] = make(map[uint64]struct{})
+ r.ready = r.maps[0]
+ return r
+}
+
+func (r *readyCluster) setClusterReady(clusterID uint64) {
+ r.mu.Lock()
+ r.ready[clusterID] = struct{}{}
+ r.mu.Unlock()
+}
+
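+// getReadyClusters returns the set of clusters marked ready since the previous
+// call and swaps in a cleared map to collect new updates.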
+func (r *readyCluster) getReadyClusters() map[uint64]struct{} {
+ r.mu.Lock()
+ v := r.ready
+ r.index++
+ selected := r.index % 2
+ nm := r.maps[selected]
+ for k := range nm {
+ delete(nm, k)
+ }
+ r.ready = nm
+ r.mu.Unlock()
+ return v
+}
diff --git a/server/queue_test.go b/server/queue_test.go
new file mode 100644
index 00000000..33273666
--- /dev/null
+++ b/server/queue_test.go
@@ -0,0 +1,345 @@
+// Copyright 2017-2019 Lei Ni (nilei81@gmail.com)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !dragonboat_slowtest,!dragonboat_errorinjectiontest
+
+package server
+
+import (
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestEntryQueueCanBeCreated(t *testing.T) {
+ q := newEntryQueue(5, 0)
+ if q.size != 5 || len(q.left) != 5 || len(q.right) != 5 {
+ t.Errorf("size unexpected")
+ }
+ if q.idx != 0 {
+ t.Errorf("idx %d, want 0", q.idx)
+ }
+}
+
+func TestLazyFreeCanBeDisabled(t *testing.T) {
+ q := newEntryQueue(5, 0)
+ q.add(newElemWithData(make([]byte, 16)))
+ q.add(newElemWithData(make([]byte, 16)))
+ q.add(newElemWithData(make([]byte, 16)))
+ q.get(false)
+ q.get(false)
+ tq := q.targetQueue()
+ for i := 0; i < 3; i++ {
+ if tq[i].GetData() == nil {
+ t.Errorf("data unexpectedly freed")
+ }
+ }
+}
+
+func TestLazyFreeCanBeUsed(t *testing.T) {
+ q := newEntryQueue(5, 1)
+ q.add(newElemWithData(make([]byte, 16)))
+ q.add(newElemWithData(make([]byte, 16)))
+ q.add(newElemWithData(make([]byte, 16)))
+ q.get(false)
+ q.get(false)
+ tq := q.targetQueue()
+ for i := 0; i < 3; i++ {
+ if tq[i].GetData() != nil {
+ t.Errorf("data unexpectedly not freed")
+ }
+ }
+}
+
+func TestLazyFreeCycleCanBeSet(t *testing.T) {
+ q := newEntryQueue(5, 6)
+ q.add(newElemWithData(make([]byte, 16)))
+ q.add(newElemWithData(make([]byte, 16)))
+ q.add(newElemWithData(make([]byte, 16)))
+ q.get(false)
+ q.get(false)
+ tq := q.targetQueue()
+ for i := 0; i < 3; i++ {
+ if tq[i].GetData() == nil {
+ t.Errorf("data unexpectedly freed")
+ }
+ }
+ q.get(false)
+ q.get(false)
+ tq = q.targetQueue()
+ for i := 0; i < 3; i++ {
+ if tq[i].GetData() == nil {
+ t.Errorf("data unexpectedly freed")
+ }
+ }
+ q.get(false)
+ q.get(false)
+ tq = q.targetQueue()
+ for i := 0; i < 3; i++ {
+ if tq[i].GetData() != nil {
+ t.Errorf("data not freed at the expected cycle")
+ }
+ }
+}
+
+func TestEntryQueueCanBePaused(t *testing.T) {
+ q := newEntryQueue(5, 0)
+ if q.paused {
+ t.Errorf("entry queue is paused by default")
+ }
+ for i := 0; i < 5; i++ {
+ ok, stopped, _ := q.add(elemT{})
+ if !ok || stopped {
+ t.Errorf("failed to add new entry")
+ }
+ if q.stopped {
+ t.Errorf("stopped too early")
+ }
+ }
+ v := q.get(true)
+ if len(v) != 5 {
+ t.Errorf("failed to get all entries")
+ }
+ if !q.paused {
+ t.Errorf("not paused")
+ }
+ ok, stopped, _ := q.add(elemT{})
+ if ok {
+ t.Errorf("entry added to paused queue")
+ }
+ if stopped {
+ t.Errorf("entry queue unexpectedly stopped")
+ }
+}
+
+func TestEntryQueueCanBeClosed(t *testing.T) {
+ q := newEntryQueue(5, 0)
+ if q.stopped {
+ t.Errorf("entry queue is stopped by default")
+ }
+ for i := 0; i < 5; i++ {
+ ok, stopped, _ := q.add(elemT{})
+ if !ok || stopped {
+ t.Errorf("failed to add new entry")
+ }
+ if q.stopped {
+ t.Errorf("stopped too early")
+ }
+ }
+ ok, _, _ := q.add(elemT{})
+ if ok {
+ t.Errorf("not expect to add more")
+ }
+ q = newEntryQueue(5, 0)
+ q.close()
+ if !q.stopped {
+ t.Errorf("entry queue is not marked as stopped")
+ }
+ if q.idx != 0 {
+ t.Errorf("idx %d, want 0", q.idx)
+ }
+ ok, stopped, _ := q.add(elemT{})
+ if ok {
+ t.Errorf("not expect to add more")
+ }
+ if !stopped {
+ t.Errorf("stopped flag is not returned")
+ }
+}
+
+func TestEntryQueueAllowEntriesToBeAdded(t *testing.T) {
+ q := newEntryQueue(5, 0)
+ for i := uint64(0); i < 5; i++ {
+ e := elemT{}
+ e.SetID(uint64(i + 1))
+ ok, stopped, _ := q.add(e)
+ if !ok || stopped {
+ t.Errorf("failed to add new entry")
+ }
+ if q.idx != i+1 {
+ t.Errorf("idx %d, want %d", q.idx, i+1)
+ }
+ var r []elemT
+ if q.leftInWrite {
+ r = q.left
+ } else {
+ r = q.right
+ }
+ if r[i].GetID() != uint64(i+1) {
+ t.Errorf("index %d, want %d", r[i].GetID(), uint64(i+1))
+ }
+ }
+}
+
+func TestEntryQueueAllowAddedEntriesToBeReturned(t *testing.T) {
+ q := newEntryQueue(5, 0)
+ for i := 0; i < 3; i++ {
+ e := elemT{}
+ e.SetID(uint64(i + 1))
+ ok, stopped, _ := q.add(e)
+ if !ok || stopped {
+ t.Errorf("failed to add new entry")
+ }
+ }
+ r := q.get(false)
+ if len(r) != 3 {
+ t.Errorf("len %d, want %d", len(r), 3)
+ }
+ if q.idx != 0 {
+ t.Errorf("idx %d, want %d", q.idx, 0)
+ }
+ // check whether we can keep adding entries as long as we keep getting
+ // previously written entries.
+ expectedIndex := uint64(1)
+ q = newEntryQueue(5, 0)
+ for i := 0; i < 1000; i++ {
+ e := elemT{}
+ e.SetID(uint64(i + 1))
+ ok, stopped, _ := q.add(e)
+ if !ok || stopped {
+ t.Errorf("failed to add new entry")
+ }
+ if q.idx == q.size {
+ r := q.get(false)
+ if len(r) != 5 {
+ t.Errorf("len %d, want %d", len(r), 5)
+ }
+ for _, ee := range r {
+ if ee.GetID() != expectedIndex {
+ t.Errorf("index %d, expected %d", ee.GetID(), expectedIndex)
+ }
+ expectedIndex++
+ }
+ }
+ }
+}
+
+func TestEntryQueueAllowEntriesToBeAddedAndWait(t *testing.T) {
+ q := newEntryQueue(5, 0)
+ for i := uint64(0); i < 5; i++ {
+ e := elemT{}
+ e.SetID(uint64(i + 1))
+ ok, stopped := q.addWait(e, time.Second)
+ if !ok || stopped {
+ t.Errorf("failed to add new entry")
+ }
+ if q.idx != i+1 {
+ t.Errorf("idx %d, want %d", q.idx, i+1)
+ }
+ var r []elemT
+ if q.leftInWrite {
+ r = q.left
+ } else {
+ r = q.right
+ }
+ if r[i].GetID() != uint64(i+1) {
+ t.Errorf("index %d, want %d", r[i].GetID(), uint64(i+1))
+ }
+ }
+ for i := uint64(5); i < 10; i++ {
+ e := elemT{}
+ e.SetID(uint64(i + 1))
+ ok, _ := q.addWait(e, time.Second)
+ if ok {
+ t.Errorf("should failed while add wait full")
+ }
+ }
+ go func() {
+ for {
+ time.Sleep(time.Second / 100)
+ if q.closed() {
+ return
+ }
+ q.get(false)
+ }
+ }()
+ for i := uint64(0); i < 5; i++ {
+ e := elemT{}
+ e.SetID(uint64(i + 1))
+ ok, stopped := q.addWait(e, time.Second)
+ if !ok || stopped {
+ t.Errorf("failed to add new entry")
+ }
+ if q.idx != i+1 {
+ t.Errorf("idx %d, want %d", q.idx, i+1)
+ }
+ }
+ var wg sync.WaitGroup
+ for g := 0; g < 10; g++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := uint64(0); i < 100; i++ {
+ e := elemT{}
+ e.SetID(uint64(i + 1))
+ ok, stopped := q.addWait(e, time.Second*5)
+ if !ok || stopped {
+ t.Errorf("failed to add new entry")
+ }
+ }
+ }()
+ }
+ wg.Wait()
+ q.close()
+}
+
+func TestClusterCanBeSetAsReady(t *testing.T) {
+ rc := newReadyCluster()
+ if len(rc.ready) != 0 {
+ t.Errorf("ready map not empty")
+ }
+ rc.setClusterReady(1)
+ rc.setClusterReady(2)
+ rc.setClusterReady(2)
+ if len(rc.ready) != 2 {
+ t.Errorf("ready map sz %d, want 2", len(rc.ready))
+ }
+ _, ok := rc.ready[1]
+ if !ok {
+ t.Errorf("cluster 1 not set as ready")
+ }
+ _, ok = rc.ready[2]
+ if !ok {
+ t.Errorf("cluster 2 not set as ready")
+ }
+}
+
+func TestReadyClusterCanBeReturnedAndCleared(t *testing.T) {
+ rc := newReadyCluster()
+ if len(rc.ready) != 0 {
+ t.Errorf("ready map not empty")
+ }
+ rc.setClusterReady(1)
+ rc.setClusterReady(2)
+ rc.setClusterReady(2)
+ if len(rc.ready) != 2 {
+ t.Errorf("ready map sz %d, want 2", len(rc.ready))
+ }
+ r := rc.getReadyClusters()
+ if len(r) != 2 {
+ t.Errorf("ready map sz %d, want 2", len(r))
+ }
+ if len(rc.ready) != 0 {
+ t.Errorf("cluster ready map not cleared")
+ }
+ r = rc.getReadyClusters()
+ if len(r) != 0 {
+ t.Errorf("cluster ready map not cleared")
+ }
+ rc.setClusterReady(4)
+ r = rc.getReadyClusters()
+ if len(r) != 1 {
+ t.Errorf("cluster ready not set")
+ }
+}
diff --git a/server/redis_api.go b/server/redis_api.go
index 400e5507..f64d600d 100644
--- a/server/redis_api.go
+++ b/server/redis_api.go
@@ -7,9 +7,8 @@ import (
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/node"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
)
var (
@@ -17,6 +16,7 @@ var (
costStatsLevel int32
)
+// TODO: maybe provide reusable memory buffer for req and response
func (s *Server) serverRedis(conn redcon.Conn, cmd redcon.Command) {
defer func() {
if e := recover(); e != nil {
@@ -46,13 +46,12 @@ func (s *Server) serverRedis(conn redcon.Conn, cmd redcon.Command) {
case "ping":
conn.WriteString("PONG")
case "auth":
- // TODO: add auth here
conn.WriteString("OK")
case "quit":
conn.WriteString("OK")
conn.Close()
case "info":
- s := s.GetStats(false)
+ s := s.GetStats(false, false)
d, _ := json.MarshalIndent(s, "", " ")
conn.WriteBulkString(string(d))
default:
@@ -64,8 +63,12 @@ func (s *Server) serverRedis(conn redcon.Conn, cmd redcon.Command) {
if level > 0 {
start = time.Now()
}
- isWrite, h, cmd, err := s.GetHandler(cmdName, cmd)
cmdStr := string(cmd.Args[0])
+ ns, pk, pkSum, err := GetPKAndHashSum(cmdName, cmd)
+ if err != nil {
+ conn.WriteError(err.Error() + " : ERR handle command " + cmdStr)
+ break
+ }
if len(cmd.Args) > 1 {
cmdStr += ", " + string(cmd.Args[1])
if level > 4 && len(cmd.Args) > 2 {
@@ -74,13 +77,11 @@ func (s *Server) serverRedis(conn redcon.Conn, cmd redcon.Command) {
}
}
}
+ kvn, err := s.GetHandleNode(ns, pk, pkSum, cmdName, cmd)
if err == nil {
- if isWrite && node.IsSyncerOnly() {
- conn.WriteError("The cluster is only allowing syncer write : ERR handle command " + cmdStr)
- } else {
- h(conn, cmd)
- }
- } else {
+ err = s.handleRedisSingleCmd(cmdName, pk, pkSum, kvn, conn, cmd)
+ }
+ if err != nil {
conn.WriteError(err.Error() + " : ERR handle command " + cmdStr)
}
if level > 0 && err == nil {
@@ -111,6 +112,7 @@ func (s *Server) serveRedisAPI(port int, stopC <-chan struct{}) {
}
},
)
+ redisS.SetIdleClose(time.Minute * 5)
go func() {
err := redisS.ListenAndServe()
if err != nil {
diff --git a/server/redis_api_fullscan_test.go b/server/redis_api_fullscan_test.go
index 640a2188..4b3bc013 100644
--- a/server/redis_api_fullscan_test.go
+++ b/server/redis_api_fullscan_test.go
@@ -10,11 +10,11 @@ import (
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/node"
- "github.com/absolute8511/ZanRedisDB/rockredis"
"github.com/siddontang/goredis"
"github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/node"
+ "github.com/youzan/ZanRedisDB/rockredis"
)
var testOnceFullScan sync.Once
@@ -22,7 +22,7 @@ var kvsFullScan *Server
var redisportFullScan int
var gtmpScanDir string
-func startFullScanTestServer(t *testing.T) (*Server, int, string) {
+func startFullScanTestServer(t *testing.T, clusterID string, rport int, partNum int) (*Server, int, string) {
tmpDir, err := ioutil.TempDir("", fmt.Sprintf("rocksdb-test-%d", time.Now().UnixNano()))
if err != nil {
t.Fatal(err)
@@ -32,85 +32,64 @@ func startFullScanTestServer(t *testing.T) (*Server, int, string) {
path.Join(tmpDir, "myid"),
[]byte(strconv.FormatInt(int64(1), 10)),
common.FILE_PERM)
- raftAddr := "http://127.0.0.1:52346"
- redisportFullScan := 52345
+ raftAddr := fmt.Sprintf("http://127.0.0.1:%d", rport+2)
+
+ kvOpts := ServerConfig{
+ ClusterID: clusterID,
+ DataDir: tmpDir,
+ RedisAPIPort: rport,
+ HttpAPIPort: rport + 1,
+ LocalRaftAddr: raftAddr,
+ BroadcastAddr: "127.0.0.1",
+ TickMs: 100,
+ ElectionTick: 5,
+ UseRocksWAL: testUseRocksWAL,
+ SharedRocksWAL: testSharedRocksWAL,
+ }
+ kvOpts.RocksDBOpts.EnablePartitionedIndexFilter = true
+ kvOpts.WALRocksDBOpts.EngineType = testEngineType
+ kv, err := NewServer(kvOpts)
+ assert.Nil(t, err)
+
var replica node.ReplicaInfo
replica.NodeID = 1
replica.ReplicaID = 1
replica.RaftAddr = raftAddr
- kvOpts := ServerConfig{
- ClusterID: "test",
- DataDir: tmpDir,
- RedisAPIPort: redisportFullScan,
- LocalRaftAddr: raftAddr,
- BroadcastAddr: "127.0.0.1",
- TickMs: 100,
- ElectionTick: 5,
- }
- nsConf := node.NewNSConfig()
- nsConf.Name = "default-0"
- nsConf.BaseName = "default"
- nsConf.EngType = rockredis.EngType
- nsConf.PartitionNum = 3
- nsConf.Replicator = 1
- nsConf.RaftGroupConf.GroupID = 1000
- nsConf.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica)
- kv := NewServer(kvOpts)
- _, err = kv.InitKVNamespace(1, nsConf, false)
- if err != nil {
- t.Fatalf("failed to init namespace: %v", err)
- }
- nsConf1 := node.NewNSConfig()
- nsConf1.Name = "default-1"
- nsConf1.BaseName = "default"
- nsConf1.EngType = rockredis.EngType
- nsConf1.PartitionNum = 3
- nsConf1.Replicator = 1
- nsConf1.RaftGroupConf.GroupID = 1000
- nsConf1.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica)
- _, err = kv.InitKVNamespace(1, nsConf1, false)
- if err != nil {
- t.Fatalf("failed to init namespace: %v", err)
- }
-
- nsConf2 := node.NewNSConfig()
- nsConf2.Name = "default-2"
- nsConf2.BaseName = "default"
- nsConf2.EngType = rockredis.EngType
- nsConf2.PartitionNum = 3
- nsConf2.Replicator = 1
- nsConf2.RaftGroupConf.GroupID = 1000
- nsConf2.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica)
- _, err = kv.InitKVNamespace(1, nsConf2, false)
- if err != nil {
- t.Fatalf("failed to init namespace: %v", err)
+ for i := 0; i < partNum; i++ {
+ nsConf := node.NewNSConfig()
+ nsConf.Name = "default-" + strconv.Itoa(i)
+ nsConf.BaseName = "default"
+ nsConf.EngType = rockredis.EngType
+ nsConf.PartitionNum = partNum
+ nsConf.Replicator = 1
+ // different partitions must use different group ids
+ nsConf.RaftGroupConf.GroupID = uint64(1000 + i)
+ nsConf.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica)
+ // since there is only one replica, we can reuse the same raft id across groups
+ _, err = kv.InitKVNamespace(1, nsConf, false)
+ if err != nil {
+ t.Fatalf("failed to init namespace: %v", err)
+ }
}
kv.Start()
time.Sleep(time.Second)
- return kv, redisportFullScan, tmpDir
+ t.Logf("start test server done at: %v", time.Now())
+ return kv, rport, tmpDir
}
-func waitScanServerForLeader(t *testing.T, w time.Duration) {
+func waitServersForLeader(t *testing.T, kvServer *Server, w time.Duration, partNum int) {
start := time.Now()
for {
leaderNum := 0
- replicaNode := kvsFullScan.GetNamespaceFromFullName("default-0")
- assert.NotNil(t, replicaNode)
- if replicaNode.Node.IsLead() {
- leaderNum++
- }
- replicaNode = kvsFullScan.GetNamespaceFromFullName("default-1")
- assert.NotNil(t, replicaNode)
- if replicaNode.Node.IsLead() {
- leaderNum++
- }
- replicaNode = kvsFullScan.GetNamespaceFromFullName("default-2")
- assert.NotNil(t, replicaNode)
- if replicaNode.Node.IsLead() {
- leaderNum++
+ for i := 0; i < partNum; i++ {
+ replicaNode := kvServer.GetNamespaceFromFullName("default-" + strconv.Itoa(i))
+ assert.NotNil(t, replicaNode)
+ if replicaNode.Node.IsLead() {
+ leaderNum++
+ }
}
- if leaderNum >= 3 {
+ if leaderNum >= partNum {
return
}
if time.Since(start) > w {
@@ -123,8 +102,8 @@ func waitScanServerForLeader(t *testing.T, w time.Duration) {
func getFullScanConn(t *testing.T) *goredis.PoolConn {
testOnceFullScan.Do(func() {
- kvsFullScan, redisportFullScan, gtmpScanDir = startFullScanTestServer(t)
- waitScanServerForLeader(t, time.Second*10)
+ kvsFullScan, redisportFullScan, gtmpScanDir = startFullScanTestServer(t, "unit-test-scan", fullscanTestPortBase, 3)
+ waitServersForLeader(t, kvsFullScan, time.Second*10, 3)
},
)
c := goredis.NewClient("127.0.0.1:"+strconv.Itoa(redisportFullScan), "")
@@ -839,8 +818,7 @@ func checkListFullScan(t *testing.T, c *goredis.PoolConn) {
string(n) != "MDpUVlJGUFRwSUx5OHZMeTh2THk4dll6MD07MjpUa0U5UFRwSlFVRkJRVUZCUVVGQlFUMD07MTpUMUU5UFRwSUx5OHZMeTh2THk4dmF6MD07" &&
string(n) != "MTpUMUU5UFRwSUx5OHZMeTh2THk4dmF6MD07MDpUVlJGUFRwSUx5OHZMeTh2THk4dll6MD07MjpUa0U5UFRwSlFVRkJRVUZCUVVGQlFUMD07" &&
string(n) != "MTpUMUU5UFRwSUx5OHZMeTh2THk4dmF6MD07MjpUa0U5UFRwSlFVRkJRVUZCUVVGQlFUMD07MDpUVlJGUFRwSUx5OHZMeTh2THk4dll6MD07" &&
- string(n) != "MjpUa0U5UFRwSlFVRkJRVUZCUVVGQlFUMD07MDpUVlJGUFRwSUx5OHZMeTh2THk4dll6MD07MTpUMUU5UFRwSUx5OHZMeTh2THk4dmF6MD07" &&
- string(n) != "" {
+ string(n) != "MjpUa0U5UFRwSlFVRkJRVUZCUVVGQlFUMD07MDpUVlJGUFRwSUx5OHZMeTh2THk4dll6MD07MTpUMUU5UFRwSUx5OHZMeTh2THk4dmF6MD07" {
t.Fatal(string(n))
} else {
values := map[string][]string{
diff --git a/server/redis_api_hash_test.go b/server/redis_api_hash_test.go
new file mode 100644
index 00000000..3b94a016
--- /dev/null
+++ b/server/redis_api_hash_test.go
@@ -0,0 +1,528 @@
+package server
+
+import (
+ "fmt"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/siddontang/goredis"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestHashEmptyField(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:hashempty"
+ _, err := c.Do("hset", key, "", "v1")
+ assert.Nil(t, err)
+
+ v, _ := goredis.String(c.Do("hget", key, ""))
+ assert.Equal(t, "v1", v)
+
+ n, err := goredis.Int(c.Do("hexists", key, ""))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+
+ _, err = c.Do("hdel", key, "")
+ assert.Nil(t, err)
+
+ v, err = goredis.String(c.Do("hget", key, ""))
+ assert.Equal(t, goredis.ErrNil, err)
+ assert.Equal(t, "", v)
+
+ n, err = goredis.Int(c.Do("hexists", key, ""))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+}
+
+func TestHash(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:hasha"
+
+ if n, err := goredis.Int(c.Do("hset", key, 1, 0)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int(c.Do("hsetnx", key, 1, 0)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("hexists", key, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("hexists", key, -1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("hget", key, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("hset", key, 1, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("hget", key, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("hlen", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+}
+
+func testHashArray(ay []interface{}, checkValues ...int) error {
+ if len(ay) != len(checkValues) {
+ return fmt.Errorf("invalid return number %d != %d", len(ay), len(checkValues))
+ }
+
+ for i := 0; i < len(ay); i++ {
+ if ay[i] == nil && checkValues[i] != 0 {
+ return fmt.Errorf("must nil")
+ } else if checkValues[i] == 0 && ay[i] != nil {
+ return fmt.Errorf("not exist hash field must be nil")
+ } else if ay[i] != nil {
+ v, ok := ay[i].([]byte)
+ if !ok {
+ return fmt.Errorf("invalid return data %d %v :%T", i, ay[i], ay[i])
+ }
+
+ d, _ := strconv.Atoi(string(v))
+
+ if d != checkValues[i] {
+ return fmt.Errorf("invalid data %d %s != %d", i, v, checkValues[i])
+ }
+ }
+ }
+ return nil
+}
+
+func TestHashM(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:msetb"
+ if ok, err := goredis.String(c.Do("hmset", key, 1, 1, 2, 2, 3, 3)); err != nil {
+ t.Fatal(err)
+ } else if ok != OK {
+ t.Fatal(ok)
+ }
+
+ if n, err := goredis.Int(c.Do("hlen", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("hmget", key, 1, 2, 3, 4)); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testHashArray(v, 1, 2, 3, 0); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if n, err := goredis.Int(c.Do("hdel", key, 1, 2, 3, 4)); err != nil {
+ t.Fatal(err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("hlen", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("hmget", key, 1, 2, 3, 4)); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testHashArray(v, 0, 0, 0, 0); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if n, err := goredis.Int(c.Do("hlen", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+}
+
+func TestHashIncr(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:hashincr-c"
+ if n, err := goredis.Int(c.Do("hincrby", key, 1, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(err)
+ }
+
+ if n, err := goredis.Int(c.Do("hlen", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("hincrby", key, 1, 10)); err != nil {
+ t.Fatal(err)
+ } else if n != 11 {
+ t.Fatal(err)
+ }
+
+ if n, err := goredis.Int(c.Do("hlen", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("hincrby", key, 1, -11)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(err)
+ }
+}
+
+func TestHashGetAll(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:hgetalld"
+
+ if ok, err := goredis.String(c.Do("hmset", key, 1, 1, 2, 2, 3, 3)); err != nil {
+ t.Fatal(err)
+ } else if ok != OK {
+ t.Fatal(ok)
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("hkeys", key)); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testHashArray(v, 1, 2, 3); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("hvals", key)); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testHashArray(v, 1, 2, 3); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("hgetall", key)); err != nil {
+ t.Fatal(err)
+ } else {
+ t.Log(v)
+ if err := testHashArray(v, 1, 1, 2, 2, 3, 3); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if n, err := goredis.Int(c.Do("hclear", key)); err != nil {
+ t.Fatal(err)
+ } else if n <= 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("hlen", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+}
+
+func assertTTLNear(t *testing.T, ttl int, realTTL int) {
+ assert.True(t, realTTL <= ttl, "real ttl should less or equal than set")
+ assert.True(t, realTTL >= ttl-2, "real ttl should not diff too large")
+}
+
+func TestHashExpire(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key1 := "default:test:hash_expa"
+ f1 := "f1"
+ f2 := "f2"
+ f3 := "f3"
+ f4 := "f4"
+ fint := "fint"
+ ttl := 2
+ tn := time.Now().UnixNano()
+
+ n, err := goredis.Int(c.Do("hset", key1, f1, "hello"))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+
+ c.Do("hexpire", key1, ttl)
+ if v, err := goredis.String(c.Do("hget", key1, f1)); err != nil {
+ t.Fatal(err)
+ } else if v != "hello" {
+ t.Fatal(v)
+ }
+ n, err = goredis.Int(c.Do("stale.hget.version", key1, f1))
+ assert.Nil(t, err)
+ assert.True(t, n >= int(tn), "version should great than now")
+
+ n, err = goredis.Int(c.Do("hkeyexist", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+ n, err = goredis.Int(c.Do("hexists", key1, f1))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+
+ realTtl, err := goredis.Int(c.Do("httl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+
+ // check hset,hmset, hincrby keep ttl
+ _, err = goredis.Int(c.Do("hset", key1, f2, " world"))
+ assert.Nil(t, err)
+ realTtl, err = goredis.Int(c.Do("httl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+
+ _, err = goredis.String(c.Do("hmset", key1, f3, f3, f4, f4))
+ assert.Nil(t, err)
+ realTtl, err = goredis.Int(c.Do("httl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+
+ vlist, err := goredis.MultiBulk(c.Do("hmget", key1, f1, f2, f3))
+ assert.Nil(t, err)
+ assert.Equal(t, 3, len(vlist))
+ vlist, err = goredis.MultiBulk(c.Do("stale.hmget.expired", key1, f1, f2, f3))
+ assert.Nil(t, err)
+ assert.Equal(t, 3, len(vlist))
+
+ n, err = goredis.Int(c.Do("hincrby", key1, fint, 1))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+ realTtl, err = goredis.Int(c.Do("httl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+
+ vlist, err = goredis.MultiBulk(c.Do("stale.hgetall.expired", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 10, len(vlist))
+
+ // wait expire
+ time.Sleep(time.Second * time.Duration(ttl+2))
+
+ if v, err := goredis.String(c.Do("hget", key1, f1)); err != goredis.ErrNil {
+ t.Fatalf("expired hash key should be expired: %v, %v", v, err)
+ }
+
+ vlist2, err := goredis.MultiBulk(c.Do("stale.hgetall.expired", key1))
+ assert.Nil(t, err)
+ t.Logf("hgetall expired: %v", vlist2)
+ assert.Equal(t, 10, len(vlist2))
+ assert.Equal(t, vlist, vlist2)
+
+ n, err = goredis.Int(c.Do("hkeyexist", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ n, err = goredis.Int(c.Do("hexists", key1, f1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ n, err = goredis.Int(c.Do("hlen", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ vlist, err = goredis.MultiBulk(c.Do("hmget", key1, f1, f2, f3))
+ assert.Nil(t, err)
+ t.Logf("hmget : %v", vlist)
+ assert.Equal(t, 3, len(vlist))
+ assert.Nil(t, vlist[0])
+ assert.Nil(t, vlist[1])
+ assert.Nil(t, vlist[2])
+ vlist, err = goredis.MultiBulk(c.Do("stale.hmget.expired", key1, f3, f4))
+ assert.Nil(t, err)
+ t.Logf("hmget : %v", vlist)
+ assert.Equal(t, 2, len(vlist))
+ assert.Equal(t, []byte(f3), vlist[0])
+ assert.Equal(t, []byte(f4), vlist[1])
+
+ vlist, err = goredis.MultiBulk(c.Do("hgetall", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vlist))
+
+ realTtl, err = goredis.Int(c.Do("httl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+
+ // persisting expired data should not succeed
+ c.Do("hpersist", key1)
+ realTtl, err = goredis.Int(c.Do("httl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+ n, err = goredis.Int(c.Do("hexists", key1, f1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ n, err = goredis.Int(c.Do("hlen", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+
+ // renew hash
+ _, err = goredis.String(c.Do("hmset", key1, f3, f3, f4, f4))
+ assert.Nil(t, err)
+ c.Do("hexpire", key1, ttl)
+
+ realTtl, err = goredis.Int(c.Do("httl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+ n, err = goredis.Int(c.Do("hexists", key1, f1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ n, err = goredis.Int(c.Do("hexists", key1, f3))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+ n, err = goredis.Int(c.Do("hlen", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, n)
+ n, err = goredis.Int(c.Do("hkeyexist", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+
+ // persist
+ c.Do("hpersist", key1)
+ realTtl, err = goredis.Int(c.Do("httl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+ time.Sleep(time.Second * time.Duration(ttl+1))
+ // should not be expired
+ n, err = goredis.Int(c.Do("hexists", key1, f3))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+ n, err = goredis.Int(c.Do("hlen", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, n)
+ n, err = goredis.Int(c.Do("hkeyexist", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+
+ // change ttl
+ _, err = c.Do("hexpire", key1, ttl+4)
+ assert.Nil(t, err)
+ realTtl, err = goredis.Int(c.Do("httl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl+4, realTtl)
+
+ time.Sleep(time.Second * time.Duration(ttl+6))
+ // expired kv should not be returned by any read command
+ if v, err := goredis.String(c.Do("hget", key1, f1)); err != goredis.ErrNil {
+ t.Fatalf("expired hash key should be expired: %v, %v", v, err)
+ }
+ n, err = goredis.Int(c.Do("hkeyexist", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ n, err = goredis.Int(c.Do("hexists", key1, f1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ n, err = goredis.Int(c.Do("hlen", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ vlist, err = goredis.MultiBulk(c.Do("hmget", key1, f1, f2, f3))
+ assert.Nil(t, err)
+ t.Logf("hmget : %v", vlist)
+ assert.Equal(t, 3, len(vlist))
+ assert.Nil(t, vlist[0])
+ assert.Nil(t, vlist[1])
+ assert.Nil(t, vlist[2])
+
+ vlist, err = goredis.MultiBulk(c.Do("hgetall", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(vlist))
+
+ realTtl, err = goredis.Int(c.Do("httl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+}
+
+func TestHashErrorParams(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:hash_err_param"
+ if _, err := c.Do("hset", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("hget", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("hexists", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("hdel", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("hlen", key, "a"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("hincrby", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("hmset", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("hmset", key, "f1", "v1", "f2"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("hmget", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("hgetall"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("hkeys"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("hvals"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("hclear"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("hclear", key, "a"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("hmclear"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+}
diff --git a/server/redis_api_json_test.go b/server/redis_api_json_test.go
new file mode 100644
index 00000000..f5436d2c
--- /dev/null
+++ b/server/redis_api_json_test.go
@@ -0,0 +1,332 @@
+package server
+
+import (
+ "testing"
+
+ "github.com/siddontang/goredis"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestJSON(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:jsonapi_a"
+ n, err := goredis.Int(c.Do("json.keyexists", key))
+ assert.Nil(t, err)
+ assert.Equal(t, int(0), n)
+
+ strRet, err := goredis.String(c.Do("json.set", key, ".a", `"str"`))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", strRet)
+
+ n, err = goredis.Int(c.Do("json.keyexists", key))
+ assert.Nil(t, err)
+ assert.Equal(t, int(1), n)
+
+ strRets, err := goredis.Strings(c.Do("json.get", key, ".a"))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(strRets))
+ assert.Equal(t, "str", strRets[0])
+
+ typeStr, err := goredis.String(c.Do("json.type", key, ".a"))
+ assert.Nil(t, err)
+ assert.Equal(t, "string", typeStr)
+
+ typeStr, err = goredis.String(c.Do("json.type", key, ""))
+ assert.Nil(t, err)
+ assert.Equal(t, "object", typeStr)
+
+ strRet, err = goredis.String(c.Do("json.set", key, "1", "3"))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", strRet)
+
+ strRets, err = goredis.Strings(c.Do("json.get", key, ""))
+ assert.Nil(t, err)
+ t.Log(strRets)
+ assert.Equal(t, 1, len(strRets))
+ assert.True(t, strRets[0] != "")
+ t.Log(strRets[0])
+ assert.True(t, strRets[0] == `{"a":"str","1":3}` || (strRets[0] == `{"1":3,"a":"str"}`))
+ strRets2, err := goredis.Strings(c.Do("json.get", key))
+ assert.Nil(t, err)
+ assert.Equal(t, strRets, strRets2)
+
+ strRets, err = goredis.Strings(c.Do("json.get", key, "a"))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(strRets))
+ assert.Equal(t, "str", strRets[0])
+
+ strRets, err = goredis.Strings(c.Do("json.get", key, "1"))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(strRets))
+ t.Log(strRets)
+ assert.Equal(t, "3", strRets[0])
+
+ strRets, err = goredis.Strings(c.Do("json.get", key, "1", "a"))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, len(strRets))
+ t.Log(strRets)
+ assert.Equal(t, "3", strRets[0])
+ assert.Equal(t, "str", strRets[1])
+
+ n, err = goredis.Int(c.Do("json.objlen", key))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, n)
+ strRets, err = goredis.Strings(c.Do("json.objkeys", key))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, len(strRets))
+ for _, s := range strRets {
+ assert.True(t, s == "a" || s == "1")
+ }
+ c.Do("json.del", key, "1")
+ strRets, err = goredis.Strings(c.Do("json.get", key, "1"))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(strRets))
+ assert.Equal(t, "", strRets[0])
+
+ typeStr, err = goredis.String(c.Do("json.type", key, "1"))
+ assert.Nil(t, err)
+ assert.Equal(t, "null", typeStr)
+
+ n, err = goredis.Int(c.Do("json.objlen", key))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+ strRets, err = goredis.Strings(c.Do("json.objkeys", key))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(strRets))
+ for _, s := range strRets {
+ assert.True(t, s == "a")
+ }
+
+ c.Do("json.del", key, "a")
+ strRets, err = goredis.Strings(c.Do("json.get", key, ".a"))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(strRets))
+ assert.Equal(t, "", strRets[0])
+
+ n, err = goredis.Int(c.Do("json.objlen", key))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ strRets, err = goredis.Strings(c.Do("json.objkeys", key))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(strRets))
+
+ strRet, err = goredis.String(c.Do("json.set", key, "1", "3"))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", strRet)
+ strRet, err = goredis.String(c.Do("json.set", key, ".a", `"str"`))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", strRet)
+
+ c.Do("json.del", key)
+
+ strRets, err = goredis.Strings(c.Do("json.get", key, ".a"))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(strRets))
+ assert.Equal(t, "", strRets[0])
+
+ strRets, err = goredis.Strings(c.Do("json.get", key, ".1"))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(strRets))
+ assert.Equal(t, "", strRets[0])
+
+ n, err = goredis.Int(c.Do("json.objlen", key))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ strRets, err = goredis.Strings(c.Do("json.objkeys", key))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(strRets))
+}
+
+func TestJSONInvalidJSON(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:jsonapi_invalid"
+
+ strRet, err := goredis.String(c.Do("json.set", key, ".a", `"str"`))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", strRet)
+
+ strRet, err = goredis.String(c.Do("json.set", key, "1", "3"))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", strRet)
+
+ _, err = c.Do("json.set", key, "2", "invalid_str")
+ assert.NotNil(t, err)
+}
+
+func TestJSONSetComplexJSON(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:jsonapi_complex"
+
+ strRet, err := goredis.String(c.Do("json.set", key, "", `{
+ "address": {
+ "street": "2 Avenue",
+ "zipcode": "10075",
+ "building": "1480",
+ "coord": [-73.9557413, 40.7720266]
+ },
+ "borough": "Manhattan",
+ "cuisine": "Italian",
+ "grades": [
+ {
+ "date": "2014-10-01",
+ "grade": "A",
+ "score": 11
+ },
+ {
+ "date": "2014-01-16",
+ "grade": "B",
+ "score": 17
+ }
+ ],
+ "name": "Vella",
+ "restaurant_id": "41704620"
+ }`))
+
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", strRet)
+ strRets, err := goredis.Strings(c.Do("json.get", key, "borough"))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(strRets))
+ assert.Equal(t, "Manhattan", strRets[0])
+ strRets, err = goredis.Strings(c.Do("json.get", key, "address.zipcode"))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(strRets))
+ assert.Equal(t, "10075", strRets[0])
+ strRets, err = goredis.Strings(c.Do("json.get", key, "grades.0.score"))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(strRets))
+ assert.Equal(t, "11", strRets[0])
+ c.Do("json.set", key, "cuisine", `"American"`)
+ c.Do("json.set", key, "address.street", `"East 31st Street"`)
+ strRets, err = goredis.Strings(c.Do("json.get", key, "cuisine"))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(strRets))
+ assert.Equal(t, "American", strRets[0])
+ strRets, err = goredis.Strings(c.Do("json.get", key, "address.street"))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(strRets))
+ assert.Equal(t, "East 31st Street", strRets[0])
+}
+
+func TestJSONArrayOp(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:json_arrayop_d"
+ _, err := c.Do("json.set", key, "", `[1, 2]`)
+ assert.Nil(t, err)
+ n, err := goredis.Int(c.Do("json.arrappend", key, ".", `{"3":[]}`))
+ assert.Nil(t, err)
+ assert.Equal(t, 3, n)
+
+ n, err = goredis.Int(c.Do("json.arrappend", key, ".", "4", "5"))
+ assert.Nil(t, err)
+ assert.Equal(t, 5, n)
+
+ n, err = goredis.Int(c.Do("json.arrlen", key))
+ assert.Nil(t, err)
+ assert.Equal(t, 5, n)
+
+ n, err = goredis.Int(c.Do("json.arrappend", key, "2.3", "33", "34"))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, n)
+
+ n, err = goredis.Int(c.Do("json.arrlen", key, "2.3"))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, n)
+
+ typeStr, err := goredis.String(c.Do("json.type", key, "2.3"))
+ assert.Nil(t, err)
+ assert.Equal(t, "array", typeStr)
+
+ typeStr, err = goredis.String(c.Do("json.type", key))
+ assert.Nil(t, err)
+ assert.Equal(t, "array", typeStr)
+
+ poped, err := goredis.String(c.Do("json.arrpop", key))
+ assert.Nil(t, err)
+ assert.Equal(t, "5", poped)
+
+ poped, err = goredis.String(c.Do("json.arrpop", key))
+ assert.Nil(t, err)
+ assert.Equal(t, "4", poped)
+
+ n, err = goredis.Int(c.Do("json.arrlen", key))
+ assert.Nil(t, err)
+ assert.Equal(t, 3, n)
+
+ poped, err = goredis.String(c.Do("json.arrpop", key, "2.3"))
+ assert.Nil(t, err)
+ assert.Equal(t, "34", poped)
+
+ n, err = goredis.Int(c.Do("json.arrlen", key, "2.3"))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+
+ poped, err = goredis.String(c.Do("json.arrpop", key))
+ assert.Nil(t, err)
+ assert.Equal(t, `{"3":[33]}`, poped)
+
+ n, err = goredis.Int(c.Do("json.arrlen", key))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, n)
+
+ poped, err = goredis.String(c.Do("json.arrpop", key))
+ assert.Nil(t, err)
+ assert.Equal(t, "2", poped)
+ poped, err = goredis.String(c.Do("json.arrpop", key))
+ assert.Nil(t, err)
+ assert.Equal(t, "1", poped)
+
+ n, err = goredis.Int(c.Do("json.arrlen", key))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+
+ poped, err = goredis.String(c.Do("json.arrpop", key))
+ assert.Nil(t, err)
+ assert.Equal(t, "", poped)
+}
+
+func TestJSONErrorParams(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:json_err_param"
+ if _, err := c.Do("json.set", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("json.get"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("json.del"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("json.arrylen"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("json.arrappend", key, "a"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("json.arrpop"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("json.objkeys"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("json.objlen"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+}
diff --git a/server/redis_api_kv_test.go b/server/redis_api_kv_test.go
new file mode 100644
index 00000000..818e3a8a
--- /dev/null
+++ b/server/redis_api_kv_test.go
@@ -0,0 +1,1264 @@
+package server
+
+import (
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ ps "github.com/prometheus/client_golang/prometheus"
+ io_prometheus_clients "github.com/prometheus/client_model/go"
+ "github.com/siddontang/goredis"
+ "github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/metric"
+ "github.com/youzan/ZanRedisDB/node"
+)
+
+func TestKV(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key1 := "default:test:a"
+ key2 := "default:test:b"
+ keyExpire := "default:test:xx"
+
+ if v, err := goredis.String(c.Do("getset", key1, "12345")); err != goredis.ErrNil {
+ t.Logf("getset %v", v)
+ t.Fatal(err)
+ } else if v != "" {
+ t.Fatal(v)
+ }
+
+ if ok, err := goredis.String(c.Do("noopwrite", key1, "12345")); err != nil {
+ t.Fatal(err)
+ } else if ok != OK {
+ t.Fatal(ok)
+ }
+
+ if ok, err := goredis.String(c.Do("set", key1, "1234")); err != nil {
+ t.Fatal(err)
+ } else if ok != OK {
+ t.Fatal(ok)
+ }
+
+ if n, err := goredis.Int(c.Do("setnx", key1, "123")); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("setnx", key2, "123")); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ if ok, err := goredis.String(c.Do("setex", keyExpire, 2, "hello world")); err != nil {
+ t.Fatal(err)
+ } else if ok != OK {
+ t.Fatal(ok)
+ }
+ if v, err := goredis.String(c.Do("get", keyExpire)); err != nil {
+ t.Fatal(err)
+ } else if v != "hello world" {
+ t.Fatal(v)
+ }
+
+ time.Sleep(time.Second * 4)
+ if v, err := goredis.String(c.Do("get", keyExpire)); err != goredis.ErrNil {
+ if err == nil && v == "hello world" {
+ time.Sleep(time.Second * 16)
+ if v, err := goredis.String(c.Do("get", keyExpire)); err != goredis.ErrNil {
+ t.Fatalf("expired key should be expired: %v, %v", v, err)
+ }
+ } else {
+ t.Fatalf("get expired key error: %v, %v", v, err)
+ }
+ }
+
+ if v, err := goredis.String(c.Do("get", key1)); err != nil {
+ t.Fatal(err)
+ } else if v != "1234" {
+ t.Fatal(v)
+ }
+
+ if v, err := goredis.String(c.Do("getset", key1, "123")); err != nil {
+ t.Fatal(err)
+ } else if v != "1234" {
+ t.Fatal(v)
+ }
+
+ if v, err := goredis.String(c.Do("get", key1)); err != nil {
+ t.Fatal(err)
+ } else if v != "123" {
+ t.Fatal(v)
+ }
+
+ if n, err := goredis.Int(c.Do("exists", key1)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int(c.Do("exists", key1, key2)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("exists", "default:test:empty_key_test")); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if _, err := goredis.Int(c.Do("del", key1, key2)); err != nil {
+ t.Fatal(err)
+ }
+
+ if n, err := goredis.Int(c.Do("exists", key1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("exists", key2)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("exists", key1, key2)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+}
+
+func TestKVSetOpts(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+ key1 := "default:test:setopt_a"
+ _, err := goredis.String(c.Do("set", key1, "1234", "xx"))
+ assert.Equal(t, goredis.ErrNil, err)
+
+ ok, err := goredis.String(c.Do("set", key1, "123", "nx", "ex", "4"))
+ assert.Nil(t, err)
+ assert.Equal(t, OK, ok)
+ v, err := goredis.String(c.Do("get", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, "123", v)
+
+ ok, err = goredis.String(c.Do("set", key1, "1234", "ex", "3"))
+ assert.Nil(t, err)
+ assert.Equal(t, OK, ok)
+ v, err = goredis.String(c.Do("get", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, "1234", v)
+
+ _, err = goredis.String(c.Do("set", key1, "12345", "nx"))
+ assert.Equal(t, goredis.ErrNil, err)
+ v, err = goredis.String(c.Do("get", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, "1234", v)
+
+	ok, err = goredis.String(c.Do("set", key1, "123456", "xx"))
+ assert.Nil(t, err)
+ assert.Equal(t, OK, ok)
+ v, err = goredis.String(c.Do("get", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, "123456", v)
+
+	ok, err = goredis.String(c.Do("set", key1, "1234567", "xx", "ex", "2"))
+ assert.Nil(t, err)
+ assert.Equal(t, OK, ok)
+ v, err = goredis.String(c.Do("get", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, "1234567", v)
+ // wait expire
+ time.Sleep(time.Second * 3)
+ _, err = goredis.String(c.Do("set", key1, "1234", "xx"))
+ assert.Equal(t, goredis.ErrNil, err)
+ ok, err = goredis.String(c.Do("set", key1, "123", "nx", "ex", "2"))
+ assert.Nil(t, err)
+ assert.Equal(t, OK, ok)
+ v, err = goredis.String(c.Do("get", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, "123", v)
+}
+
+func TestKVSetIfOpts(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+ key1 := "default:test:setifopt_a"
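+	// setifeq key oldValue newValue [ex N] appears to set the key only when its current
+	// value equals oldValue (an empty oldValue matching a missing key), and delifeq key
+	// value to delete only on a matching value, as the assertions below exercise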
+ n, err := goredis.Int(c.Do("setifeq", key1, "123", "1234", "ex", "4"))
+ assert.Nil(t, err)
+ assert.Equal(t, int(0), n)
+
+ n, err = goredis.Int(c.Do("setifeq", key1, "", "123"))
+ assert.Nil(t, err)
+ assert.Equal(t, int(1), n)
+ v, err := goredis.String(c.Do("get", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, "123", v)
+
+ n, err = goredis.Int(c.Do("setifeq", key1, "", "1234"))
+ assert.Nil(t, err)
+ assert.Equal(t, int(0), n)
+
+ v, err = goredis.String(c.Do("get", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, "123", v)
+
+ n, err = goredis.Int(c.Do("setifeq", key1, "123", "1234", "ex", "3"))
+ assert.Nil(t, err)
+ assert.Equal(t, int(1), n)
+
+ v, err = goredis.String(c.Do("get", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, "1234", v)
+
+ n, err = goredis.Int(c.Do("delifeq", key1, ""))
+ assert.Nil(t, err)
+ assert.Equal(t, int(0), n)
+
+ n, err = goredis.Int(c.Do("delifeq", key1, v))
+ assert.Nil(t, err)
+ assert.Equal(t, int(1), n)
+
+ _, err = goredis.String(c.Do("get", key1))
+ assert.Equal(t, goredis.ErrNil, err)
+
+ n, err = goredis.Int(c.Do("setifeq", key1, "", "1234", "ex", "2"))
+ assert.Nil(t, err)
+ assert.Equal(t, int(1), n)
+ // wait expire
+ time.Sleep(time.Second * 3)
+ n, err = goredis.Int(c.Do("delifeq", key1, "1234"))
+ assert.Nil(t, err)
+ assert.Equal(t, int(0), n)
+ n, err = goredis.Int(c.Do("setifeq", key1, "", "12345", "ex", "3"))
+ assert.Nil(t, err)
+ assert.Equal(t, int(1), n)
+
+ v, err = goredis.String(c.Do("get", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, "12345", v)
+}
+
+func TestKVPipeline(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+ pkey1 := "default:test:kvpla"
+ pkey2 := "default:test:kvplb"
+
+ err := c.Send("set", pkey1, "1")
+ assert.Nil(t, err)
+ err = c.Send("set", pkey2, "2")
+ assert.Nil(t, err)
+ v, err := goredis.String(c.Receive())
+ assert.Nil(t, err)
+ assert.Equal(t, OK, v)
+ v, err = goredis.String(c.Receive())
+ assert.Nil(t, err)
+ assert.Equal(t, OK, v)
+ if v, err := goredis.String(c.Do("get", pkey1)); err != nil {
+ t.Fatal(err)
+ } else if v != "1" {
+ t.Error(v)
+ }
+ if v, err := goredis.String(c.Do("get", pkey2)); err != nil {
+ t.Fatal(err)
+ } else if v != "2" {
+ t.Error(v)
+ }
+}
+
+func TestKVExpire(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key1 := "default:test:expa"
+ ttl := 2
+ tn := time.Now()
+
+ if ok, err := goredis.String(c.Do("setex", key1, ttl, "hello")); err != nil {
+ t.Fatal(err)
+ } else if ok != OK {
+ t.Fatal(ok)
+ }
+ if v, err := goredis.String(c.Do("get", key1)); err != nil {
+ t.Fatal(err)
+ } else if v != "hello" {
+ t.Fatal(v)
+ }
+ realTtl, err := goredis.Int(c.Do("ttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+	// check that incr, append and setrange keep the existing ttl
+ if cnt, err := goredis.Int(c.Do("append", key1, " world")); err != nil {
+ t.Fatal(err)
+ } else if cnt != len("hello world") {
+ t.Fatal(cnt)
+ }
+ if v, err := goredis.String(c.Do("get", key1)); err != nil {
+ t.Fatal(err)
+ } else if v != "hello world" {
+ t.Fatal(v)
+ }
+ n, err := goredis.Int(c.Do("stale.getversion", key1))
+ assert.Nil(t, err)
+ t.Logf("key ver: %v", n)
+ assert.True(t, n >= int(tn.UnixNano()), n)
+ v, err := goredis.String(c.Do("stale.getexpired", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, "hello world", v)
+
+ realTtl, err = goredis.Int(c.Do("ttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+ if cnt, err := goredis.Int(c.Do("setrange", key1, 1, "range")); err != nil {
+ t.Fatal(err)
+ } else if cnt != len("hrangeworld") {
+ t.Fatal(cnt)
+ }
+ if v, err := goredis.String(c.Do("get", key1)); err != nil {
+ t.Fatal(err)
+ } else if v != "hrangeworld" {
+ t.Fatal(v)
+ }
+ realTtl, err = goredis.Int(c.Do("ttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+
+ time.Sleep(time.Second * time.Duration(ttl+2))
+ if v, err := goredis.String(c.Do("get", key1)); err != goredis.ErrNil {
+ t.Fatalf("expired key should be expired: %v, %v", v, err)
+ }
+
+ n, err = goredis.Int(c.Do("stale.getversion", key1))
+ assert.Nil(t, err)
+ t.Logf("key ver: %v", n)
+ assert.True(t, n >= int(tn.UnixNano()), n)
+ v, err = goredis.String(c.Do("stale.getexpired", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, "hrangeworld", v)
+
+ realTtl, err = goredis.Int(c.Do("ttl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+
+ if ok, err := goredis.String(c.Do("setex", key1, ttl, "1")); err != nil {
+ t.Fatal(err)
+ } else if ok != OK {
+ t.Fatal(ok)
+ }
+ if v, err := goredis.String(c.Do("get", key1)); err != nil {
+ t.Fatal(err)
+ } else if v != "1" {
+ t.Fatal(v)
+ }
+ n, err = goredis.Int(c.Do("stale.getversion", key1))
+ assert.Nil(t, err)
+ t.Logf("key ver: %v", n)
+ assert.True(t, n >= int(tn.UnixNano()), n)
+ v, err = goredis.String(c.Do("stale.getexpired", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, "1", v)
+
+ realTtl, err = goredis.Int(c.Do("ttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+ _, err = c.Do("incr", key1)
+ assert.Nil(t, err)
+ if v, err := goredis.String(c.Do("get", key1)); err != nil {
+ t.Fatal(err)
+ } else if v != "2" {
+ t.Fatal(v)
+ }
+ realTtl, err = goredis.Int(c.Do("ttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+ //persist
+ c.Do("persist", key1)
+ realTtl, err = goredis.Int(c.Do("ttl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+
+ // change ttl
+ _, err = c.Do("expire", key1, ttl+4)
+ assert.Nil(t, err)
+ realTtl, err = goredis.Int(c.Do("ttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl+4, realTtl)
+
+ time.Sleep(time.Second * time.Duration(ttl+5))
+	// an expired kv should not be returned by any read command
+ if v, err := goredis.String(c.Do("get", key1)); err != goredis.ErrNil {
+ t.Fatalf("expired key should be expired: %v, %v", v, err)
+ }
+ n, err = goredis.Int(c.Do("stale.getversion", key1))
+ assert.Nil(t, err)
+ t.Logf("key ver: %v", n)
+ assert.True(t, n >= int(tn.UnixNano()), n)
+ v, err = goredis.String(c.Do("stale.getexpired", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, "2", v)
+
+ realTtl, err = goredis.Int(c.Do("ttl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+ if n, err := goredis.Int(c.Do("exists", key1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+ vv, err := goredis.MultiBulk(c.Do("mget", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, nil, vv[0])
+ _, err = goredis.String(c.Do("getset", key1, "new1"))
+ assert.Equal(t, goredis.ErrNil, err)
+ nv, err := goredis.String(c.Do("get", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, "new1", nv)
+
+ // persist
+ if ok, err := goredis.String(c.Do("setex", key1, ttl, "1")); err != nil {
+ t.Fatal(err)
+ } else if ok != OK {
+ t.Fatal(ok)
+ }
+ realTtl, err = goredis.Int(c.Do("ttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+
+ _, err = c.Do("persist", key1)
+ assert.Nil(t, err)
+
+ realTtl, err = goredis.Int(c.Do("ttl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+}
+
+func TestKVM(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key1 := "default:test:kvma"
+ key2 := "default:test:kvmb"
+ key3 := "default:test:kvmc"
+ if ok, err := goredis.String(c.Do("set", key1, "1")); err != nil {
+ t.Fatal(err)
+ } else if ok != OK {
+ t.Fatal(ok)
+ }
+ if ok, err := goredis.String(c.Do("set", key2, "2")); err != nil {
+ t.Fatal(err)
+ } else if ok != OK {
+ t.Fatal(ok)
+ }
+
+ if v, err := goredis.String(c.Do("get", key1)); err != nil {
+ t.Fatal(err)
+ } else if v != "1" {
+ t.Error(v)
+ }
+ if v, err := goredis.String(c.Do("get", key2)); err != nil {
+ t.Fatal(err)
+ } else if v != "2" {
+ t.Error(v)
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("mget", key1, key2, key3)); err != nil {
+ t.Fatal(err)
+ } else if len(v) != 3 {
+ t.Fatal(len(v))
+ } else {
+ if vv, ok := v[0].([]byte); !ok || string(vv) != "1" {
+ t.Fatalf("not 1, %v", v)
+ }
+
+ if vv, ok := v[1].([]byte); !ok || string(vv) != "2" {
+ t.Errorf("not 2, %v", v[1])
+ }
+
+ if v[2] != nil {
+ t.Errorf("must nil: %v", v[2])
+ }
+ }
+}
+
+func TestKVIncrDecr(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:kv_n"
+ if n, err := goredis.Int64(c.Do("incr", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int64(c.Do("incr", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int64(c.Do("incrby", key, 10)); err != nil {
+ t.Fatal(err)
+ } else if n != 12 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int64(c.Do("incrby", key, -10)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+}
+
+func TestKVBitOp(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:kv_bitop"
+ if n, err := goredis.Int64(c.Do("bitcount", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int64(c.Do("getbit", key, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int64(c.Do("setbit", key, 100, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int64(c.Do("getbit", key, 100)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int64(c.Do("bitcount", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int64(c.Do("setbit", key, 1, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int64(c.Do("bitcount", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int64(c.Do("bitcount", key, 0, 0)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int64(c.Do("setbit", key, 8, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int64(c.Do("bitcount", key, 0, 0)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int64(c.Do("setbit", key, 7, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int64(c.Do("bitcount", key, 0, 0)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int64(c.Do("bitcount", key, 0, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+ _, err := goredis.Int64(c.Do("setbit", key, -7, 1))
+ assert.NotNil(t, err)
+}
+
+func TestKVBitExpire(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key1 := "default:test:bit_exp"
+ ttl := 2
+
+ if n, err := goredis.Int(c.Do("setbitv2", key1, 1, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+ if v, err := goredis.Int(c.Do("getbit", key1, 1)); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal(v)
+ }
+ c.Do("bexpire", key1, ttl)
+ realTtl, err := goredis.Int(c.Do("bttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+	// check that writes keep the existing ttl
+ if _, err := goredis.Int(c.Do("setbitv2", key1, 2, 1)); err != nil {
+ t.Fatal(err)
+ }
+ if v, err := goredis.Int(c.Do("getbit", key1, 2)); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal(v)
+ }
+ realTtl, err = goredis.Int(c.Do("bttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+
+ time.Sleep(time.Second * time.Duration(ttl+2))
+ if v, err := goredis.Int(c.Do("getbit", key1, 1)); err != goredis.ErrNil && err != nil {
+ t.Fatalf("expired key should be expired: %v, %v", v, err)
+ } else if v != 0 {
+ t.Fatal(v)
+ }
+
+ realTtl, err = goredis.Int(c.Do("bttl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+
+ if n, err := goredis.Int(c.Do("setbitv2", key1, 3, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int(c.Do("bexpire", key1, ttl)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+ if v, err := goredis.Int(c.Do("getbit", key1, 3)); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal(v)
+ }
+ if v, err := goredis.Int(c.Do("getbit", key1, 1)); err != nil {
+ t.Fatal(err)
+ } else if v != 0 {
+ t.Fatal(v)
+ }
+ realTtl, err = goredis.Int(c.Do("bttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+ if n, err := goredis.Int(c.Do("setbitv2", key1, 4, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+ realTtl, err = goredis.Int(c.Do("bttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+ //persist
+ c.Do("bpersist", key1)
+ realTtl, err = goredis.Int(c.Do("bttl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+
+ // change ttl
+ _, err = c.Do("bexpire", key1, ttl+4)
+ assert.Nil(t, err)
+ realTtl, err = goredis.Int(c.Do("bttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl+4, realTtl)
+
+ time.Sleep(time.Second * time.Duration(ttl+5))
+	// an expired kv should not be returned by any read command
+ if v, err := goredis.Int(c.Do("getbit", key1, 3)); err != goredis.ErrNil && err != nil {
+ t.Fatalf("expired key should be expired: %v, %v", v, err)
+ } else if v != 0 {
+ t.Fatal(v)
+ }
+ realTtl, err = goredis.Int(c.Do("bttl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+ if n, err := goredis.Int(c.Do("bkeyexist", key1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ // persist
+ if n, err := goredis.Int(c.Do("setbitv2", key1, 5, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ _, err = c.Do("bexpire", key1, ttl)
+ assert.Nil(t, err)
+
+ realTtl, err = goredis.Int(c.Do("bttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+
+ _, err = c.Do("bpersist", key1)
+ assert.Nil(t, err)
+
+ realTtl, err = goredis.Int(c.Do("bttl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+}
+
+func TestKVBatch(t *testing.T) {
+
+ var wg sync.WaitGroup
+ concurrency := 100
+ poolList := make([]*goredis.PoolConn, concurrency)
+ for i := 0; i < concurrency; i++ {
+ poolList[i] = getTestConn(t)
+ }
+ defer func() {
+ for i := 0; i < concurrency; i++ {
+ poolList[i].Close()
+ }
+ }()
+ for i := 0; i < concurrency; i++ {
+ wg.Add(1)
+ go func(index int, c *goredis.PoolConn) {
+ defer wg.Done()
+
+ key1 := "default:test:a" + strconv.Itoa(index)
+ key2 := "default:test:b" + strconv.Itoa(index)
+ key3 := "default:test:c" + strconv.Itoa(index)
+ key4 := "default:test:d" + strconv.Itoa(index)
+ keyExpire := "default:test:xx" + strconv.Itoa(index)
+ if ok, err := goredis.String(c.Do("set", key1, "1234")); err != nil {
+ t.Fatal(err)
+ } else if ok != OK {
+ t.Fatal(ok)
+ }
+
+ if n, err := goredis.Int(c.Do("setnx", key1, "123")); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("setnx", key2, "123")); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ if ok, err := goredis.String(c.Do("set", key3, key3)); err != nil {
+ t.Fatal(err)
+ } else if ok != OK {
+ t.Fatal(ok)
+ }
+ if v, err := goredis.String(c.Do("get", key3)); err != nil {
+ t.Fatal(err)
+ } else if v != key3 {
+ t.Fatal(v)
+ }
+
+ if ok, err := goredis.String(c.Do("setex", keyExpire, 3, "hello world")); err != nil {
+ t.Fatal(err)
+ } else if ok != OK {
+ t.Fatal(ok)
+ }
+ if v, err := goredis.String(c.Do("get", keyExpire)); err != nil {
+ t.Fatal(err)
+ } else if v != "hello world" {
+ t.Fatal(v)
+ }
+
+ if ok, err := goredis.String(c.Do("set", key4, key4)); err != nil {
+ t.Fatal(err)
+ } else if ok != OK {
+ t.Fatal(ok)
+ }
+ if v, err := goredis.String(c.Do("get", key4)); err != nil {
+ t.Fatal(err)
+ } else if v != key4 {
+ t.Fatal(v)
+ }
+
+ mkey1 := "default:test:kvma" + strconv.Itoa(index)
+ mkey2 := "default:test:kvmb" + strconv.Itoa(index)
+ mkey3 := "default:test:kvmc" + strconv.Itoa(index)
+ // test pipeline set
+ err := c.Send("set", mkey1, "1")
+ assert.Nil(t, err)
+ err = c.Send("set", mkey2, "2")
+ assert.Nil(t, err)
+ v, err := goredis.String(c.Receive())
+ assert.Nil(t, err)
+ assert.Equal(t, OK, v)
+ v, err = goredis.String(c.Receive())
+ assert.Nil(t, err)
+ assert.Equal(t, OK, v)
+
+ if v, err := goredis.String(c.Do("get", mkey1)); err != nil {
+ t.Fatal(err)
+ } else if v != "1" {
+ t.Error(v)
+ }
+ if v, err := goredis.String(c.Do("get", mkey2)); err != nil {
+ t.Fatal(err)
+ } else if v != "2" {
+ t.Error(v)
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("mget", mkey1, mkey2, mkey3)); err != nil {
+ t.Fatal(err)
+ } else if len(v) != 3 {
+ t.Fatal(len(v))
+ } else {
+ if vv, ok := v[0].([]byte); !ok || string(vv) != "1" {
+ t.Fatalf("not 1, %v", v)
+ }
+
+ if vv, ok := v[1].([]byte); !ok || string(vv) != "2" {
+ t.Errorf("not 2, %v", v[1])
+ }
+
+ if v[2] != nil {
+ t.Errorf("must nil: %v", v[2])
+ }
+ }
+
+ time.Sleep(time.Second * 4)
+ if v, err := goredis.String(c.Do("get", keyExpire)); err != goredis.ErrNil {
+ if err == nil && v == "hello world" {
+ time.Sleep(time.Second * 16)
+ if v, err := goredis.String(c.Do("get", keyExpire)); err != goredis.ErrNil {
+ t.Fatalf("expired key should be expired: %v, %v", v, err)
+ }
+ } else {
+ t.Fatalf("get expired key error: %v, %v", v, err)
+ }
+ }
+
+ if v, err := goredis.String(c.Do("get", key1)); err != nil {
+ t.Fatal(err)
+ } else if v != "1234" {
+ t.Fatal(v)
+ }
+
+ if n, err := goredis.Int(c.Do("exists", key1)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("exists", "default:test:empty_key_test"+strconv.Itoa(index))); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if _, err := goredis.Int(c.Do("del", key1, key2)); err != nil {
+ t.Fatal(err)
+ }
+
+ if n, err := goredis.Int(c.Do("exists", key1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("exists", key2)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+ }(i, poolList[i])
+ }
+ wg.Wait()
+
+}
+
+func TestKVStringOp(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:kv_stringop"
+ if n, err := goredis.Int64(c.Do("strlen", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+ c.Do("setex", key, 10, "Hello")
+ n, err := goredis.Int64(c.Do("strlen", key))
+ assert.Nil(t, err)
+ assert.Equal(t, len("Hello"), int(n))
+ // append
+}
+
+func TestKVErrorParams(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key1 := "default:test:kv_erra"
+ key2 := "default:test:kv_errb"
+ key3 := "default:test:kv_errc"
+ _, err := c.Do("get", key1, key2, key3)
+ assert.NotNil(t, err)
+
+ _, err = c.Do("set", key1)
+ assert.NotNil(t, err)
+ _, err = c.Do("set", key1, key2, key3)
+ assert.NotNil(t, err)
+ _, err = c.Do("set", key1, key1, "ex")
+ assert.NotNil(t, err)
+ _, err = c.Do("set", key1, key1, "ex", "nx")
+ assert.NotNil(t, err)
+
+ _, err = c.Do("setifeq", key1, "old", "nvalue", "ex")
+ assert.NotNil(t, err)
+ _, err = c.Do("setifeq", key1, "old")
+ assert.NotNil(t, err)
+ _, err = c.Do("delifeq", key1)
+ assert.NotNil(t, err)
+
+ _, err = c.Do("setex", key1, "10")
+ assert.NotNil(t, err)
+
+ _, err = c.Do("setex", key1, "10", key1, key1)
+ assert.NotNil(t, err)
+
+ _, err = c.Do("getset", key1, key2, key3)
+ assert.NotNil(t, err)
+
+ _, err = c.Do("setnx", key1, key2, key3)
+ assert.NotNil(t, err)
+
+ _, err = c.Do("exists")
+ assert.NotNil(t, err)
+
+ _, err = c.Do("incr", key1, key2)
+ assert.NotNil(t, err)
+
+ _, err = c.Do("incrby", key1)
+ assert.NotNil(t, err)
+
+ _, err = c.Do("incrby", key1, "nan")
+ assert.NotNil(t, err)
+
+ _, err = c.Do("decrby", key1)
+ assert.NotNil(t, err)
+
+ _, err = c.Do("del")
+ assert.NotNil(t, err)
+
+ _, err = c.Do("mset")
+ assert.NotNil(t, err)
+
+ _, err = c.Do("mset", key1, key2, key3)
+ assert.NotNil(t, err)
+
+ _, err = c.Do("mget")
+ assert.NotNil(t, err)
+
+ _, err = c.Do("getbit")
+ assert.NotNil(t, err)
+
+ _, err = c.Do("getbit", key1)
+ assert.NotNil(t, err)
+
+ _, err = c.Do("setbit", key1)
+ assert.NotNil(t, err)
+
+ _, err = c.Do("setbit")
+ assert.NotNil(t, err)
+
+ _, err = c.Do("bitcount")
+ assert.NotNil(t, err)
+
+ _, err = c.Do("bitcount", key1, "0")
+ assert.NotNil(t, err)
+}
+
+func TestPFOp(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key1 := "default:test:pf_a"
+ cnt, err := goredis.Int64(c.Do("pfcount", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), cnt)
+
+	// the first pfadd with no elements still initializes the key
+ cnt, err = goredis.Int64(c.Do("pfadd", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), cnt)
+
+ cnt, err = goredis.Int64(c.Do("pfadd", key1, 1))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), cnt)
+
+ cnt, err = goredis.Int64(c.Do("pfcount", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), cnt)
+
+ cnt, err = goredis.Int64(c.Do("pfadd", key1, 1))
+ assert.Nil(t, err)
+
+ cnt, err = goredis.Int64(c.Do("pfcount", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), cnt)
+
+	// test pfadd with no elements on an existing key
+ cnt, err = goredis.Int64(c.Do("pfadd", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), cnt)
+
+ cnt, err = goredis.Int64(c.Do("pfadd", key1, 1, 2, 3))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1), cnt)
+
+ cnt, err = goredis.Int64(c.Do("pfcount", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(3), cnt)
+
+ c.Do("del", key1)
+
+ cnt, err = goredis.Int64(c.Do("pfcount", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), cnt)
+}
+
+func TestPFOpErrorParams(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key1 := "default:test:pf_erra"
+ key2 := "default:test:pf_errb"
+ _, err := c.Do("pfadd")
+ assert.NotNil(t, err)
+
+ _, err = c.Do("pfcount", key1, key2)
+ assert.NotNil(t, err)
+
+ _, err = c.Do("pfcount")
+ assert.NotNil(t, err)
+}
+
+func TestSyncerOnlyWrite(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key1 := "default:test:synceronly"
+ key2 := "default:test:synceronly2"
+	_, err := goredis.String(c.Do("set", key1, "1234"))
+	assert.Nil(t, err)
+	_, err = goredis.String(c.Do("set", key2, "1234"))
+	assert.Nil(t, err)
+ node.SetSyncerOnly(true)
+ defer node.SetSyncerOnly(false)
+
+ _, err = goredis.String(c.Do("getset", key1, "12345"))
+ assert.NotNil(t, err)
+ assert.True(t, strings.HasPrefix(err.Error(), "The cluster is only allowing syncer write"))
+ _, err = goredis.String(c.Do("set", key1, "12345"))
+ assert.NotNil(t, err)
+ assert.True(t, strings.HasPrefix(err.Error(), "The cluster is only allowing syncer write"))
+ _, err = goredis.String(c.Do("plset", key1, "12345"))
+ assert.NotNil(t, err)
+ assert.True(t, strings.HasPrefix(err.Error(), "The cluster is only allowing syncer write"))
+
+ // failed write should not change the key value
+ if v, err := goredis.String(c.Do("get", key1)); err != nil {
+ t.Fatal(err)
+ } else if v != "1234" {
+ t.Fatal(v)
+ }
+
+	if ay, err := goredis.Values(c.Do("ADVSCAN", "default:testscan:", "kv", "count", 5)); err != nil {
+ t.Error(err)
+ } else if len(ay) != 2 {
+ t.Fatal(len(ay))
+ }
+
+	if ay, err := goredis.Values(c.Do("SCAN", "default:testscan:", "count", 5)); err != nil {
+ t.Error(err)
+ } else if len(ay) != 2 {
+ t.Fatal(len(ay))
+ }
+
+ if n, err := goredis.Int(c.Do("exists", key1)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int(c.Do("exists", key1, key2)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ _, err = goredis.Int(c.Do("del", key1, key2))
+ assert.NotNil(t, err)
+ assert.True(t, strings.HasPrefix(err.Error(), "The cluster is only allowing syncer write"))
+
+ // failed del should not change the key
+ if n, err := goredis.Int(c.Do("exists", key1)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int(c.Do("exists", key1, key2)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+}
+
+func TestSlowLimiterCommand(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key1 := "default:test_slowlimiter:slowa"
+
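+	// the slowwriteXX_test commands are assumed to be test-only writes that take
+	// roughly the indicated time, used here to trip the slow limiter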
+ _, err := goredis.String(c.Do("slowwrite1s_test", key1, "12345"))
+ assert.Nil(t, err)
+ _, err = goredis.String(c.Do("slowwrite100ms_test", key1, "12345"))
+ assert.Nil(t, err)
+ _, err = goredis.String(c.Do("slowwrite50ms_test", key1, "12345"))
+ assert.Nil(t, err)
+ _, err = goredis.String(c.Do("slowwrite5ms_test", key1, "12345"))
+ assert.Nil(t, err)
+ start := time.Now()
+ done := make(chan bool)
+ slowed := int64(0)
+ total := int64(0)
+ var wg sync.WaitGroup
+ for i := 0; i < 30; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ cc := getTestConn(t)
+ defer cc.Close()
+ for {
+ c1 := time.Now()
+ _, err := goredis.String(cc.Do("set", key1, "12345"))
+ c2 := time.Since(c1)
+ if c2 > time.Millisecond*500 {
+ atomic.AddInt64(&slowed, 1)
+ }
+ if err != nil {
+ if err.Error() == node.ErrSlowLimiterRefused.Error() {
+ } else {
+ assert.Nil(t, err)
+ }
+ }
+ atomic.AddInt64(&total, 1)
+ select {
+ case <-done:
+ return
+ default:
+ time.Sleep(time.Millisecond)
+ }
+ }
+ }()
+ }
+ loop := 0
+ refused := 0
+ passedAfterRefused := 0
+ slowHalfOpen := time.Second * time.Duration(node.SlowHalfOpenSec)
+ for {
+ if time.Since(start) > slowHalfOpen*2 {
+ break
+ }
+ loop++
+ c.SetReadDeadline(time.Now().Add(time.Second * 10))
+ _, err := goredis.String(c.Do("slowwrite1s_test", key1, "12345"))
+ if err != nil && err.Error() == node.ErrSlowLimiterRefused.Error() {
+ refused++
+ time.Sleep(time.Millisecond)
+ continue
+ }
+ if refused > 0 && err == nil {
+ passedAfterRefused++
+ if passedAfterRefused > 3 {
+ break
+ }
+ }
+ }
+ close(done)
+ t.Logf("slow loop cnt: %v, refused: %v, %v, total %v, slowed: %v at %v",
+ loop, refused, passedAfterRefused, atomic.LoadInt64(&total), atomic.LoadInt64(&slowed), time.Now())
+ assert.True(t, refused > loop/2)
+ assert.True(t, atomic.LoadInt64(&slowed) < atomic.LoadInt64(&total)/10)
+ assert.True(t, passedAfterRefused < 5)
+ assert.True(t, passedAfterRefused > 0)
+ wg.Wait()
+ counter := metric.SlowLimiterRefusedCnt.With(ps.Labels{
+ "table": "test_slowlimiter",
+ "cmd": "slowwrite1s_test",
+ })
+ out := io_prometheus_clients.Metric{}
+ counter.Write(&out)
+ assert.Equal(t, float64(refused), *out.Counter.Value)
+
+ refused = 0
+ passedAfterRefused = 0
+ c2 := getTestConn(t)
+ defer c2.Close()
+ start = time.Now()
+	// wait until we are no longer treated as slow, to verify the recorded slow history gets cleared
+ for {
+ if time.Since(start) > slowHalfOpen*2 {
+ break
+ }
+ loop++
+ c2.SetReadDeadline(time.Now().Add(time.Second * 10))
+ _, err := goredis.String(c2.Do("slowwrite1s_test", key1, "12345"))
+ if err != nil && err.Error() == node.ErrSlowLimiterRefused.Error() {
+ refused++
+			// sleep longer so the slow-down ticker can decrement the counter back to 0
+ time.Sleep(time.Second)
+ continue
+ }
+ if refused > 0 && err == nil {
+ passedAfterRefused++
+ if passedAfterRefused > 3 {
+ break
+ }
+ }
+ }
+ t.Logf("slow loop cnt: %v, refused: %v, passed after refused %v at %v",
+ loop, refused, passedAfterRefused, time.Now())
+ assert.True(t, refused > 1)
+ assert.True(t, passedAfterRefused < 5)
+ assert.True(t, passedAfterRefused > 1)
+ c2.SetReadDeadline(time.Now().Add(time.Second * 10))
+ time.Sleep(time.Second * 5)
+	// we should no longer be slow; try 3 times so a single half-open pass is not mistaken for full recovery
+ _, err = goredis.String(c2.Do("slowwrite1s_test", key1, "12345"))
+ assert.Nil(t, err)
+ _, err = goredis.String(c2.Do("slowwrite1s_test", key1, "12345"))
+ assert.Nil(t, err)
+ _, err = goredis.String(c2.Do("slowwrite1s_test", key1, "12345"))
+ assert.Nil(t, err)
+
+	// check that dynamic conf changes propagate to the node package
+ common.SetIntDynamicConf(common.ConfSlowLimiterRefuseCostMs, 601)
+ common.SetIntDynamicConf(common.ConfSlowLimiterHalfOpenSec, 18)
+ assert.Equal(t, int64(601), atomic.LoadInt64(&node.SlowRefuseCostMs))
+ assert.Equal(t, int64(18), atomic.LoadInt64(&node.SlowHalfOpenSec))
+}
diff --git a/server/redis_api_merge_test.go b/server/redis_api_merge_test.go
index c6a4a40b..0b10c6e9 100644
--- a/server/redis_api_merge_test.go
+++ b/server/redis_api_merge_test.go
@@ -3,18 +3,15 @@ package server
import (
"encoding/json"
"fmt"
- "io/ioutil"
- "path"
"strconv"
"sync"
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/node"
- "github.com/absolute8511/ZanRedisDB/rockredis"
"github.com/siddontang/goredis"
"github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/node"
)
var testOnceMerge sync.Once
@@ -23,112 +20,11 @@ var redisportMerge int
var testNamespaces = make(map[string]*node.NamespaceNode)
var gtmpMergeDir string
-func startMergeTestServer(t *testing.T) (*Server, int, string) {
- tmpDir, err := ioutil.TempDir("", fmt.Sprintf("rocksdb-test-%d", time.Now().UnixNano()))
- if err != nil {
- t.Fatal(err)
- }
- t.Logf("dir:%v\n", tmpDir)
- ioutil.WriteFile(
- path.Join(tmpDir, "myid"),
- []byte(strconv.FormatInt(int64(1), 10)),
- common.FILE_PERM)
- raftAddr := "http://127.0.0.1:32345"
- redisportMerge := 42345
- var replica node.ReplicaInfo
- replica.NodeID = 1
- replica.ReplicaID = 1
- replica.RaftAddr = raftAddr
- kvOpts := ServerConfig{
- ClusterID: "test",
- DataDir: tmpDir,
- RedisAPIPort: redisportMerge,
- HttpAPIPort: redisportMerge + 1,
- LocalRaftAddr: raftAddr,
- BroadcastAddr: "127.0.0.1",
- TickMs: 100,
- ElectionTick: 5,
- }
- nsConf := node.NewNSConfig()
- nsConf.Name = "default-0"
- nsConf.BaseName = "default"
- nsConf.EngType = rockredis.EngType
- nsConf.PartitionNum = 3
- nsConf.Replicator = 1
- nsConf.RaftGroupConf.GroupID = 1000
- nsConf.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica)
- kv := NewServer(kvOpts)
- n, err := kv.InitKVNamespace(1, nsConf, false)
- if err != nil {
- t.Fatalf("failed to init namespace: %v", err)
- }
- nsConf1 := node.NewNSConfig()
- nsConf1.Name = "default-1"
- nsConf1.BaseName = "default"
- nsConf1.EngType = rockredis.EngType
- nsConf1.PartitionNum = 3
- nsConf1.Replicator = 1
- nsConf1.RaftGroupConf.GroupID = 1000
- nsConf1.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica)
- n1, err := kv.InitKVNamespace(1, nsConf1, false)
- if err != nil {
- t.Fatalf("failed to init namespace: %v", err)
- }
-
- nsConf2 := node.NewNSConfig()
- nsConf2.Name = "default-2"
- nsConf2.BaseName = "default"
- nsConf2.EngType = rockredis.EngType
- nsConf2.PartitionNum = 3
- nsConf2.Replicator = 1
- nsConf2.RaftGroupConf.GroupID = 1000
- nsConf2.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica)
- n2, err := kv.InitKVNamespace(1, nsConf2, false)
- if err != nil {
- t.Fatalf("failed to init namespace: %v", err)
- }
-
- kv.Start()
- time.Sleep(time.Second)
- t.Logf("start test server done at: %v", time.Now())
- testNamespaces[nsConf.Name] = n
- testNamespaces[nsConf1.Name] = n1
- testNamespaces[nsConf2.Name] = n2
- return kv, redisportMerge, tmpDir
-}
-func waitMergeServerForLeader(t *testing.T, w time.Duration) {
- start := time.Now()
- for {
- leaderNum := 0
- replicaNode := kvsMerge.GetNamespaceFromFullName("default-0")
- assert.NotNil(t, replicaNode)
- if replicaNode.Node.IsLead() {
- leaderNum++
- }
- replicaNode = kvsMerge.GetNamespaceFromFullName("default-1")
- assert.NotNil(t, replicaNode)
- if replicaNode.Node.IsLead() {
- leaderNum++
- }
- replicaNode = kvsMerge.GetNamespaceFromFullName("default-2")
- assert.NotNil(t, replicaNode)
- if replicaNode.Node.IsLead() {
- leaderNum++
- }
- if leaderNum >= 3 {
- return
- }
- if time.Since(start) > w {
- t.Fatalf("\033[31m timed out %v for wait leader \033[39m\n", time.Since(start))
- break
- }
- time.Sleep(time.Second)
- }
-}
func getMergeTestConn(t *testing.T) *goredis.PoolConn {
testOnceMerge.Do(func() {
- kvsMerge, redisportMerge, gtmpMergeDir = startMergeTestServer(t)
- waitMergeServerForLeader(t, time.Second*10)
+ kvsMerge, redisportMerge, gtmpMergeDir = startFullScanTestServer(t, "unit-test-merge", mergeTestPortBase, 3)
+ testNamespaces = kvsMerge.GetNsMgr().GetNamespaces()
+ waitServersForLeader(t, kvsMerge, time.Second*10, 3)
},
)
c := goredis.NewClient("127.0.0.1:"+strconv.Itoa(redisportMerge), "")
@@ -224,8 +120,7 @@ func checkMergeAdvanceScan(t *testing.T, c *goredis.PoolConn, tp string) {
t.Fatal(err)
} else if len(ay) != 2 {
t.Fatal(len(ay))
- } else if n := ay[0].([]byte); string(n) != "" &&
- string(n) != "" {
+ } else if n := ay[0].([]byte); string(n) != "" {
t.Fatal(string(n))
} else {
if len(ay[1].([]interface{})) != 0 {
@@ -609,3 +504,24 @@ func TestIntHindexMergeSearch(t *testing.T) {
assert.True(t, nv < 10)
}
}
+
+func TestKVRWMultiPart(t *testing.T) {
+ c := getMergeTestConn(t)
+ defer c.Close()
+
+ for i := 0; i < 20; i++ {
+ k := fmt.Sprintf("kv%d", i)
+ if _, err := c.Do("set", "default:test_kv_multi:"+k, []byte(k)); err != nil {
+ t.Errorf("set key: %v, failed:%v", k, err)
+ }
+ }
+
+ for i := 0; i < 20; i++ {
+ k := fmt.Sprintf("kv%d", i)
+ if val, err := goredis.String(c.Do("get", "default:test_kv_multi:"+k)); err != nil {
+ t.Errorf("get key: %v, failed:%v", k, err)
+ } else if val != k {
+ t.Errorf("value should be :%v, actual: %v", k, val)
+ }
+ }
+}
diff --git a/server/redis_api_scan_test.go b/server/redis_api_scan_test.go
new file mode 100644
index 00000000..9d651798
--- /dev/null
+++ b/server/redis_api_scan_test.go
@@ -0,0 +1,312 @@
+package server
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/siddontang/goredis"
+ "github.com/stretchr/testify/assert"
+)
+
+func checkScanValues(t *testing.T, ay interface{}, values ...interface{}) {
+ a, err := goredis.Strings(ay, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(a) != len(values) {
+		t.Errorf("len %d != %d", len(a), len(values))
+ }
+ for i, v := range a {
+ vv := fmt.Sprintf("%v", values[i])
+ if string(v) != vv {
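+			// a stored value may carry an extra 8 bytes (presumably a version/header
+			// field), so tolerate an 8-byte length difference and compare the overlap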
+ if len(v) == len(vv)+8 {
+ if string(v[:len(vv)]) != vv {
+					t.Errorf("%d %s != %v", i, string(v), values[i])
+ }
+ } else if len(v)+8 == len(vv) {
+ if string(v) != vv[:len(v)] {
+					t.Errorf("%d %s != %v", i, string(v), values[i])
+ }
+ } else {
+				t.Errorf("%d %s != %v", i, string(v), values[i])
+ }
+ }
+ }
+}
+
+func checkAdvanceScan(t *testing.T, c *goredis.PoolConn, tp string) {
+ var cursor string
+ if ay, err := goredis.Values(c.Do("ADVSCAN", "default:testscan:", tp, "count", 5)); err != nil {
+ t.Error(err)
+ } else if len(ay) != 2 {
+ t.Fatal(len(ay))
+ //} else if n := ay[0].([]byte); string(n) != "MDpkR1Z6ZEhOallXNDZOQT09Ow==" {
+ } else if n := ay[0].([]byte); string(n) != "MDpOQT09Ow==" {
+ t.Fatal(string(n))
+ } else {
+ cursor = string(n)
+ checkScanValues(t, ay[1], "0", "1", "2", "3", "4")
+ }
+
+ if ay, err := goredis.Values(c.Do("ADVSCAN", "default:testscan:"+cursor, tp, "count", 6)); err != nil {
+ t.Fatal(err)
+ } else if len(ay) != 2 {
+ t.Fatal(len(ay))
+ } else if n := ay[0].([]byte); string(n) != "" {
+ t.Fatal(string(n))
+ } else {
+ checkScanValues(t, ay[1], "5", "6", "7", "8", "9")
+ }
+
+	// the cursor for last key "9" is base64("0:" + base64("9") + ";"), i.e. "MDpPUT09Ow=="
+ if ay, err := goredis.Values(c.Do("ADVSCAN", "default:testscan:MDpPUT09Ow==", tp, "count", 1)); err != nil {
+ t.Fatal(err)
+ } else if len(ay) != 2 {
+ t.Fatal(len(ay))
+ } else if n := ay[0].([]byte); string(n) != "" {
+ t.Fatal(string(n))
+ } else {
+ if len(ay[1].([]interface{})) != 0 {
+ t.Fatal(ay[1])
+ }
+ }
+}
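+
+// exampleAdvScanCursor is an illustrative helper showing how the opaque ADVSCAN
+// cursors hard-coded in these checks appear to be built:
+// base64(partitionIndex + ":" + base64(lastKey) + ";"). For instance, partition 0
+// with last key "9" yields "MDpPUT09Ow==", and last key ":" yields "MDpPZz09Ow==",
+// matching the cursors used in checkAdvanceScan and checkAdvanceRevScan.
+func exampleAdvScanCursor(partIndex int, lastKey string) string {
+	inner := base64.StdEncoding.EncodeToString([]byte(lastKey))
+	return base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%d:%s;", partIndex, inner)))
+}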
+
+func checkAdvanceRevScan(t *testing.T, c *goredis.PoolConn, tp string) {
+ var cursor string
+	// start the reverse scan from base64("0:" + base64(":") + ";") = "MDpPZz09Ow=="; ":" sorts just after "9"
+ if ay, err := goredis.Values(c.Do("ADVREVSCAN", "default:testscan:MDpPZz09Ow==", tp, "count", 5)); err != nil {
+ t.Error(err)
+ return
+ } else if len(ay) != 2 {
+ t.Fatal(len(ay))
+ //} else if n := ay[0].([]byte); string(n) != "MDpkR1Z6ZEhOallXNDZOQT09Ow==" {
+ } else if n := ay[0].([]byte); string(n) != "MDpOUT09Ow==" {
+ t.Fatal(string(n))
+ } else {
+ cursor = string(n)
+ checkScanValues(t, ay[1], "9", "8", "7", "6", "5")
+ }
+
+ if ay, err := goredis.Values(c.Do("ADVREVSCAN", "default:testscan:"+cursor, tp, "count", 6)); err != nil {
+ t.Error(err)
+ return
+ } else if len(ay) != 2 {
+ t.Fatal(len(ay))
+ } else if n := ay[0].([]byte); string(n) != "" {
+ t.Fatal(string(n))
+ } else {
+ checkScanValues(t, ay[1], "4", "3", "2", "1", "0")
+ }
+
+	// a reverse scan starting from the empty cursor should return no results
+ if ay, err := goredis.Values(c.Do("ADVREVSCAN", "default:testscan:", tp, "count", 1)); err != nil {
+ t.Error(err)
+ return
+ } else if len(ay) != 2 {
+ t.Fatal(len(ay))
+ } else if n := ay[0].([]byte); string(n) != "" {
+ t.Fatal(string(n))
+ } else {
+ if len(ay[1].([]interface{})) != 0 {
+ t.Fatal(ay[1])
+ }
+ }
+}
+
+func TestScan(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
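+	// note: bitcount's start/end arguments are byte offsets (as in Redis), so [0, 0] counts only bits 0-7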
+ if testing.Verbose() {
+ changeLogLevel(t, 4, gredisport+1)
+ }
+ testKVScan(t, c)
+ testHashKeyScan(t, c)
+ testListKeyScan(t, c)
+ testZSetKeyScan(t, c)
+ testSetKeyScan(t, c)
+ changeLogLevel(t, 2, gredisport+1)
+}
+
+func testKVScan(t *testing.T, c *goredis.PoolConn) {
+ for i := 0; i < 10; i++ {
+ if _, err := c.Do("set", "default:testscan:"+fmt.Sprintf("%d", i), []byte("value")); err != nil {
+ t.Fatal(err)
+ }
+ }
+ checkAdvanceScan(t, c, "KV")
+ checkAdvanceRevScan(t, c, "KV")
+}
+
+func testHashKeyScan(t *testing.T, c *goredis.PoolConn) {
+ for i := 0; i < 10; i++ {
+ if _, err := c.Do("hset", "default:testscan:"+fmt.Sprintf("%d", i), fmt.Sprintf("%d", i), []byte("value")); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ checkAdvanceScan(t, c, "HASH")
+ checkAdvanceRevScan(t, c, "HASH")
+}
+
+func testListKeyScan(t *testing.T, c *goredis.PoolConn) {
+ for i := 0; i < 10; i++ {
+ if _, err := c.Do("lpush", "default:testscan:"+fmt.Sprintf("%d", i), fmt.Sprintf("%d", i)); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ checkAdvanceScan(t, c, "LIST")
+	checkAdvanceRevScan(t, c, "LIST")
+}
+
+func testZSetKeyScan(t *testing.T, c *goredis.PoolConn) {
+ for i := 0; i < 10; i++ {
+ if _, err := c.Do("zadd", "default:testscan:"+fmt.Sprintf("%d", i), i, []byte("value")); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ checkAdvanceScan(t, c, "ZSET")
+	checkAdvanceRevScan(t, c, "ZSET")
+}
+
+func testSetKeyScan(t *testing.T, c *goredis.PoolConn) {
+ for i := 0; i < 10; i++ {
+ if _, err := c.Do("sadd", "default:testscan:"+fmt.Sprintf("%d", i), fmt.Sprintf("%d", i)); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ checkAdvanceScan(t, c, "SET")
+	checkAdvanceRevScan(t, c, "SET")
+}
+
+func TestHashScan(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:testscan:scan_hash"
+ c.Do("HMSET", key, "a", 1, "b", 2)
+
+ if ay, err := goredis.Values(c.Do("HSCAN", key, "")); err != nil {
+ t.Fatal(err)
+ } else if len(ay) != 2 {
+ t.Fatal(len(ay))
+ } else {
+ checkScanValues(t, ay[1], "a", 1, "b", 2)
+ }
+
+ if ay, err := goredis.Values(c.Do("HREVSCAN", key, "c")); err != nil {
+ t.Fatal(err)
+ } else if len(ay) != 2 {
+ t.Fatal(len(ay))
+ } else {
+ checkScanValues(t, ay[1], "b", 2, "a", 1)
+ }
+ // test scan expired hash key
+ c.Do("hexpire", key, 1)
+ time.Sleep(time.Second * 2)
+ ay, err := goredis.Values(c.Do("HSCAN", key, ""))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, len(ay))
+ ayv, err := goredis.Strings(ay[1], nil)
+ assert.Equal(t, 0, len(ayv))
+
+ ay, err = goredis.Values(c.Do("HREVSCAN", key, "c"))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, len(ay))
+ ayv, err = goredis.Strings(ay[1], nil)
+ assert.Equal(t, 0, len(ayv))
+	// test scanning a hash key that expired and was then re-created
+ c.Do("HMSET", key, "a", 2, "b", 3, "c", 4)
+ ay, err = goredis.Values(c.Do("HSCAN", key, ""))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, len(ay))
+ checkScanValues(t, ay[1], "a", 2, "b", 3, "c", 4)
+
+ ay, err = goredis.Values(c.Do("HREVSCAN", key, "d"))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, len(ay))
+ checkScanValues(t, ay[1], "c", 4, "b", 3, "a", 2)
+}
+
+func TestSetScan(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ if testing.Verbose() {
+ changeLogLevel(t, 4, gredisport+1)
+ }
+ key := "default:test:scan_set"
+ c.Do("SADD", key, "a", "b")
+
+ if ay, err := goredis.Values(c.Do("SSCAN", key, "")); err != nil {
+ t.Fatal(err)
+ } else if len(ay) != 2 {
+ t.Fatal(len(ay))
+ } else {
+ checkScanValues(t, ay[1], "a", "b")
+ }
+
+ if ay, err := goredis.Values(c.Do("SREVSCAN", key, "c")); err != nil {
+ t.Fatal(err)
+ } else if len(ay) != 2 {
+ t.Fatal(len(ay))
+ } else {
+ checkScanValues(t, ay[1], "b", "a")
+ }
+ // test scan expired key
+ c.Do("sexpire", key, 1)
+ time.Sleep(time.Second * 2)
+ ay, err := goredis.Values(c.Do("SSCAN", key, ""))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, len(ay))
+ ayv, err := goredis.Strings(ay[1], nil)
+ assert.Equal(t, 0, len(ayv))
+
+ ay, err = goredis.Values(c.Do("SREVSCAN", key, "c"))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, len(ay))
+ ayv, err = goredis.Strings(ay[1], nil)
+ assert.Equal(t, 0, len(ayv))
+	// test scanning a key that expired and was then re-created
+ c.Do("SADD", key, "c", "d")
+ ay, err = goredis.Values(c.Do("SSCAN", key, ""))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, len(ay))
+ checkScanValues(t, ay[1], "c", "d")
+
+ ay, err = goredis.Values(c.Do("SREVSCAN", key, "e"))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, len(ay))
+ checkScanValues(t, ay[1], "d", "c")
+ changeLogLevel(t, 2, gredisport+1)
+}
+
+func TestZSetScan(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:scan_zset"
+ c.Do("ZADD", key, 1, "a", 2, "b")
+
+ if ay, err := goredis.Values(c.Do("ZSCAN", key, "")); err != nil {
+ t.Fatal(err)
+ } else if len(ay) != 2 {
+ t.Fatal(len(ay))
+ } else {
+ checkScanValues(t, ay[1], "a", 1, "b", 2)
+ }
+
+ if ay, err := goredis.Values(c.Do("ZREVSCAN", key, "c")); err != nil {
+ t.Fatal(err)
+ } else if len(ay) != 2 {
+ t.Fatal(len(ay))
+ } else {
+ checkScanValues(t, ay[1], "b", 2, "a", 1)
+ }
+}
diff --git a/server/redis_api_setlistzset_test.go b/server/redis_api_setlistzset_test.go
new file mode 100644
index 00000000..833f9e78
--- /dev/null
+++ b/server/redis_api_setlistzset_test.go
@@ -0,0 +1,1853 @@
+package server
+
+import (
+ "fmt"
+ "math/rand"
+ "reflect"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/siddontang/goredis"
+ "github.com/stretchr/testify/assert"
+)
+
+func testListIndex(t *testing.T, key string, index int64, v int) error {
+ c := getTestConn(t)
+ defer c.Close()
+
+ n, err := goredis.Int(c.Do("lindex", key, index))
+	if err == goredis.ErrNil && v != 0 {
+		return fmt.Errorf("expected %d at index %d but got nil", v, index)
+	} else if err != nil && err != goredis.ErrNil {
+		return err
+	} else if n != v {
+		return fmt.Errorf("lindex at %d returned %d, expected %d", index, n, v)
+	}
+
+ return nil
+}
+
+func testListRange(t *testing.T, key string, start int64, stop int64, checkValues ...int) error {
+ c := getTestConn(t)
+ defer c.Close()
+
+ vs, err := goredis.MultiBulk(c.Do("lrange", key, start, stop))
+ if err != nil {
+ return err
+ }
+
+ if len(vs) != len(checkValues) {
+		return fmt.Errorf("wrong number of returned values %d != %d", len(vs), len(checkValues))
+ }
+
+ var n int
+ for i, v := range vs {
+ if d, ok := v.([]byte); ok {
+ n, err = strconv.Atoi(string(d))
+ if err != nil {
+ return err
+ } else if n != checkValues[i] {
+ return fmt.Errorf("invalid data %d: %d != %d", i, n, checkValues[i])
+ }
+ } else {
+ return fmt.Errorf("invalid data %v %T", v, v)
+ }
+ }
+
+ return nil
+}
+
+func TestList(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:lista"
+ //if n, err := goredis.Int(c.Do("lkeyexists", key)); err != nil {
+ // t.Fatal(err)
+ //} else if n != 0 {
+ // t.Fatal(n)
+ //}
+
+ if n, err := goredis.Int(c.Do("lpush", key, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ //if n, err := goredis.Int(c.Do("lkeyexists", key)); err != nil {
+ // t.Fatal(err)
+ //} else if n != 1 {
+ // t.Fatal(1)
+ //}
+
+ if n, err := goredis.Int(c.Do("rpush", key, 2)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("rpush", key, 3)); err != nil {
+ t.Fatal(err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("llen", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+
+	// expected results (as with ledis-cli) for a list a = [1, 2, 3]:
+ // 127.0.0.1:6379> lrange a 0 0
+ // 1) "1"
+ if err := testListRange(t, key, 0, 0, 1); err != nil {
+ t.Fatal(err)
+ }
+
+ // 127.0.0.1:6379> lrange a 0 1
+ // 1) "1"
+ // 2) "2"
+
+ if err := testListRange(t, key, 0, 1, 1, 2); err != nil {
+ t.Fatal(err)
+ }
+
+ // 127.0.0.1:6379> lrange a 0 5
+ // 1) "1"
+ // 2) "2"
+ // 3) "3"
+ if err := testListRange(t, key, 0, 5, 1, 2, 3); err != nil {
+ t.Fatal(err)
+ }
+
+ // 127.0.0.1:6379> lrange a -1 5
+ // 1) "3"
+ if err := testListRange(t, key, -1, 5, 3); err != nil {
+ t.Fatal(err)
+ }
+
+ // 127.0.0.1:6379> lrange a -5 -1
+ // 1) "1"
+ // 2) "2"
+ // 3) "3"
+ if err := testListRange(t, key, -5, -1, 1, 2, 3); err != nil {
+ t.Fatal(err)
+ }
+
+ // 127.0.0.1:6379> lrange a -2 -1
+ // 1) "2"
+ // 2) "3"
+ if err := testListRange(t, key, -2, -1, 2, 3); err != nil {
+ t.Fatal(err)
+ }
+
+ // 127.0.0.1:6379> lrange a -1 -2
+ // (empty list or set)
+ if err := testListRange(t, key, -1, -2); err != nil {
+ t.Fatal(err)
+ }
+
+ // 127.0.0.1:6379> lrange a -1 2
+ // 1) "3"
+ if err := testListRange(t, key, -1, 2, 3); err != nil {
+ t.Fatal(err)
+ }
+
+ // 127.0.0.1:6379> lrange a -5 5
+ // 1) "1"
+ // 2) "2"
+ // 3) "3"
+ if err := testListRange(t, key, -5, 5, 1, 2, 3); err != nil {
+ t.Fatal(err)
+ }
+
+ // 127.0.0.1:6379> lrange a -1 0
+ // (empty list or set)
+ if err := testListRange(t, key, -1, 0); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := testListRange(t, "default:test:empty list", 0, 100); err != nil {
+ t.Fatal(err)
+ }
+
+ // 127.0.0.1:6379> lrange a -1 -1
+ // 1) "3"
+ if err := testListRange(t, key, -1, -1, 3); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := testListIndex(t, key, -1, 3); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := testListIndex(t, key, 0, 1); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := testListIndex(t, key, 1, 2); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := testListIndex(t, key, 2, 3); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := testListIndex(t, key, 5, 0); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := testListIndex(t, key, -1, 3); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := testListIndex(t, key, -2, 2); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := testListIndex(t, key, -3, 1); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestListMPush(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:listmpushb"
+ if n, err := goredis.Int(c.Do("rpush", key, 1, 2, 3)); err != nil {
+ t.Fatal(err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+
+ if err := testListRange(t, key, 0, 3, 1, 2, 3); err != nil {
+ t.Fatal(err)
+ }
+
+ if n, err := goredis.Int(c.Do("lpush", key, 1, 2, 3)); err != nil {
+ t.Fatal(err)
+ } else if n != 6 {
+ t.Fatal(n)
+ }
+
+ if err := testListRange(t, key, 0, 6, 3, 2, 1, 1, 2, 3); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestListPop(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:c"
+ if n, err := goredis.Int(c.Do("rpush", key, 1, 2, 3, 4, 5, 6)); err != nil {
+ t.Fatal(err)
+ } else if n != 6 {
+ t.Fatal(n)
+ }
+
+ if v, err := goredis.Int(c.Do("lpop", key)); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatal(v)
+ }
+
+ if v, err := goredis.Int(c.Do("rpop", key)); err != nil {
+ t.Fatal(err)
+ } else if v != 6 {
+ t.Fatal(v)
+ }
+
+ if n, err := goredis.Int(c.Do("lpush", key, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 5 {
+ t.Fatal(n)
+ }
+
+ if err := testListRange(t, key, 0, 5, 1, 2, 3, 4, 5); err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 1; i <= 5; i++ {
+ if v, err := goredis.Int(c.Do("lpop", key)); err != nil {
+ t.Fatal(err)
+ } else if v != i {
+ t.Fatal(v)
+ }
+ }
+
+ if n, err := goredis.Int(c.Do("llen", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ c.Do("rpush", key, 1, 2, 3, 4, 5)
+
+ if n, err := goredis.Int(c.Do("lclear", key)); err != nil {
+ t.Fatal(err)
+ } else if n <= 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("llen", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+	// test lpop and rpop on an empty list
+ v, err := goredis.Bytes(c.Do("lpop", key))
+ assert.Equal(t, goredis.ErrNil, err)
+ assert.Nil(t, v)
+ v, err = goredis.Bytes(c.Do("rpop", key))
+ assert.Equal(t, goredis.ErrNil, err)
+ assert.Nil(t, v)
+ sv, err := goredis.String(c.Do("ltrim", key, 0, 1))
+ assert.Nil(t, err)
+ assert.Equal(t, "OK", sv)
+}
+
+func disableTestTrim(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:d"
+ if n, err := goredis.Int(c.Do("rpush", key, 1, 2, 3, 4, 5, 6)); err != nil {
+ t.Fatal(err)
+ } else if n != 6 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("ltrim_front", key, 2)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("llen", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 4 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("ltrim_back", key, 2)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("llen", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("ltrim_front", key, 5)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("llen", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("rpush", key, 1, 2)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("ltrim_front", key, 2)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("llen", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+}
+func TestListLPushRPop(t *testing.T) {
+ c := getTestConn(t)
+ c2 := getTestConn(t)
+ defer c.Close()
+ defer c2.Close()
+
+ k1 := []byte("default:test_lpushrpop:1")
+ klist := make([][]byte, 0, 10)
+ klist = append(klist, k1)
+ for i := 2; i < 9; i++ {
+ klist = append(klist, []byte("default:test_lpushrpop:"+strconv.Itoa(i)))
+ }
+
+ n, err := goredis.Int(c.Do("llen", k1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ c.Do("lpush", k1, []byte("a"))
+ n, err = goredis.Int(c.Do("llen", k1))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+ c.Do("rpop", k1)
+ n, err = goredis.Int(c.Do("llen", k1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ c.Do("rpop", k1)
+ n, err = goredis.Int(c.Do("llen", k1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ c.Do("lpush", k1, []byte("a"))
+ c.Do("lpush", k1, []byte("a"))
+ c.Do("lpush", k1, []byte("a"))
+ c.Do("lpush", k1, []byte("a"))
+ c.Do("rpop", k1)
+ c.Do("rpop", k1)
+ c.Do("lpush", k1, []byte("a"))
+ c.Do("lpush", k1, []byte("a"))
+ c.Do("rpop", k1)
+ c.Do("rpop", k1)
+ c.Do("lpush", k1, []byte("a"))
+ c.Do("rpop", k1)
+ c.Do("rpop", k1)
+ n, err = goredis.Int(c.Do("llen", k1))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+ v, err := goredis.Bytes(c.Do("rpop", k1))
+ assert.Nil(t, err)
+ assert.Equal(t, []byte("a"), v)
+ n, err = goredis.Int(c.Do("llen", k1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+
+ pushed := make([]int32, len(klist))
+ poped := make([]int32, len(klist))
+ connPushList := make([]*goredis.PoolConn, len(klist))
+ connPopList := make([]*goredis.PoolConn, len(klist))
+ defer func() {
+ for _, c := range connPushList {
+ c.Close()
+ }
+ for _, c := range connPopList {
+ c.Close()
+ }
+ }()
+
+ start := time.Now()
+ var wg sync.WaitGroup
+ for i := range klist {
+ connPushList[i] = getTestConn(t)
+ connPopList[i] = getTestConn(t)
+ wg.Add(2)
+ go func(index int) {
+ defer wg.Done()
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for {
+ _, err := connPushList[index].Do("lpush", klist[index], []byte("a"))
+ assert.Nil(t, err)
+ atomic.AddInt32(&pushed[index], 1)
+ time.Sleep(time.Microsecond * time.Duration(r.Int31n(1000)))
+ if time.Since(start) > time.Second*10 {
+ break
+ }
+ }
+ }(i)
+ go func(index int) {
+ defer wg.Done()
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for {
+ v, err := goredis.Bytes(connPopList[index].Do("rpop", klist[index]))
+				assert.True(t, err == nil || err == goredis.ErrNil, "rpop should either return a value or ErrNil")
+ if len(v) > 0 {
+ assert.Nil(t, err)
+ assert.Equal(t, []byte("a"), v)
+ atomic.AddInt32(&poped[index], 1)
+ }
+ time.Sleep(time.Microsecond * time.Duration(r.Int31n(1000)))
+ if time.Since(start) > time.Second*10 {
+ break
+ }
+ }
+ }(i)
+
+ }
+ wg.Wait()
+
+ for i, tk := range klist {
+ n, err = goredis.Int(c.Do("llen", tk))
+ assert.Nil(t, err)
+ t.Logf("pushed %v poped %v", atomic.LoadInt32(&pushed[i]), atomic.LoadInt32(&poped[i]))
+ assert.True(t, pushed[i] >= poped[i])
+ assert.Equal(t, int(pushed[i]-poped[i]), n)
+ }
+}
+
+func TestListErrorParams(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:list_err_param"
+ if _, err := c.Do("lpush", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("rpush", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("lpop", key, "a"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("rpop", key, "a"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("llen", key, "a"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("lindex", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("lrange", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("lclear"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("lmclear"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("ltrim_front", key, "-1"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("ltrim_back", key, "a"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+}
+
+func TestSet(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key1 := "default:test:testdb_cmd_set_1"
+ key2 := "default:test:testdb_cmd_set_2"
+
+ if n, err := goredis.Int(c.Do("scard", key1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("sadd", key1, 0, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+ // add again
+ if n, err := goredis.Int(c.Do("sadd", key1, 0, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int(c.Do("sadd", key1, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int(c.Do("scard", key1)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("sadd", key2, 0, 1, 2)); err != nil {
+ t.Fatal(err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+ // partially duplicated members
+ if n, err := goredis.Int(c.Do("sadd", key2, 1, 2, 3)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("srem", key1, 0, 1)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("sismember", key2, 0)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Values(c.Do("smembers", key2)); err != nil {
+ t.Fatal(err)
+ } else if len(n) != 4 {
+ t.Fatal(n)
+ }
+ n, err := goredis.Int(c.Do("scard", key2))
+ assert.Nil(t, err)
+ assert.Equal(t, int(4), n)
+
+ if n, err := goredis.Values(c.Do("srandmember", key2)); err != nil {
+ t.Fatal(err)
+ } else if len(n) != 1 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Values(c.Do("srandmember", key2, 2)); err != nil {
+ t.Fatal(err)
+ } else if len(n) != 2 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Values(c.Do("srandmember", key2, 10)); err != nil {
+ t.Fatal(err)
+ } else if len(n) != 4 {
+ t.Fatal(n)
+ }
+
+ if val, err := goredis.String(c.Do("spop", key2)); err != nil {
+ t.Fatal(err)
+ } else if val != "0" {
+ t.Fatal(val)
+ }
+ if val, err := goredis.String(c.Do("spop", key2)); err != nil {
+ t.Fatal(err)
+ } else if val != "1" {
+ t.Fatal(val)
+ }
+ if val, err := goredis.Values(c.Do("spop", key2, 4)); err != nil {
+ t.Fatal(err)
+ } else if len(val) != 2 {
+ t.Fatal(val)
+ }
+ if n, err := goredis.Values(c.Do("smembers", key2)); err != nil {
+ t.Fatal(err)
+ } else if len(n) != 0 {
+ t.Fatal(n)
+ }
+
+ n, err = goredis.Int(c.Do("scard", key2))
+ assert.Nil(t, err)
+ assert.Equal(t, int(0), n)
+ if n, err := goredis.Values(c.Do("srandmember", key2, 10)); err != nil {
+ t.Fatal(err)
+ } else if len(n) != 0 {
+ t.Fatal(n)
+ }
+ // spop on an empty set returns nil for the single form, but an empty array for the multi (count) form
+ if val, err := c.Do("spop", key2); err != nil {
+ t.Fatal(err)
+ } else if val != nil {
+ t.Fatal(val)
+ }
+ if val, err := goredis.Values(c.Do("spop", key2, 2)); err != nil {
+ t.Fatal(err)
+ } else if val == nil {
+ t.Fatal(val)
+ } else if len(val) != 0 {
+ t.Fatal(val)
+ }
+ if n, err := goredis.Int(c.Do("sadd", key2, "member0", "member1", "member2")); err != nil {
+ t.Fatal(err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+ if val, err := goredis.String(c.Do("spop", key2)); err != nil {
+ t.Fatal(err)
+ } else if val != "member0" {
+ t.Fatal(val)
+ }
+ if val, err := goredis.Values(c.Do("spop", key2, 2)); err != nil {
+ t.Fatal(err)
+ } else if len(val) != 2 {
+ t.Fatal(val)
+ } else if string(val[0].([]byte)) != "member1" || string(val[1].([]byte)) != "member2" {
+ t.Fatal(val)
+ }
+
+ if n, err := goredis.Int(c.Do("sadd", key2, 0, 1, 2, 3)); err != nil {
+ t.Fatal(err)
+ } else if n != 4 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int(c.Do("sclear", key2)); err != nil {
+ t.Fatal(err)
+ } else if n <= 0 {
+ t.Fatal(n)
+ }
+
+ n, err = goredis.Int(c.Do("scard", key2))
+ assert.Nil(t, err)
+ assert.Equal(t, int(0), n)
+}
+
+func TestSetSPopCompatible(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key1 := "default:test:testdb_cmd_set_spop"
+
+ binaryStr := make([]byte, 10)
+ binaryStr = []byte("binary" + string(binaryStr) + "bb")
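+ // the member embeds 10 zero bytes to verify that spop replies are binary safe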
+ n, err := goredis.Int(c.Do("sadd", key1, binaryStr))
+ assert.Nil(t, err)
+ assert.Equal(t, int(1), n)
+ binaryStr2 := []byte(string(binaryStr) + "2")
+ n, err = goredis.Int(c.Do("sadd", key1, binaryStr2))
+ assert.Nil(t, err)
+ assert.Equal(t, int(1), n)
+ n, err = goredis.Int(c.Do("scard", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, int(2), n)
+
+ val, err := c.Do("spop", key1)
+ assert.Nil(t, err)
+ assert.Equal(t, binaryStr, val)
+ _, ok := val.([]byte)
+ assert.Equal(t, true, ok)
+ vals, err := goredis.MultiBulk(c.Do("spop", key1, 1))
+ assert.Nil(t, err)
+ assert.Equal(t, binaryStr2, vals[0])
+ _, ok = vals[0].([]byte)
+ assert.Equal(t, true, ok)
+
+ // spop empty
+ val, err = c.Do("spop", key1)
+ assert.Nil(t, err)
+ assert.Equal(t, nil, val)
+ vals, err = goredis.MultiBulk(c.Do("spop", key1, 1))
+ assert.Nil(t, err)
+ assert.NotNil(t, vals)
+ assert.Equal(t, 0, len(vals))
+}
+
+func TestSetSPopSAddConcurrent(t *testing.T) {
+ var wg sync.WaitGroup
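+ // four clients concurrently sadd, smembers and spop the same keys to exercise concurrent set access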
+ for index := 0; index < 4; index++ {
+ wg.Add(1)
+ client := getTestConn(t)
+ defer client.Close()
+ go func(c *goredis.PoolConn) {
+ defer wg.Done()
+ for loop := 0; loop < 400; loop++ {
+ key1 := "default:test:testdb_spop_sadd_concurrency" + strconv.Itoa(loop)
+ binaryStr := make([]byte, 10)
+ binaryStr = []byte("binary" + string(binaryStr) + "bb")
+ _, err := goredis.Int(c.Do("sadd", key1, binaryStr))
+ assert.Nil(t, err)
+ binaryStr2 := []byte(string(binaryStr) + strconv.Itoa(loop%2))
+ _, err = goredis.Int(c.Do("sadd", key1, binaryStr2))
+ assert.Nil(t, err)
+ _, err = c.Do("smembers", key1)
+ assert.Nil(t, err)
+
+ _, err = c.Do("spop", key1)
+ assert.Nil(t, err)
+ _, err = goredis.MultiBulk(c.Do("spop", key1, 2))
+ assert.Nil(t, err)
+ }
+ }(client)
+ }
+ wg.Wait()
+}
+
+func TestSetExpire(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key1 := "default:test:set_expa"
+ f1 := "f1"
+ f2 := "f2"
+ f3 := "f3"
+ f4 := "f4"
+ f5 := "f5"
+ ttl := 2
+
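+ // verify that set writes keep the ttl, expired data is invisible to reads, and sexpire/spersist update the ttl as expected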
+ n, err := goredis.Int(c.Do("sadd", key1, f1))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+
+ c.Do("sexpire", key1, ttl)
+ n, err = goredis.Int(c.Do("sismember", key1, f1))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+ n, err = goredis.Int(c.Do("skeyexist", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+
+ realTtl, err := goredis.Int(c.Do("sttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+
+ // check that sadd, srem and spop keep the existing ttl
+ _, err = goredis.Int(c.Do("sadd", key1, f2, f3))
+ assert.Nil(t, err)
+ realTtl, err = goredis.Int(c.Do("sttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+
+ vlist, err := goredis.MultiBulk(c.Do("smembers", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 3, len(vlist))
+
+ n, err = goredis.Int(c.Do("scard", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, int(3), n)
+
+ n, err = goredis.Int(c.Do("srem", key1, f1))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+ realTtl, err = goredis.Int(c.Do("sttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+
+ v, err := goredis.String(c.Do("spop", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, f2, v)
+ realTtl, err = goredis.Int(c.Do("sttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+ // wait for the key to expire
+ time.Sleep(time.Second * time.Duration(ttl+2))
+
+ if v, err := goredis.String(c.Do("spop", key1)); err != goredis.ErrNil {
+ t.Fatalf("expired key should be expired: %v, %v", v, err)
+ }
+ n, err = goredis.Int(c.Do("skeyexist", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ n, err = goredis.Int(c.Do("sismember", key1, f3))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ n, err = goredis.Int(c.Do("scard", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ vlist, err = goredis.MultiBulk(c.Do("smembers", key1))
+ assert.Nil(t, err)
+ t.Logf("smembers: %v", vlist)
+ assert.Equal(t, 0, len(vlist))
+
+ n, err = goredis.Int(c.Do("scard", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, int(0), n)
+
+ realTtl, err = goredis.Int(c.Do("sttl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+
+ // spersist on expired data should not succeed
+ _, err = c.Do("spersist", key1)
+ assert.Nil(t, err)
+ realTtl, err = goredis.Int(c.Do("sttl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+ n, err = goredis.Int(c.Do("sismember", key1, f3))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ n, err = goredis.Int(c.Do("scard", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+
+ // renew the set after expiration
+ n, err = goredis.Int(c.Do("sadd", key1, f4, f5))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, n)
+ c.Do("sexpire", key1, ttl)
+
+ realTtl, err = goredis.Int(c.Do("sttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl, realTtl)
+ n, err = goredis.Int(c.Do("sismember", key1, f1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ n, err = goredis.Int(c.Do("sismember", key1, f4))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+ vlist, err = goredis.MultiBulk(c.Do("smembers", key1))
+ assert.Nil(t, err)
+ t.Logf("smembers: %v", vlist)
+ assert.Equal(t, 2, len(vlist))
+
+ n, err = goredis.Int(c.Do("scard", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, n)
+ n, err = goredis.Int(c.Do("skeyexist", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+
+ // persist
+ _, err = c.Do("spersist", key1)
+ assert.Nil(t, err)
+ realTtl, err = goredis.Int(c.Do("sttl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+ time.Sleep(time.Second * time.Duration(ttl+1))
+ // should not be expired after spersist
+ n, err = goredis.Int(c.Do("sismember", key1, f5))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+ n, err = goredis.Int(c.Do("scard", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 2, n)
+ n, err = goredis.Int(c.Do("skeyexist", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 1, n)
+
+ // change ttl
+ _, err = c.Do("sexpire", key1, ttl+4)
+ assert.Nil(t, err)
+ realTtl, err = goredis.Int(c.Do("sttl", key1))
+ assert.Nil(t, err)
+ assertTTLNear(t, ttl+4, realTtl)
+
+ time.Sleep(time.Second * time.Duration(ttl+6))
+ // check that the expired key is not returned by any read command
+ if v, err := goredis.String(c.Do("spop", key1)); err != goredis.ErrNil {
+ t.Fatalf("expired key should be expired: %v, %v", v, err)
+ }
+ n, err = goredis.Int(c.Do("skeyexist", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ n, err = goredis.Int(c.Do("sismember", key1, f5))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ n, err = goredis.Int(c.Do("scard", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, 0, n)
+ vlist, err = goredis.MultiBulk(c.Do("smembers", key1))
+ assert.Nil(t, err)
+ t.Logf("smembers: %v", vlist)
+ assert.Equal(t, 0, len(vlist))
+
+ n, err = goredis.Int(c.Do("scard", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, int(0), n)
+
+ realTtl, err = goredis.Int(c.Do("sttl", key1))
+ assert.Nil(t, err)
+ assert.Equal(t, -1, realTtl)
+}
+
+func TestSetErrorParams(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:set_error_param"
+ invalidKey := string(append([]byte("default:test:set_error_param"), make([]byte, 10240)...))
+ invalidMember := string(append([]byte("long_param"), make([]byte, 10240)...))
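+ // oversized keys and members (padded past 10KB) are expected to be rejected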
+ if _, err := c.Do("sadd", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+ _, err := c.Do("sadd", invalidKey, key)
+ assert.NotNil(t, err)
+ _, err = c.Do("sadd", key, invalidMember)
+ assert.NotNil(t, err)
+ _, err = c.Do("sadd", key, key, invalidMember)
+ assert.NotNil(t, err)
+ _, err = c.Do("sadd", invalidKey, invalidMember)
+ assert.NotNil(t, err)
+
+ if _, err := c.Do("scard"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("scard", key, key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("sismember", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("sismember", key, "m1", "m2"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("smembers"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+ if _, err := c.Do("srandmember"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+ if _, err := c.Do("srandmember", key, 0); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+ if _, err := c.Do("srandmember", key, -1); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("smembers", key, key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("spop"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+ if _, err := c.Do("spop", key, "0"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("srem"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("srem", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("sclear"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("sclear", key, key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("smclear"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+}
+
+func TestZSet(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:myzset"
+
+ //if n, err := goredis.Int(c.Do("zkeyexists", key)); err != nil {
+ // t.Fatal(err)
+ //} else if n != 0 {
+ // t.Fatal(n)
+ //}
+
+ if n, err := goredis.Int(c.Do("zadd", key, 3, "a", 4, "b")); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ //if n, err := goredis.Int(c.Do("zkeyexists", key)); err != nil {
+ // t.Fatal(err)
+ //} else if n != 1 {
+ // t.Fatal(n)
+ //}
+
+ if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zadd", key, 1, "a", 2, "b")); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zadd", key, 3, "c", 4, "d")); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 4 {
+ t.Fatal(n)
+ }
+
+ if s, err := goredis.Int(c.Do("zscore", key, "c")); err != nil {
+ t.Fatal(err)
+ } else if s != 3 {
+ t.Fatal(s)
+ }
+
+ if n, err := goredis.Int(c.Do("zrem", key, "d", "e")); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zincrby", key, 4, "c")); err != nil {
+ t.Fatal(err)
+ } else if n != 7 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zincrby", key, -4, "c")); err != nil {
+ t.Fatal(err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zincrby", key, 4, "d")); err != nil {
+ t.Fatal(err)
+ } else if n != 4 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 4 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zrem", key, "a", "b", "c", "d")); err != nil {
+ t.Fatal(err)
+ } else if n != 4 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+}
+
+func TestZSetEmptyMember(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:myzset_empty"
+
+ if n, err := goredis.Int(c.Do("zadd", key, 4, "0", 5, nil)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if s, err := goredis.Int(c.Do("zscore", key, "0")); err != nil {
+ t.Fatal(err)
+ } else if s != 4 {
+ t.Fatal(s)
+ }
+ if s, err := goredis.Int(c.Do("zscore", key, nil)); err != nil {
+ t.Fatal(err)
+ } else if s != 5 {
+ t.Fatal(s)
+ }
+ if n, err := goredis.Int(c.Do("zadd", key, 3, "")); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+ if s, err := goredis.Int(c.Do("zscore", key, "")); err != nil {
+ t.Fatal(err)
+ } else if s != 3 {
+ t.Fatal(s)
+ }
+}
+
+func TestZSetInfScore(t *testing.T) {
+ // TODO: test +inf and -inf scores
+ // TODO: test negative scores
+}
+
+func TestZSetFloat64Score(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:myzset_float"
+
+ if n, err := goredis.Int(c.Do("zadd", key, 3, "a", 3, "b")); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zadd", key, 3.1, "a", 3.2, "b")); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zadd", key, 3.3, "c", 3.4, "d")); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 4 {
+ t.Fatal(n)
+ }
+
+ if s, err := goredis.Float64(c.Do("zscore", key, "c")); err != nil {
+ t.Fatal(err)
+ } else if s != 3.3 {
+ t.Fatal(s)
+ }
+
+ if n, err := goredis.Int(c.Do("zrem", key, "d", "e")); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Float64(c.Do("zincrby", key, 4, "c")); err != nil {
+ t.Fatal(err)
+ } else if n != 7.3 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Float64(c.Do("zincrby", key, -4, "c")); err != nil {
+ t.Fatal(err)
+ } else if n != 3.3 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Float64(c.Do("zincrby", key, 3.4, "d")); err != nil {
+ t.Fatal(err)
+ } else if n != 3.4 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int(c.Do("zrank", key, "a")); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int(c.Do("zrevrank", key, "b")); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int(c.Do("zrank", key, "c")); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+ if n, err := goredis.Int(c.Do("zrank", key, "d")); err != nil {
+ t.Fatal(err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zrevrank", key, "a")); err != nil {
+ t.Fatal(err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 4 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zrem", key, "a", "b", "c", "d")); err != nil {
+ t.Fatal(err)
+ } else if n != 4 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+}
+
+func TestZSetCount(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:myzset"
+ if _, err := goredis.Int(c.Do("zadd", key, 1, "a", 2, "b", 3, "c", 4, "d")); err != nil {
+ t.Fatal(err)
+ }
+
+ if n, err := goredis.Int(c.Do("zcount", key, 2, 4)); err != nil {
+ t.Fatal(err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcount", key, 4, 4)); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcount", key, 4, 3)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcount", key, "(2", 4)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcount", key, "2", "(4")); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcount", key, "(2", "(4")); err != nil {
+ t.Fatal(err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcount", key, "-inf", "+inf")); err != nil {
+ t.Fatal(err)
+ } else if n != 4 {
+ t.Fatal(n)
+ }
+
+ c.Do("zadd", key, 3, "e")
+
+ if n, err := goredis.Int(c.Do("zcount", key, "(2", "(4")); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ c.Do("zrem", key, "a", "b", "c", "d", "e")
+}
+
+func TestZSetRank(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:myzset"
+ if _, err := goredis.Int(c.Do("zadd", key, 1, "a", 2, "b", 3, "c", 4, "d")); err != nil {
+ t.Fatal(err)
+ }
+
+ if n, err := goredis.Int(c.Do("zrank", key, "c")); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if _, err := goredis.Int(c.Do("zrank", key, "e")); err != goredis.ErrNil {
+ t.Fatal(err)
+ }
+
+ if n, err := goredis.Int(c.Do("zrevrank", key, "c")); err != nil {
+ t.Fatalf("cmd error: %v", err)
+ } else if n != 1 {
+ t.Fatal(n)
+ }
+
+ if _, err := goredis.Int(c.Do("zrevrank", key, "e")); err != goredis.ErrNil {
+ t.Fatal(err)
+ }
+
+ key2 := "default:test:myzset2"
+ if _, err := goredis.Int(c.Do("zadd", key2, 0, "val0", 1, "val1", 2, "val2", 3, "val3")); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := goredis.Int(c.Do("zadd", key2, 4, "val4", 5, "val5", 6, "val6")); err != nil {
+ t.Fatal(err)
+ }
+ // this is used to test the case where an iterator seeking to the max of a range may land on the last data of the next key
+ keyExpire := "default:test:myexpkey"
+ keyExpire2 := "default:test:myexpkey2"
+ c.Do("setex", keyExpire, 10, "v1")
+ c.Do("setex", keyExpire2, 10, "v1")
+
+ if n, err := goredis.Int(c.Do("zrank", key2, "val3")); err != nil {
+ t.Fatal(err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zrevrank", key2, "val3")); err != nil {
+ t.Fatalf("cmd error: %v", err)
+ } else if n != 3 {
+ t.Fatal(n)
+ }
+}
+
+func testZSetRange(ay []interface{}, checkValues ...interface{}) error {
+ if len(ay) != len(checkValues) {
+ return fmt.Errorf("invalid return number %d != %d", len(ay), len(checkValues))
+ }
+
+ for i := 0; i < len(ay); i++ {
+ v, ok := ay[i].([]byte)
+ if !ok {
+ return fmt.Errorf("invalid data %d %v %T", i, ay[i], ay[i])
+ }
+
+ switch cv := checkValues[i].(type) {
+ case string:
+ if string(v) != cv {
+ return fmt.Errorf("not equal %s != %s", v, checkValues[i])
+ }
+ default:
+ if s, _ := strconv.Atoi(string(v)); s != checkValues[i] {
+ return fmt.Errorf("not equal %s != %v", v, checkValues[i])
+ }
+ }
+
+ }
+
+ return nil
+}
+
+func TestZSetRangeScore(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:myzset_range"
+ if _, err := goredis.Int(c.Do("zadd", key, 1, "a", 2, "b", 3, "c", 4, "d")); err != nil {
+ t.Fatal(err)
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrangebyscore", key, 1, 4, "withscores")); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "a", 1, "b", 2, "c", 3, "d", 4); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrangebyscore", key, 1, 4, "withscores", "limit", 1, 2)); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "b", 2, "c", 3); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrangebyscore", key, "-inf", "+inf", "withscores")); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "a", 1, "b", 2, "c", 3, "d", 4); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrangebyscore", key, "(1", "(4")); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "b", "c"); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrevrangebyscore", key, 4, 1, "withscores")); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "d", 4, "c", 3, "b", 2, "a", 1); err != nil {
+ t.Fatalf("%v, %v", err, v)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrevrangebyscore", key, 4, 1, "withscores", "limit", 1, 2)); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "c", 3, "b", 2); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrevrangebyscore", key, "+inf", "-inf", "withscores")); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "d", 4, "c", 3, "b", 2, "a", 1); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrevrangebyscore", key, "(4", "(1")); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "c", "b"); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if n, err := goredis.Int(c.Do("zremrangebyscore", key, 2, 3)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrangebyscore", key, 1, 4)); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "a", "d"); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestZSetRange(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:myzset_range_rank"
+ if _, err := goredis.Int(c.Do("zadd", key, 1, "a", 2, "b", 3, "c", 4, "d")); err != nil {
+ t.Fatal(err)
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrange", key, 0, 3, "withscores")); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "a", 1, "b", 2, "c", 3, "d", 4); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrange", key, 1, 4, "withscores")); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "b", 2, "c", 3, "d", 4); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrange", key, -2, -1, "withscores")); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "c", 3, "d", 4); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrange", key, 0, -1, "withscores")); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "a", 1, "b", 2, "c", 3, "d", 4); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrange", key, -1, -2, "withscores")); err != nil {
+ t.Fatal(err)
+ } else if len(v) != 0 {
+ t.Fatal(len(v))
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrevrange", key, 0, 4, "withscores")); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "d", 4, "c", 3, "b", 2, "a", 1); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrevrange", key, 0, -1, "withscores")); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "d", 4, "c", 3, "b", 2, "a", 1); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrevrange", key, 2, 3, "withscores")); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "b", 2, "a", 1); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrevrange", key, -2, -1, "withscores")); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "b", 2, "a", 1); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if n, err := goredis.Int(c.Do("zremrangebyrank", key, 2, 3)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if v, err := goredis.MultiBulk(c.Do("zrange", key, 0, 4)); err != nil {
+ t.Fatal(err)
+ } else {
+ if err := testZSetRange(v, "a", "b"); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if n, err := goredis.Int(c.Do("zclear", key)); err != nil {
+ t.Fatal(err)
+ } else if n <= 0 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
+ t.Fatal(err)
+ } else if n != 0 {
+ t.Fatal(n)
+ }
+
+}
+
+func TestZsetErrorParams(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:zset_error_param"
+ //zadd
+ if _, err := c.Do("zadd", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("zadd", key, "a", "b", "c"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("zadd", key, "-a", "a"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("zadd", key, "0.1a", "a"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+ invalidKey := string(append([]byte("default:test:zset_error_param"), make([]byte, 10240)...))
+ invalidMember := string(append([]byte("long_param"), make([]byte, 10240)...))
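+ // oversized keys and members should be rejected by zadd as well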
+ _, err := c.Do("zadd", invalidKey, 0, key)
+ assert.NotNil(t, err)
+ _, err = c.Do("zadd", key, 0, invalidMember)
+ assert.NotNil(t, err)
+ _, err = c.Do("zadd", key, 0, key, 1, invalidMember)
+ assert.NotNil(t, err)
+ _, err = c.Do("zadd", invalidKey, 0, invalidMember)
+ assert.NotNil(t, err)
+
+ //zcard
+ if _, err := c.Do("zcard"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ //zscore
+ if _, err := c.Do("zscore", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ //zrem
+ if _, err := c.Do("zrem", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ //zincrby
+ if _, err := c.Do("zincrby", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ //zcount
+ if _, err := c.Do("zcount", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("zcount", key, "-inf", "=inf"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ //zrank
+ if _, err := c.Do("zrank", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ //zrevrank
+ if _, err := c.Do("zrevrank", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ //zremrangebyrank
+ if _, err := c.Do("zremrangebyrank", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("zremrangebyrank", key, 0.1, 0.1); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ //zremrangebyscore
+ if _, err := c.Do("zremrangebyscore", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("zremrangebyscore", key, "-inf", "a"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("zremrangebyscore", key, 0, "a"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ //zrange
+ if _, err := c.Do("zrange", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("zrange", key, 0, 1, "withscore"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("zrange", key, 0, 1, "withscores", "a"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ //zrevrange, almost same as zrange
+ if _, err := c.Do("zrevrange", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ //zrangebyscore
+
+ if _, err := c.Do("zrangebyscore", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("zrangebyscore", key, 0, 1, "withscore"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("zrangebyscore", key, 0, 1, "withscores", "limit"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("zrangebyscore", key, 0, 1, "withscores", "limi", 1, 1); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("zrangebyscore", key, 0, 1, "withscores", "limit", "a", 1); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ if _, err := c.Do("zrangebyscore", key, 0, 1, "withscores", "limit", 1, "a"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ //zrevrangebyscore, almost same as zrangebyscore
+ if _, err := c.Do("zrevrangebyscore", key); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ //zclear
+ if _, err := c.Do("zclear"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+
+ //zmclear
+ if _, err := c.Do("zmclear"); err == nil {
+ t.Fatalf("invalid err of %v", err)
+ }
+}
+
+func TestZSetLex(t *testing.T) {
+ c := getTestConn(t)
+ defer c.Close()
+
+ key := "default:test:myzlexset"
+ if _, err := c.Do("zadd", key,
+ 0, "a", 0, "b", 0, "c", 0, "d", 0, "e", 0, "f", 0, "g"); err != nil {
+ t.Fatal(err)
+ }
+
+ if ay, err := goredis.Strings(c.Do("zrangebylex", key, "-", "[c")); err != nil {
+ t.Fatal(err)
+ } else if !reflect.DeepEqual(ay, []string{"a", "b", "c"}) {
+ t.Fatalf("must equal")
+ }
+
+ if ay, err := goredis.Strings(c.Do("zrangebylex", key, "-", "(c")); err != nil {
+ t.Fatal(err)
+ } else if !reflect.DeepEqual(ay, []string{"a", "b"}) {
+ t.Fatalf("must equal")
+ }
+
+ if ay, err := goredis.Strings(c.Do("zrangebylex", key, "[aaa", "(g")); err != nil {
+ t.Fatal(err)
+ } else if !reflect.DeepEqual(ay, []string{"b", "c", "d", "e", "f"}) {
+ t.Fatalf("must equal")
+ }
+
+ if n, err := goredis.Int64(c.Do("zlexcount", key, "-", "(c")); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int64(c.Do("zremrangebylex", key, "[aaa", "(g")); err != nil {
+ t.Fatal(err)
+ } else if n != 5 {
+ t.Fatal(n)
+ }
+
+ if n, err := goredis.Int64(c.Do("zlexcount", key, "-", "+")); err != nil {
+ t.Fatal(err)
+ } else if n != 2 {
+ t.Fatal(n)
+ }
+}
diff --git a/server/redis_api_test.go b/server/redis_api_test.go
index ebd8aa8b..fc622124 100644
--- a/server/redis_api_test.go
+++ b/server/redis_api_test.go
@@ -3,28 +3,37 @@ package server
import (
"fmt"
"io/ioutil"
- "math/rand"
+ "net/http"
"path"
- "reflect"
"strconv"
"sync"
- "sync/atomic"
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/node"
- "github.com/absolute8511/ZanRedisDB/rockredis"
"github.com/siddontang/goredis"
"github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/node"
+ "github.com/youzan/ZanRedisDB/rockredis"
)
var testOnce sync.Once
-var kvs *Server
-var redisport int
+var gkvs *Server
+var gredisport int
var OK = "OK"
var gtmpDir string
+func changeLogLevel(t *testing.T, l int, port int) {
+ url := fmt.Sprintf("http://127.0.0.1:%v/loglevel/set?loglevel=%d", port, l)
+ rsp, err := http.Post(url, "json", nil)
+ if err != nil {
+ t.Fatal(err) // rsp is nil when the request fails, so do not continue
+ }
+ if rsp.StatusCode != http.StatusOK {
+ t.Error(rsp.Status)
+ }
+}
+
type testLogger struct {
t *testing.T
}
@@ -32,6 +41,7 @@ type testLogger struct {
func newTestLogger(t *testing.T) *testLogger {
return &testLogger{t: t}
}
+
func (l *testLogger) Output(maxdepth int, s string) error {
l.t.Logf("%v:%v", time.Now().UnixNano(), s)
return nil
@@ -47,7 +57,7 @@ func (l *testLogger) OutputWarning(maxdepth int, s string) error {
return nil
}
-func startTestServer(t *testing.T) (*Server, int, string) {
+func startTestServer(t *testing.T, port int) (*Server, int, string) {
tmpDir, err := ioutil.TempDir("", fmt.Sprintf("rocksdb-test-%d", time.Now().UnixNano()))
assert.Nil(t, err)
t.Logf("dir:%v\n", tmpDir)
@@ -55,21 +65,27 @@ func startTestServer(t *testing.T) (*Server, int, string) {
path.Join(tmpDir, "myid"),
[]byte(strconv.FormatInt(int64(1), 10)),
common.FILE_PERM)
- raftAddr := "http://127.0.0.1:12345"
- redisport := 22345
+ redisport := port
+ raftAddr := "http://127.0.0.1:" + strconv.Itoa(port+2)
var replica node.ReplicaInfo
replica.NodeID = 1
replica.ReplicaID = 1
replica.RaftAddr = raftAddr
kvOpts := ServerConfig{
- ClusterID: "test",
- DataDir: tmpDir,
- RedisAPIPort: redisport,
- LocalRaftAddr: raftAddr,
- BroadcastAddr: "127.0.0.1",
- TickMs: 100,
- ElectionTick: 5,
- }
+ ClusterID: "unit-test-api",
+ DataDir: tmpDir,
+ RedisAPIPort: redisport,
+ HttpAPIPort: redisport + 1,
+ LocalRaftAddr: raftAddr,
+ BroadcastAddr: "127.0.0.1",
+ TickMs: 100,
+ ElectionTick: 5,
+ UseRocksWAL: testUseRocksWAL,
+ SharedRocksWAL: testSharedRocksWAL,
+ UseRedisV2: true,
+ }
+ kvOpts.RocksDBOpts.EnablePartitionedIndexFilter = true
+ kvOpts.WALRocksDBOpts.EngineType = testEngineType
nsConf := node.NewNSConfig()
nsConf.Name = "default-0"
@@ -79,8 +95,11 @@ func startTestServer(t *testing.T) (*Server, int, string) {
nsConf.Replicator = 1
nsConf.RaftGroupConf.GroupID = 1000
nsConf.RaftGroupConf.SeedNodes = append(nsConf.RaftGroupConf.SeedNodes, replica)
- nsConf.ExpirationPolicy = "consistency_deletion"
- kv := NewServer(kvOpts)
+ //nsConf.ExpirationPolicy = common.ConsistencyDeletionExpirationPolicy
+ nsConf.ExpirationPolicy = common.WaitCompactExpirationPolicy
+ nsConf.DataVersion = common.ValueHeaderV1Str
+ kv, err := NewServer(kvOpts)
+ assert.Nil(t, err)
if _, err := kv.InitKVNamespace(1, nsConf, false); err != nil {
t.Fatalf("failed to init namespace: %v", err)
}
@@ -89,10 +108,11 @@ func startTestServer(t *testing.T) (*Server, int, string) {
time.Sleep(time.Second)
return kv, redisport, tmpDir
}
+
func waitServerForLeader(t *testing.T, w time.Duration) {
start := time.Now()
for {
- replicaNode := kvs.GetNamespaceFromFullName("default-0")
+ replicaNode := gkvs.GetNamespaceFromFullName("default-0")
assert.NotNil(t, replicaNode)
if replicaNode.Node.IsLead() {
return
@@ -107,11 +127,11 @@ func waitServerForLeader(t *testing.T, w time.Duration) {
func getTestConn(t *testing.T) *goredis.PoolConn {
testOnce.Do(func() {
- kvs, redisport, gtmpDir = startTestServer(t)
+ gkvs, gredisport, gtmpDir = startTestServer(t, redisAPITestPortBase)
waitServerForLeader(t, time.Second*10)
},
)
- c := goredis.NewClient("127.0.0.1:"+strconv.Itoa(redisport), "")
+ c := goredis.NewClient("127.0.0.1:"+strconv.Itoa(gredisport), "")
c.SetMaxIdleConns(4)
conn, err := c.Get()
if err != nil {
@@ -119,2707 +139,3 @@ func getTestConn(t *testing.T) *goredis.PoolConn {
}
return conn
}
-
-func TestKV(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key1 := "default:test:a"
- key2 := "default:test:b"
- keyExpire := "default:test:xx"
- if ok, err := goredis.String(c.Do("set", key1, "1234")); err != nil {
- t.Fatal(err)
- } else if ok != OK {
- t.Fatal(ok)
- }
-
- if n, err := goredis.Int(c.Do("setnx", key1, "123")); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("setnx", key2, "123")); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
-
- if ok, err := goredis.String(c.Do("setex", keyExpire, 1, "hello world")); err != nil {
- t.Fatal(err)
- } else if ok != OK {
- t.Fatal(ok)
- }
- if v, err := goredis.String(c.Do("get", keyExpire)); err != nil {
- t.Fatal(err)
- } else if v != "hello world" {
- t.Fatal(v)
- }
-
- time.Sleep(time.Second * 4)
- if v, err := goredis.String(c.Do("get", keyExpire)); err != goredis.ErrNil {
- if err == nil && v == "hello world" {
- time.Sleep(time.Second * 16)
- if v, err := goredis.String(c.Do("get", keyExpire)); err != goredis.ErrNil {
- t.Fatalf("expired key should be expired: %v, %v", v, err)
- }
- } else {
- t.Fatalf("get expired key error: %v, %v", v, err)
- }
- }
-
- if v, err := goredis.String(c.Do("get", key1)); err != nil {
- t.Fatal(err)
- } else if v != "1234" {
- t.Fatal(v)
- }
-
- //if v, err := goredis.String(c.Do("getset", "a", "123")); err != nil {
- // t.Fatal(err)
- //} else if v != "1234" {
- // t.Fatal(v)
- //}
-
- //if v, err := goredis.String(c.Do("get", "a")); err != nil {
- // t.Fatal(err)
- //} else if v != "123" {
- // t.Fatal(v)
- //}
-
- if n, err := goredis.Int(c.Do("exists", key1)); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
- if n, err := goredis.Int(c.Do("exists", key1, key2)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("exists", "default:test:empty_key_test")); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if _, err := goredis.Int(c.Do("del", key1, key2)); err != nil {
- t.Fatal(err)
- }
-
- if n, err := goredis.Int(c.Do("exists", key1)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("exists", key2)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("exists", key1, key2)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-}
-
-func TestKVPipeline(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
- pkey1 := "default:test:kvpla"
- pkey2 := "default:test:kvplb"
-
- err := c.Send("set", pkey1, "1")
- assert.Nil(t, err)
- err = c.Send("set", pkey2, "2")
- assert.Nil(t, err)
- v, err := goredis.String(c.Receive())
- assert.Nil(t, err)
- assert.Equal(t, OK, v)
- v, err = goredis.String(c.Receive())
- assert.Nil(t, err)
- assert.Equal(t, OK, v)
- if v, err := goredis.String(c.Do("get", pkey1)); err != nil {
- t.Fatal(err)
- } else if v != "1" {
- t.Error(v)
- }
- if v, err := goredis.String(c.Do("get", pkey2)); err != nil {
- t.Fatal(err)
- } else if v != "2" {
- t.Error(v)
- }
-}
-
-func TestKVM(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key1 := "default:test:kvma"
- key2 := "default:test:kvmb"
- key3 := "default:test:kvmc"
- if ok, err := goredis.String(c.Do("set", key1, "1")); err != nil {
- t.Fatal(err)
- } else if ok != OK {
- t.Fatal(ok)
- }
- if ok, err := goredis.String(c.Do("set", key2, "2")); err != nil {
- t.Fatal(err)
- } else if ok != OK {
- t.Fatal(ok)
- }
-
- if v, err := goredis.String(c.Do("get", key1)); err != nil {
- t.Fatal(err)
- } else if v != "1" {
- t.Error(v)
- }
- if v, err := goredis.String(c.Do("get", key2)); err != nil {
- t.Fatal(err)
- } else if v != "2" {
- t.Error(v)
- }
-
- if v, err := goredis.MultiBulk(c.Do("mget", key1, key2, key3)); err != nil {
- t.Fatal(err)
- } else if len(v) != 3 {
- t.Fatal(len(v))
- } else {
- if vv, ok := v[0].([]byte); !ok || string(vv) != "1" {
- t.Fatalf("not 1, %v", v)
- }
-
- if vv, ok := v[1].([]byte); !ok || string(vv) != "2" {
- t.Errorf("not 2, %v", v[1])
- }
-
- if v[2] != nil {
- t.Errorf("must nil: %v", v[2])
- }
- }
-}
-
-func TestKVIncrDecr(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:kv_n"
- if n, err := goredis.Int64(c.Do("incr", key)); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int64(c.Do("incr", key)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int64(c.Do("incrby", key, 10)); err != nil {
- t.Fatal(err)
- } else if n != 12 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int64(c.Do("incrby", key, -10)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-}
-
-func TestKVBatch(t *testing.T) {
-
- var wg sync.WaitGroup
- concurrency := 100
- poolList := make([]*goredis.PoolConn, concurrency)
- for i := 0; i < concurrency; i++ {
- poolList[i] = getTestConn(t)
- }
- defer func() {
- for i := 0; i < concurrency; i++ {
- poolList[i].Close()
- }
- }()
- for i := 0; i < concurrency; i++ {
- wg.Add(1)
- go func(index int, c *goredis.PoolConn) {
- defer wg.Done()
-
- key1 := "default:test:a" + strconv.Itoa(index)
- key2 := "default:test:b" + strconv.Itoa(index)
- key3 := "default:test:c" + strconv.Itoa(index)
- key4 := "default:test:d" + strconv.Itoa(index)
- keyExpire := "default:test:xx" + strconv.Itoa(index)
- if ok, err := goredis.String(c.Do("set", key1, "1234")); err != nil {
- t.Fatal(err)
- } else if ok != OK {
- t.Fatal(ok)
- }
-
- if n, err := goredis.Int(c.Do("setnx", key1, "123")); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("setnx", key2, "123")); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
-
- if ok, err := goredis.String(c.Do("set", key3, key3)); err != nil {
- t.Fatal(err)
- } else if ok != OK {
- t.Fatal(ok)
- }
- if v, err := goredis.String(c.Do("get", key3)); err != nil {
- t.Fatal(err)
- } else if v != key3 {
- t.Fatal(v)
- }
-
- if ok, err := goredis.String(c.Do("setex", keyExpire, 1, "hello world")); err != nil {
- t.Fatal(err)
- } else if ok != OK {
- t.Fatal(ok)
- }
- if v, err := goredis.String(c.Do("get", keyExpire)); err != nil {
- t.Fatal(err)
- } else if v != "hello world" {
- t.Fatal(v)
- }
-
- if ok, err := goredis.String(c.Do("set", key4, key4)); err != nil {
- t.Fatal(err)
- } else if ok != OK {
- t.Fatal(ok)
- }
- if v, err := goredis.String(c.Do("get", key4)); err != nil {
- t.Fatal(err)
- } else if v != key4 {
- t.Fatal(v)
- }
-
- mkey1 := "default:test:kvma" + strconv.Itoa(index)
- mkey2 := "default:test:kvmb" + strconv.Itoa(index)
- mkey3 := "default:test:kvmc" + strconv.Itoa(index)
- // test pipeline set
- err := c.Send("set", mkey1, "1")
- assert.Nil(t, err)
- err = c.Send("set", mkey2, "2")
- assert.Nil(t, err)
- v, err := goredis.String(c.Receive())
- assert.Nil(t, err)
- assert.Equal(t, OK, v)
- v, err = goredis.String(c.Receive())
- assert.Nil(t, err)
- assert.Equal(t, OK, v)
-
- if v, err := goredis.String(c.Do("get", mkey1)); err != nil {
- t.Fatal(err)
- } else if v != "1" {
- t.Error(v)
- }
- if v, err := goredis.String(c.Do("get", mkey2)); err != nil {
- t.Fatal(err)
- } else if v != "2" {
- t.Error(v)
- }
-
- if v, err := goredis.MultiBulk(c.Do("mget", mkey1, mkey2, mkey3)); err != nil {
- t.Fatal(err)
- } else if len(v) != 3 {
- t.Fatal(len(v))
- } else {
- if vv, ok := v[0].([]byte); !ok || string(vv) != "1" {
- t.Fatalf("not 1, %v", v)
- }
-
- if vv, ok := v[1].([]byte); !ok || string(vv) != "2" {
- t.Errorf("not 2, %v", v[1])
- }
-
- if v[2] != nil {
- t.Errorf("must nil: %v", v[2])
- }
- }
-
- time.Sleep(time.Second * 4)
- if v, err := goredis.String(c.Do("get", keyExpire)); err != goredis.ErrNil {
- if err == nil && v == "hello world" {
- time.Sleep(time.Second * 16)
- if v, err := goredis.String(c.Do("get", keyExpire)); err != goredis.ErrNil {
- t.Fatalf("expired key should be expired: %v, %v", v, err)
- }
- } else {
- t.Fatalf("get expired key error: %v, %v", v, err)
- }
- }
-
- if v, err := goredis.String(c.Do("get", key1)); err != nil {
- t.Fatal(err)
- } else if v != "1234" {
- t.Fatal(v)
- }
-
- if n, err := goredis.Int(c.Do("exists", key1)); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("exists", "default:test:empty_key_test"+strconv.Itoa(index))); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if _, err := goredis.Int(c.Do("del", key1, key2)); err != nil {
- t.Fatal(err)
- }
-
- if n, err := goredis.Int(c.Do("exists", key1)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("exists", key2)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
- }(i, poolList[i])
- }
- wg.Wait()
-
-}
-
-func TestKVErrorParams(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key1 := "default:test:kv_erra"
- key2 := "default:test:kv_errb"
- key3 := "default:test:kv_errc"
- _, err := c.Do("get", key1, key2, key3)
- assert.NotNil(t, err)
-
- _, err = c.Do("set", key1, key2, key3)
- assert.NotNil(t, err)
-
- _, err = c.Do("getset", key1, key2, key3)
- assert.NotNil(t, err)
-
- _, err = c.Do("setnx", key1, key2, key3)
- assert.NotNil(t, err)
-
- _, err = c.Do("exists")
- assert.NotNil(t, err)
-
- _, err = c.Do("incr", key1, key2)
- assert.NotNil(t, err)
-
- _, err = c.Do("incrby", key1)
- assert.NotNil(t, err)
-
- _, err = c.Do("incrby", key1, "nan")
- assert.NotNil(t, err)
-
- _, err = c.Do("decrby", key1)
- assert.NotNil(t, err)
-
- _, err = c.Do("del")
- assert.NotNil(t, err)
-
- _, err = c.Do("mset")
- assert.NotNil(t, err)
-
- _, err = c.Do("mset", key1, key2, key3)
- assert.NotNil(t, err)
-
- _, err = c.Do("mget")
- assert.NotNil(t, err)
-}
-
-func TestPFOp(t *testing.T) {
- if testing.Verbose() {
- rockredis.SetLogger(int32(common.LOG_DETAIL), newTestLogger(t))
- }
- c := getTestConn(t)
- defer c.Close()
-
- key1 := "default:test:pf_a"
- cnt, err := goredis.Int64(c.Do("pfcount", key1))
- assert.Nil(t, err)
- assert.Equal(t, int64(0), cnt)
-
- // first init with no element
- cnt, err = goredis.Int64(c.Do("pfadd", key1))
- assert.Nil(t, err)
- assert.Equal(t, int64(1), cnt)
-
- cnt, err = goredis.Int64(c.Do("pfadd", key1, 1))
- assert.Nil(t, err)
- assert.Equal(t, int64(1), cnt)
- cnt, err = goredis.Int64(c.Do("pfadd", key1, 1))
- assert.Nil(t, err)
- assert.Equal(t, int64(0), cnt)
-
- // test pfadd with no element on exist key
- cnt, err = goredis.Int64(c.Do("pfadd", key1))
- assert.Nil(t, err)
- assert.Equal(t, int64(0), cnt)
-
- cnt, err = goredis.Int64(c.Do("pfadd", key1, 1, 2, 3))
- assert.Nil(t, err)
- assert.Equal(t, int64(1), cnt)
-
- cnt, err = goredis.Int64(c.Do("pfcount", key1))
- assert.Nil(t, err)
- assert.Equal(t, int64(3), cnt)
-
- c.Do("del", key1)
-
- cnt, err = goredis.Int64(c.Do("pfcount", key1))
- assert.Nil(t, err)
- assert.Equal(t, int64(0), cnt)
-}
-
-func TestPFOpErrorParams(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key1 := "default:test:pf_erra"
- key2 := "default:test:pf_errb"
- _, err := c.Do("pfadd")
- assert.NotNil(t, err)
-
- _, err = c.Do("pfcount", key1, key2)
- assert.NotNil(t, err)
-
- _, err = c.Do("pfcount")
- assert.NotNil(t, err)
-}
-
-func TestHashEmptyField(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:hashempty"
- _, err := c.Do("hset", key, "", "v1")
- assert.Nil(t, err)
-
- v, _ := goredis.String(c.Do("hget", key, ""))
- assert.Equal(t, "v1", v)
-
- n, err := goredis.Int(c.Do("hexists", key, ""))
- assert.Nil(t, err)
- assert.Equal(t, 1, n)
-
- _, err = c.Do("hdel", key, "")
- assert.Nil(t, err)
-
- v, err = goredis.String(c.Do("hget", key, ""))
- assert.Equal(t, goredis.ErrNil, err)
- assert.Equal(t, "", v)
-
- n, err = goredis.Int(c.Do("hexists", key, ""))
- assert.Nil(t, err)
- assert.Equal(t, 0, n)
-}
-
-func TestHash(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:hasha"
-
- if n, err := goredis.Int(c.Do("hset", key, 1, 0)); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
- if n, err := goredis.Int(c.Do("hsetnx", key, 1, 0)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("hexists", key, 1)); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("hexists", key, -1)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("hget", key, 1)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("hset", key, 1, 1)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("hget", key, 1)); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("hlen", key)); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
-}
-
-func testHashArray(ay []interface{}, checkValues ...int) error {
- if len(ay) != len(checkValues) {
- return fmt.Errorf("invalid return number %d != %d", len(ay), len(checkValues))
- }
-
- for i := 0; i < len(ay); i++ {
- if ay[i] == nil && checkValues[i] != 0 {
- return fmt.Errorf("must nil")
- } else if ay[i] != nil {
- v, ok := ay[i].([]byte)
- if !ok {
- return fmt.Errorf("invalid return data %d %v :%T", i, ay[i], ay[i])
- }
-
- d, _ := strconv.Atoi(string(v))
-
- if d != checkValues[i] {
- return fmt.Errorf("invalid data %d %s != %d", i, v, checkValues[i])
- }
- }
- }
- return nil
-}
-
-func TestHashM(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:msetb"
- if ok, err := goredis.String(c.Do("hmset", key, 1, 1, 2, 2, 3, 3)); err != nil {
- t.Fatal(err)
- } else if ok != OK {
- t.Fatal(ok)
- }
-
- if n, err := goredis.Int(c.Do("hlen", key)); err != nil {
- t.Fatal(err)
- } else if n != 3 {
- t.Fatal(n)
- }
-
- if v, err := goredis.MultiBulk(c.Do("hmget", key, 1, 2, 3, 4)); err != nil {
- t.Fatal(err)
- } else {
- if err := testHashArray(v, 1, 2, 3, 0); err != nil {
- t.Fatal(err)
- }
- }
-
- if n, err := goredis.Int(c.Do("hdel", key, 1, 2, 3, 4)); err != nil {
- t.Fatal(err)
- } else if n != 3 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("hlen", key)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if v, err := goredis.MultiBulk(c.Do("hmget", key, 1, 2, 3, 4)); err != nil {
- t.Fatal(err)
- } else {
- if err := testHashArray(v, 0, 0, 0, 0); err != nil {
- t.Fatal(err)
- }
- }
-
- if n, err := goredis.Int(c.Do("hlen", key)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-}
-
-func TestHashIncr(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:hashincr-c"
- if n, err := goredis.Int(c.Do("hincrby", key, 1, 1)); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(err)
- }
-
- if n, err := goredis.Int(c.Do("hlen", key)); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("hincrby", key, 1, 10)); err != nil {
- t.Fatal(err)
- } else if n != 11 {
- t.Fatal(err)
- }
-
- if n, err := goredis.Int(c.Do("hlen", key)); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("hincrby", key, 1, -11)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(err)
- }
-}
-
-func TestHashGetAll(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:hgetalld"
-
- if ok, err := goredis.String(c.Do("hmset", key, 1, 1, 2, 2, 3, 3)); err != nil {
- t.Fatal(err)
- } else if ok != OK {
- t.Fatal(ok)
- }
-
- if v, err := goredis.MultiBulk(c.Do("hgetall", key)); err != nil {
- t.Fatal(err)
- } else {
- if err := testHashArray(v, 1, 1, 2, 2, 3, 3); err != nil {
- t.Fatal(err)
- }
- }
-
- if v, err := goredis.MultiBulk(c.Do("hkeys", key)); err != nil {
- t.Fatal(err)
- } else {
- if err := testHashArray(v, 1, 2, 3); err != nil {
- t.Fatal(err)
- }
- }
-
- //if v, err := goredis.MultiBulk(c.Do("hvals", key)); err != nil {
- // t.Fatal(err)
- //} else {
- // if err := testHashArray(v, 1, 2, 3); err != nil {
- // t.Fatal(err)
- // }
- //}
-
- if n, err := goredis.Int(c.Do("hclear", key)); err != nil {
- t.Fatal(err)
- } else if n != 3 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("hlen", key)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-}
-
-func TestHashErrorParams(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:hash_err_param"
- if _, err := c.Do("hset", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("hget", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("hexists", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("hdel", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("hlen", key, "a"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("hincrby", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("hmset", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("hmset", key, "f1", "v1", "f2"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("hmget", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("hgetall"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("hkeys"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("hvals"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("hclear"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("hclear", key, "a"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("hmclear"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-}
-
-func testListIndex(t *testing.T, key string, index int64, v int) error {
- c := getTestConn(t)
- defer c.Close()
-
- n, err := goredis.Int(c.Do("lindex", key, index))
- if err == goredis.ErrNil && v != 0 {
- return fmt.Errorf("must nil")
- } else if err != nil && err != goredis.ErrNil {
- return err
- } else if n != v {
- return fmt.Errorf("index err number %d != %d", n, v)
- }
-
- return nil
-}
-
-func testListRange(t *testing.T, key string, start int64, stop int64, checkValues ...int) error {
- c := getTestConn(t)
- defer c.Close()
-
- vs, err := goredis.MultiBulk(c.Do("lrange", key, start, stop))
- if err != nil {
- return err
- }
-
- if len(vs) != len(checkValues) {
- return fmt.Errorf("invalid return number %d != %d", len(vs), len(checkValues))
- }
-
- var n int
- for i, v := range vs {
- if d, ok := v.([]byte); ok {
- n, err = strconv.Atoi(string(d))
- if err != nil {
- return err
- } else if n != checkValues[i] {
- return fmt.Errorf("invalid data %d: %d != %d", i, n, checkValues[i])
- }
- } else {
- return fmt.Errorf("invalid data %v %T", v, v)
- }
- }
-
- return nil
-}
-
-func TestList(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:lista"
- //if n, err := goredis.Int(c.Do("lkeyexists", key)); err != nil {
- // t.Fatal(err)
- //} else if n != 0 {
- // t.Fatal(n)
- //}
-
- if n, err := goredis.Int(c.Do("lpush", key, 1)); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
-
- //if n, err := goredis.Int(c.Do("lkeyexists", key)); err != nil {
- // t.Fatal(err)
- //} else if n != 1 {
- // t.Fatal(1)
- //}
-
- if n, err := goredis.Int(c.Do("rpush", key, 2)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("rpush", key, 3)); err != nil {
- t.Fatal(err)
- } else if n != 3 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("llen", key)); err != nil {
- t.Fatal(err)
- } else if n != 3 {
- t.Fatal(n)
- }
-
- //for ledis-cli a 1 2 3
- // 127.0.0.1:6379> lrange a 0 0
- // 1) "1"
- if err := testListRange(t, key, 0, 0, 1); err != nil {
- t.Fatal(err)
- }
-
- // 127.0.0.1:6379> lrange a 0 1
- // 1) "1"
- // 2) "2"
-
- if err := testListRange(t, key, 0, 1, 1, 2); err != nil {
- t.Fatal(err)
- }
-
- // 127.0.0.1:6379> lrange a 0 5
- // 1) "1"
- // 2) "2"
- // 3) "3"
- if err := testListRange(t, key, 0, 5, 1, 2, 3); err != nil {
- t.Fatal(err)
- }
-
- // 127.0.0.1:6379> lrange a -1 5
- // 1) "3"
- if err := testListRange(t, key, -1, 5, 3); err != nil {
- t.Fatal(err)
- }
-
- // 127.0.0.1:6379> lrange a -5 -1
- // 1) "1"
- // 2) "2"
- // 3) "3"
- if err := testListRange(t, key, -5, -1, 1, 2, 3); err != nil {
- t.Fatal(err)
- }
-
- // 127.0.0.1:6379> lrange a -2 -1
- // 1) "2"
- // 2) "3"
- if err := testListRange(t, key, -2, -1, 2, 3); err != nil {
- t.Fatal(err)
- }
-
- // 127.0.0.1:6379> lrange a -1 -2
- // (empty list or set)
- if err := testListRange(t, key, -1, -2); err != nil {
- t.Fatal(err)
- }
-
- // 127.0.0.1:6379> lrange a -1 2
- // 1) "3"
- if err := testListRange(t, key, -1, 2, 3); err != nil {
- t.Fatal(err)
- }
-
- // 127.0.0.1:6379> lrange a -5 5
- // 1) "1"
- // 2) "2"
- // 3) "3"
- if err := testListRange(t, key, -5, 5, 1, 2, 3); err != nil {
- t.Fatal(err)
- }
-
- // 127.0.0.1:6379> lrange a -1 0
- // (empty list or set)
- if err := testListRange(t, key, -1, 0); err != nil {
- t.Fatal(err)
- }
-
- if err := testListRange(t, "default:test:empty list", 0, 100); err != nil {
- t.Fatal(err)
- }
-
- // 127.0.0.1:6379> lrange a -1 -1
- // 1) "3"
- if err := testListRange(t, key, -1, -1, 3); err != nil {
- t.Fatal(err)
- }
-
- if err := testListIndex(t, key, -1, 3); err != nil {
- t.Fatal(err)
- }
-
- if err := testListIndex(t, key, 0, 1); err != nil {
- t.Fatal(err)
- }
-
- if err := testListIndex(t, key, 1, 2); err != nil {
- t.Fatal(err)
- }
-
- if err := testListIndex(t, key, 2, 3); err != nil {
- t.Fatal(err)
- }
-
- if err := testListIndex(t, key, 5, 0); err != nil {
- t.Fatal(err)
- }
-
- if err := testListIndex(t, key, -1, 3); err != nil {
- t.Fatal(err)
- }
-
- if err := testListIndex(t, key, -2, 2); err != nil {
- t.Fatal(err)
- }
-
- if err := testListIndex(t, key, -3, 1); err != nil {
- t.Fatal(err)
- }
-}
-
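The commented expectations above pin down how lrange treats negative and out-of-range indexes. A small self-contained sketch of that normalization, written to reproduce the same results; it is not the server's implementation.

package main

import "fmt"

// normalizeRange converts lrange-style start/stop (negative values count
// from the tail) into concrete slice bounds, returning false for an empty range.
func normalizeRange(start, stop, length int) (int, int, bool) {
	if start < 0 {
		start += length
	}
	if stop < 0 {
		stop += length
	}
	if start < 0 {
		start = 0
	}
	if start >= length || start > stop {
		return 0, 0, false
	}
	if stop >= length {
		stop = length - 1
	}
	return start, stop, true
}

func main() {
	list := []int{1, 2, 3}
	for _, r := range [][2]int{{0, 0}, {-1, 5}, {-2, -1}, {-1, -2}, {-1, 0}} {
		if s, e, ok := normalizeRange(r[0], r[1], len(list)); ok {
			fmt.Println(r, "->", list[s:e+1])
		} else {
			fmt.Println(r, "-> empty")
		}
	}
}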
-func TestListMPush(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:listmpushb"
- if n, err := goredis.Int(c.Do("rpush", key, 1, 2, 3)); err != nil {
- t.Fatal(err)
- } else if n != 3 {
- t.Fatal(n)
- }
-
- if err := testListRange(t, key, 0, 3, 1, 2, 3); err != nil {
- t.Fatal(err)
- }
-
- if n, err := goredis.Int(c.Do("lpush", key, 1, 2, 3)); err != nil {
- t.Fatal(err)
- } else if n != 6 {
- t.Fatal(n)
- }
-
- if err := testListRange(t, key, 0, 6, 3, 2, 1, 1, 2, 3); err != nil {
- t.Fatal(err)
- }
-}
-
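The final range check expects 3 2 1 1 2 3 because rpush appends its arguments in order while lpush pushes them one at a time onto the head, reversing them. A toy model of just that ordering rule:

package main

import "fmt"

// lpush models LPUSH key v1 v2 ...: each value is pushed onto the head in
// argument order, so the arguments end up reversed at the front.
func lpush(list []string, vals ...string) []string {
	for _, v := range vals {
		list = append([]string{v}, list...)
	}
	return list
}

// rpush models RPUSH: arguments are appended in order.
func rpush(list []string, vals ...string) []string {
	return append(list, vals...)
}

func main() {
	l := rpush(nil, "1", "2", "3") // [1 2 3]
	l = lpush(l, "1", "2", "3")    // [3 2 1 1 2 3]
	fmt.Println(l)
}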
-func TestPop(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:c"
- if n, err := goredis.Int(c.Do("rpush", key, 1, 2, 3, 4, 5, 6)); err != nil {
- t.Fatal(err)
- } else if n != 6 {
- t.Fatal(n)
- }
-
- if v, err := goredis.Int(c.Do("lpop", key)); err != nil {
- t.Fatal(err)
- } else if v != 1 {
- t.Fatal(v)
- }
-
- if v, err := goredis.Int(c.Do("rpop", key)); err != nil {
- t.Fatal(err)
- } else if v != 6 {
- t.Fatal(v)
- }
-
- if n, err := goredis.Int(c.Do("lpush", key, 1)); err != nil {
- t.Fatal(err)
- } else if n != 5 {
- t.Fatal(n)
- }
-
- if err := testListRange(t, key, 0, 5, 1, 2, 3, 4, 5); err != nil {
- t.Fatal(err)
- }
-
- for i := 1; i <= 5; i++ {
- if v, err := goredis.Int(c.Do("lpop", key)); err != nil {
- t.Fatal(err)
- } else if v != i {
- t.Fatal(v)
- }
- }
-
- if n, err := goredis.Int(c.Do("llen", key)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- c.Do("rpush", key, 1, 2, 3, 4, 5)
-
- if n, err := goredis.Int(c.Do("lclear", key)); err != nil {
- t.Fatal(err)
- } else if n != 5 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("llen", key)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
-}
-
-func disableTestTrim(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:d"
- if n, err := goredis.Int(c.Do("rpush", key, 1, 2, 3, 4, 5, 6)); err != nil {
- t.Fatal(err)
- } else if n != 6 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("ltrim_front", key, 2)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("llen", key)); err != nil {
- t.Fatal(err)
- } else if n != 4 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("ltrim_back", key, 2)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("llen", key)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("ltrim_front", key, 5)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("llen", key)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("rpush", key, 1, 2)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("ltrim_front", key, 2)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("llen", key)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-}
-func TestListLPushRPop(t *testing.T) {
- c := getTestConn(t)
- c2 := getTestConn(t)
- defer c.Close()
- defer c2.Close()
-
- k1 := []byte("default:test_lpushrpop:1")
- klist := make([][]byte, 0, 10)
- klist = append(klist, k1)
- for i := 2; i < 9; i++ {
- klist = append(klist, []byte("default:test_lpushrpop:"+strconv.Itoa(i)))
- }
-
- n, err := goredis.Int(c.Do("llen", k1))
- assert.Nil(t, err)
- assert.Equal(t, 0, n)
- c.Do("lpush", k1, []byte("a"))
- n, err = goredis.Int(c.Do("llen", k1))
- assert.Nil(t, err)
- assert.Equal(t, 1, n)
- c.Do("rpop", k1)
- n, err = goredis.Int(c.Do("llen", k1))
- assert.Nil(t, err)
- assert.Equal(t, 0, n)
- c.Do("rpop", k1)
- n, err = goredis.Int(c.Do("llen", k1))
- assert.Nil(t, err)
- assert.Equal(t, 0, n)
- c.Do("lpush", k1, []byte("a"))
- c.Do("lpush", k1, []byte("a"))
- c.Do("lpush", k1, []byte("a"))
- c.Do("lpush", k1, []byte("a"))
- c.Do("rpop", k1)
- c.Do("rpop", k1)
- c.Do("lpush", k1, []byte("a"))
- c.Do("lpush", k1, []byte("a"))
- c.Do("rpop", k1)
- c.Do("rpop", k1)
- c.Do("lpush", k1, []byte("a"))
- c.Do("rpop", k1)
- c.Do("rpop", k1)
- n, err = goredis.Int(c.Do("llen", k1))
- assert.Nil(t, err)
- assert.Equal(t, 1, n)
- v, err := goredis.Bytes(c.Do("rpop", k1))
- assert.Nil(t, err)
- assert.Equal(t, []byte("a"), v)
- n, err = goredis.Int(c.Do("llen", k1))
- assert.Nil(t, err)
- assert.Equal(t, 0, n)
-
- pushed := make([]int32, len(klist))
- poped := make([]int32, len(klist))
- connPushList := make([]*goredis.PoolConn, len(klist))
- connPopList := make([]*goredis.PoolConn, len(klist))
-
- start := time.Now()
- var wg sync.WaitGroup
- for i := range klist {
- connPushList[i] = getTestConn(t)
- connPopList[i] = getTestConn(t)
- wg.Add(2)
- go func(index int) {
- defer wg.Done()
- r := rand.New(rand.NewSource(time.Now().UnixNano()))
- for {
- _, err := connPushList[index].Do("lpush", klist[index], []byte("a"))
- assert.Nil(t, err)
- atomic.AddInt32(&pushed[index], 1)
- time.Sleep(time.Microsecond * time.Duration(r.Int31n(1000)))
- if time.Since(start) > time.Second*10 {
- break
- }
- }
- }(i)
- go func(index int) {
- defer wg.Done()
- r := rand.New(rand.NewSource(time.Now().UnixNano()))
- for {
- v, err := goredis.Bytes(connPopList[index].Do("rpop", klist[index]))
- assert.Nil(t, err)
- if len(v) > 0 {
- assert.Equal(t, []byte("a"), v)
- atomic.AddInt32(&poped[index], 1)
- }
- time.Sleep(time.Microsecond * time.Duration(r.Int31n(1000)))
- if time.Since(start) > time.Second*10 {
- break
- }
- }
- }(i)
-
- }
- wg.Wait()
-
- for i, tk := range klist {
- n, err = goredis.Int(c.Do("llen", tk))
- assert.Nil(t, err)
- t.Logf("pushed %v poped %v", atomic.LoadInt32(&pushed[i]), atomic.LoadInt32(&poped[i]))
- assert.True(t, pushed[i] >= poped[i])
- assert.Equal(t, int(pushed[i]-poped[i]), n)
- }
-}
-
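The concurrent section above checks one invariant: once the pushers and poppers stop, llen equals pushed minus popped. A stripped-down, in-memory illustration of the same invariant using the atomic-counter pattern from the test; the list is reduced to a length counter, so this shows the bookkeeping only, not the server behaviour.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

func main() {
	var length, pushed, popped int64
	var wg sync.WaitGroup
	deadline := time.Now().Add(200 * time.Millisecond)

	wg.Add(2)
	go func() { // pusher
		defer wg.Done()
		for time.Now().Before(deadline) {
			atomic.AddInt64(&length, 1)
			atomic.AddInt64(&pushed, 1)
		}
	}()
	go func() { // popper: only counts a pop when an element was available
		defer wg.Done()
		for time.Now().Before(deadline) {
			if atomic.LoadInt64(&length) > 0 {
				atomic.AddInt64(&length, -1)
				atomic.AddInt64(&popped, 1)
			}
		}
	}()
	wg.Wait()
	// remaining length must equal pushed minus popped
	fmt.Println(pushed-popped == atomic.LoadInt64(&length))
}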
-func TestListErrorParams(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:list_err_param"
- if _, err := c.Do("lpush", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("rpush", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("lpop", key, "a"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("rpop", key, "a"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("llen", key, "a"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("lindex", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("lrange", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("lclear"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("lmclear"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("ltrim_front", key, "-1"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("ltrim_back", key, "a"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-}
-
-func TestSet(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key1 := "default:test:testdb_cmd_set_1"
- key2 := "default:test:testdb_cmd_set_2"
-
- if n, err := goredis.Int(c.Do("scard", key1)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("sadd", key1, 0, 1)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("scard", key1)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("sadd", key2, 0, 1, 2, 3)); err != nil {
- t.Fatal(err)
- } else if n != 4 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("srem", key1, 0, 1)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("sismember", key2, 0)); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
-
- if n, err := goredis.MultiBulk(c.Do("smembers", key2)); err != nil {
- t.Fatal(err)
- } else if len(n) != 4 {
- t.Fatal(n)
- }
- if val, err := goredis.String(c.Do("spop", key2)); err != nil {
- t.Fatal(err)
- } else if val != "0" {
- t.Fatal(val)
- }
- if val, err := goredis.String(c.Do("spop", key2)); err != nil {
- t.Fatal(err)
- } else if val != "1" {
- t.Fatal(val)
- }
- if val, err := goredis.Values(c.Do("spop", key2, 4)); err != nil {
- t.Fatal(err)
- } else if len(val) != 2 {
- t.Fatal(val)
- }
- if n, err := goredis.Values(c.Do("smembers", key2)); err != nil {
- t.Fatal(err)
- } else if len(n) != 0 {
- t.Fatal(n)
- }
- // empty spop single will return nil, but spop multi will return empty array
- if val, err := c.Do("spop", key2); err != nil {
- t.Fatal(err)
- } else if val != nil {
- t.Fatal(val)
- }
- if val, err := goredis.Values(c.Do("spop", key2, 2)); err != nil {
- t.Fatal(err)
- } else if val == nil {
- t.Fatal(val)
- } else if len(val) != 0 {
- t.Fatal(val)
- }
- if n, err := goredis.Int(c.Do("sadd", key2, 0, 1, 2, 3)); err != nil {
- t.Fatal(err)
- } else if n != 4 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("sclear", key2)); err != nil {
- t.Fatal(err)
- } else if n != 4 {
- t.Fatal(n)
- }
-}
-
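Per the comment above, a plain spop on an empty set returns a nil reply while spop with a count returns an empty array. A toy in-memory model of that reply-shape difference (not the server code):

package main

import "fmt"

// spop models SPOP without a count: nil when the set is empty.
func spop(set map[string]struct{}) interface{} {
	for m := range set {
		delete(set, m)
		return m
	}
	return nil
}

// spopN models SPOP with a count: always an array, possibly empty.
func spopN(set map[string]struct{}, n int) []string {
	out := []string{}
	for m := range set {
		if len(out) == n {
			break
		}
		delete(set, m)
		out = append(out, m)
	}
	return out
}

func main() {
	s := map[string]struct{}{}
	fmt.Println(spop(s))     // <nil>
	fmt.Println(spopN(s, 2)) // []
}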
-func TestSetErrorParams(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:set_error_param"
- if _, err := c.Do("sadd", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("scard"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("scard", key, key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("sismember", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("sismember", key, "m1", "m2"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("smembers"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("smembers", key, key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("spop"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
- if _, err := c.Do("spop", key, "0"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("srem"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("srem", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("sclear"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("sclear", key, key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("smclear"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
-}
-
-func TestZSet(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:myzset"
-
- //if n, err := goredis.Int(c.Do("zkeyexists", key)); err != nil {
- // t.Fatal(err)
- //} else if n != 0 {
- // t.Fatal(n)
- //}
-
- if n, err := goredis.Int(c.Do("zadd", key, 3, "a", 4, "b")); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- //if n, err := goredis.Int(c.Do("zkeyexists", key)); err != nil {
- // t.Fatal(err)
- //} else if n != 1 {
- // t.Fatal(n)
- //}
-
- if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
- t.Fatal(n)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zadd", key, 1, "a", 2, "b")); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
- t.Fatal(n)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zadd", key, 3, "c", 4, "d")); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
- t.Fatal(err)
- } else if n != 4 {
- t.Fatal(n)
- }
-
- if s, err := goredis.Int(c.Do("zscore", key, "c")); err != nil {
- t.Fatal(err)
- } else if s != 3 {
- t.Fatal(s)
- }
-
- if n, err := goredis.Int(c.Do("zrem", key, "d", "e")); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
- t.Fatal(err)
- } else if n != 3 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zincrby", key, 4, "c")); err != nil {
- t.Fatal(err)
- } else if n != 7 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zincrby", key, -4, "c")); err != nil {
- t.Fatal(err)
- } else if n != 3 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zincrby", key, 4, "d")); err != nil {
- t.Fatal(err)
- } else if n != 4 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
- t.Fatal(err)
- } else if n != 4 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zrem", key, "a", "b", "c", "d")); err != nil {
- t.Fatal(err)
- } else if n != 4 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
-}
-
-func TestZSetInfScore(t *testing.T) {
- //TODO: test +inf , -inf score
- // TODO: test negative score
-}
-
-func TestZSetFloat64Score(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:myzset_float"
-
- if n, err := goredis.Int(c.Do("zadd", key, 3, "a", 3, "b")); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
- t.Fatal(n)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zadd", key, 3.1, "a", 3.2, "b")); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
- t.Fatal(n)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zadd", key, 3.3, "c", 3.4, "d")); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
- t.Fatal(err)
- } else if n != 4 {
- t.Fatal(n)
- }
-
- if s, err := goredis.Float64(c.Do("zscore", key, "c")); err != nil {
- t.Fatal(err)
- } else if s != 3.3 {
- t.Fatal(s)
- }
-
- if n, err := goredis.Int(c.Do("zrem", key, "d", "e")); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
- t.Fatal(err)
- } else if n != 3 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Float64(c.Do("zincrby", key, 4, "c")); err != nil {
- t.Fatal(err)
- } else if n != 7.3 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Float64(c.Do("zincrby", key, -4, "c")); err != nil {
- t.Fatal(err)
- } else if n != 3.3 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Float64(c.Do("zincrby", key, 3.4, "d")); err != nil {
- t.Fatal(err)
- } else if n != 3.4 {
- t.Fatal(n)
- }
- if n, err := goredis.Int(c.Do("zrank", key, "a")); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
- if n, err := goredis.Int(c.Do("zrevrank", key, "b")); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
- if n, err := goredis.Int(c.Do("zrank", key, "c")); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
- if n, err := goredis.Int(c.Do("zrank", key, "d")); err != nil {
- t.Fatal(err)
- } else if n != 3 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zrevrank", key, "a")); err != nil {
- t.Fatal(err)
- } else if n != 3 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
- t.Fatal(err)
- } else if n != 4 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zrem", key, "a", "b", "c", "d")); err != nil {
- t.Fatal(err)
- } else if n != 4 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-}
-
-func TestZSetCount(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:myzset"
- if _, err := goredis.Int(c.Do("zadd", key, 1, "a", 2, "b", 3, "c", 4, "d")); err != nil {
- t.Fatal(err)
- }
-
- if n, err := goredis.Int(c.Do("zcount", key, 2, 4)); err != nil {
- t.Fatal(err)
- } else if n != 3 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcount", key, 4, 4)); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcount", key, 4, 3)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcount", key, "(2", 4)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcount", key, "2", "(4")); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcount", key, "(2", "(4")); err != nil {
- t.Fatal(err)
- } else if n != 1 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcount", key, "-inf", "+inf")); err != nil {
- t.Fatal(err)
- } else if n != 4 {
- t.Fatal(n)
- }
-
- c.Do("zadd", key, 3, "e")
-
- if n, err := goredis.Int(c.Do("zcount", key, "(2", "(4")); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- c.Do("zrem", key, "a", "b", "c", "d", "e")
-}
-
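zcount and the range-by-score commands accept bounds such as (2, -inf and +inf, where a leading parenthesis makes the bound exclusive. A sketch of how such a bound string could be parsed; the server's own parser may differ in details.

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// parseScoreBound interprets a score bound: '(' marks it exclusive and
// -inf/+inf map to infinities; anything else must parse as a float.
func parseScoreBound(s string) (score float64, exclusive bool, err error) {
	if strings.HasPrefix(s, "(") {
		exclusive = true
		s = s[1:]
	}
	switch strings.ToLower(s) {
	case "-inf":
		return math.Inf(-1), exclusive, nil
	case "+inf", "inf":
		return math.Inf(1), exclusive, nil
	}
	score, err = strconv.ParseFloat(s, 64)
	return score, exclusive, err
}

func main() {
	for _, b := range []string{"(2", "4", "-inf", "+inf", "=inf"} {
		v, ex, err := parseScoreBound(b)
		fmt.Println(b, "->", v, "exclusive:", ex, "err:", err)
	}
}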
-func TestZSetRank(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:myzset"
- if _, err := goredis.Int(c.Do("zadd", key, 1, "a", 2, "b", 3, "c", 4, "d")); err != nil {
- t.Fatal(err)
- }
-
- if n, err := goredis.Int(c.Do("zrank", key, "c")); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if _, err := goredis.Int(c.Do("zrank", key, "e")); err != goredis.ErrNil {
- t.Fatal(err)
- }
-
- if n, err := goredis.Int(c.Do("zrevrank", key, "c")); err != nil {
- t.Fatalf("cmd error: %v", err)
- } else if n != 1 {
- t.Fatal(n)
- }
-
- if _, err := goredis.Int(c.Do("zrevrank", key, "e")); err != goredis.ErrNil {
- t.Fatal(err)
- }
-
- key2 := "default:test:myzset2"
- if _, err := goredis.Int(c.Do("zadd", key2, 0, "val0", 1, "val1", 2, "val2", 3, "val3")); err != nil {
- t.Fatal(err)
- }
- if _, err := goredis.Int(c.Do("zadd", key2, 4, "val4", 5, "val5", 6, "val6")); err != nil {
- t.Fatal(err)
- }
- // this is used to test the case for iterator seek to max may cause seek to the next last data
- keyExpire := "default:test:myexpkey"
- keyExpire2 := "default:test:myexpkey2"
- c.Do("setex", keyExpire, 10, "v1")
- c.Do("setex", keyExpire2, 10, "v1")
-
- if n, err := goredis.Int(c.Do("zrank", key2, "val3")); err != nil {
- t.Fatal(err)
- } else if n != 3 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zrevrank", key2, "val3")); err != nil {
- t.Fatalf("cmd error: %v", err)
- } else if n != 3 {
- t.Fatal(n)
- }
-}
-
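The paired checks above rely on zrevrank being the mirror of zrank: for a member that exists, zrevrank == zcard - 1 - zrank. A small illustration over an in-memory score-sorted set; the types are illustrative, not the server's.

package main

import (
	"fmt"
	"sort"
)

type member struct {
	name  string
	score float64
}

// zrank returns the ascending rank of a member by score, or -1 if absent.
func zrank(set []member, name string) int {
	sort.Slice(set, func(i, j int) bool { return set[i].score < set[j].score })
	for i, m := range set {
		if m.name == name {
			return i
		}
	}
	return -1
}

// zrevrank is the descending rank: len(set)-1-zrank for present members.
func zrevrank(set []member, name string) int {
	r := zrank(set, name)
	if r < 0 {
		return -1
	}
	return len(set) - 1 - r
}

func main() {
	set := []member{{"a", 1}, {"b", 2}, {"c", 3}, {"d", 4}}
	fmt.Println(zrank(set, "c"), zrevrank(set, "c")) // 2 1
}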
-func testZSetRange(ay []interface{}, checkValues ...interface{}) error {
- if len(ay) != len(checkValues) {
- return fmt.Errorf("invalid return number %d != %d", len(ay), len(checkValues))
- }
-
- for i := 0; i < len(ay); i++ {
- v, ok := ay[i].([]byte)
- if !ok {
- return fmt.Errorf("invalid data %d %v %T", i, ay[i], ay[i])
- }
-
- switch cv := checkValues[i].(type) {
- case string:
- if string(v) != cv {
- return fmt.Errorf("not equal %s != %s", v, checkValues[i])
- }
- default:
- if s, _ := strconv.Atoi(string(v)); s != checkValues[i] {
- return fmt.Errorf("not equal %s != %v", v, checkValues[i])
- }
- }
-
- }
-
- return nil
-}
-
-func TestZSetRangeScore(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:myzset_range"
- if _, err := goredis.Int(c.Do("zadd", key, 1, "a", 2, "b", 3, "c", 4, "d")); err != nil {
- t.Fatal(err)
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrangebyscore", key, 1, 4, "withscores")); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "a", 1, "b", 2, "c", 3, "d", 4); err != nil {
- t.Fatal(err)
- }
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrangebyscore", key, 1, 4, "withscores", "limit", 1, 2)); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "b", 2, "c", 3); err != nil {
- t.Fatal(err)
- }
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrangebyscore", key, "-inf", "+inf", "withscores")); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "a", 1, "b", 2, "c", 3, "d", 4); err != nil {
- t.Fatal(err)
- }
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrangebyscore", key, "(1", "(4")); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "b", "c"); err != nil {
- t.Fatal(err)
- }
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrevrangebyscore", key, 4, 1, "withscores")); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "d", 4, "c", 3, "b", 2, "a", 1); err != nil {
- t.Fatalf("%v, %v", err, v)
- }
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrevrangebyscore", key, 4, 1, "withscores", "limit", 1, 2)); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "c", 3, "b", 2); err != nil {
- t.Fatal(err)
- }
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrevrangebyscore", key, "+inf", "-inf", "withscores")); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "d", 4, "c", 3, "b", 2, "a", 1); err != nil {
- t.Fatal(err)
- }
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrevrangebyscore", key, "(4", "(1")); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "c", "b"); err != nil {
- t.Fatal(err)
- }
- }
-
- if n, err := goredis.Int(c.Do("zremrangebyscore", key, 2, 3)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrangebyscore", key, 1, 4)); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "a", "d"); err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func TestZSetRange(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:myzset_range_rank"
- if _, err := goredis.Int(c.Do("zadd", key, 1, "a", 2, "b", 3, "c", 4, "d")); err != nil {
- t.Fatal(err)
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrange", key, 0, 3, "withscores")); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "a", 1, "b", 2, "c", 3, "d", 4); err != nil {
- t.Fatal(err)
- }
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrange", key, 1, 4, "withscores")); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "b", 2, "c", 3, "d", 4); err != nil {
- t.Fatal(err)
- }
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrange", key, -2, -1, "withscores")); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "c", 3, "d", 4); err != nil {
- t.Fatal(err)
- }
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrange", key, 0, -1, "withscores")); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "a", 1, "b", 2, "c", 3, "d", 4); err != nil {
- t.Fatal(err)
- }
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrange", key, -1, -2, "withscores")); err != nil {
- t.Fatal(err)
- } else if len(v) != 0 {
- t.Fatal(len(v))
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrevrange", key, 0, 4, "withscores")); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "d", 4, "c", 3, "b", 2, "a", 1); err != nil {
- t.Fatal(err)
- }
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrevrange", key, 0, -1, "withscores")); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "d", 4, "c", 3, "b", 2, "a", 1); err != nil {
- t.Fatal(err)
- }
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrevrange", key, 2, 3, "withscores")); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "b", 2, "a", 1); err != nil {
- t.Fatal(err)
- }
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrevrange", key, -2, -1, "withscores")); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "b", 2, "a", 1); err != nil {
- t.Fatal(err)
- }
- }
-
- if n, err := goredis.Int(c.Do("zremrangebyrank", key, 2, 3)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if v, err := goredis.MultiBulk(c.Do("zrange", key, 0, 4)); err != nil {
- t.Fatal(err)
- } else {
- if err := testZSetRange(v, "a", "b"); err != nil {
- t.Fatal(err)
- }
- }
-
- if n, err := goredis.Int(c.Do("zclear", key)); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int(c.Do("zcard", key)); err != nil {
- t.Fatal(err)
- } else if n != 0 {
- t.Fatal(n)
- }
-
-}
-
-func TestZsetErrorParams(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:zset_error_param"
- //zadd
- if _, err := c.Do("zadd", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("zadd", key, "a", "b", "c"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("zadd", key, "-a", "a"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("zadd", key, "0.1a", "a"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- //zcard
- if _, err := c.Do("zcard"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- //zscore
- if _, err := c.Do("zscore", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- //zrem
- if _, err := c.Do("zrem", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- //zincrby
- if _, err := c.Do("zincrby", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- //zcount
- if _, err := c.Do("zcount", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("zcount", key, "-inf", "=inf"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- //zrank
- if _, err := c.Do("zrank", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- //zrevzrank
- if _, err := c.Do("zrevrank", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- //zremrangebyrank
- if _, err := c.Do("zremrangebyrank", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("zremrangebyrank", key, 0.1, 0.1); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- //zremrangebyscore
- if _, err := c.Do("zremrangebyscore", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("zremrangebyscore", key, "-inf", "a"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("zremrangebyscore", key, 0, "a"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- //zrange
- if _, err := c.Do("zrange", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("zrange", key, 0, 1, "withscore"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("zrange", key, 0, 1, "withscores", "a"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- //zrevrange, almost same as zrange
- if _, err := c.Do("zrevrange", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- //zrangebyscore
-
- if _, err := c.Do("zrangebyscore", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("zrangebyscore", key, 0, 1, "withscore"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("zrangebyscore", key, 0, 1, "withscores", "limit"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("zrangebyscore", key, 0, 1, "withscores", "limi", 1, 1); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("zrangebyscore", key, 0, 1, "withscores", "limit", "a", 1); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("zrangebyscore", key, 0, 1, "withscores", "limit", 1, "a"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- //zrevrangebyscore, almost same as zrangebyscore
- if _, err := c.Do("zrevrangebyscore", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- //zclear
- if _, err := c.Do("zclear"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- //zmclear
- if _, err := c.Do("zmclear"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-}
-
-func TestZSetLex(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:myzlexset"
- if _, err := c.Do("zadd", key,
- 0, "a", 0, "b", 0, "c", 0, "d", 0, "e", 0, "f", 0, "g"); err != nil {
- t.Fatal(err)
- }
-
- if ay, err := goredis.Strings(c.Do("zrangebylex", key, "-", "[c")); err != nil {
- t.Fatal(err)
- } else if !reflect.DeepEqual(ay, []string{"a", "b", "c"}) {
- t.Fatalf("must equal")
- }
-
- if ay, err := goredis.Strings(c.Do("zrangebylex", key, "-", "(c")); err != nil {
- t.Fatal(err)
- } else if !reflect.DeepEqual(ay, []string{"a", "b"}) {
- t.Fatalf("must equal")
- }
-
- if ay, err := goredis.Strings(c.Do("zrangebylex", key, "[aaa", "(g")); err != nil {
- t.Fatal(err)
- } else if !reflect.DeepEqual(ay, []string{"b", "c", "d", "e", "f"}) {
- t.Fatalf("must equal")
- }
-
- if n, err := goredis.Int64(c.Do("zlexcount", key, "-", "(c")); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int64(c.Do("zremrangebylex", key, "[aaa", "(g")); err != nil {
- t.Fatal(err)
- } else if n != 5 {
- t.Fatal(n)
- }
-
- if n, err := goredis.Int64(c.Do("zlexcount", key, "-", "+")); err != nil {
- t.Fatal(err)
- } else if n != 2 {
- t.Fatal(n)
- }
-}
-
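zrangebylex bounds use '-' and '+' for unbounded ends, '[' for an inclusive member bound and '(' for an exclusive one. A sketch of a matcher for that interval syntax, checked against the [aaa / (g case above; not the server's implementation.

package main

import "fmt"

// inLexRange reports whether member falls inside a ZRANGEBYLEX interval
// written with the bound syntax above.
func inLexRange(member, min, max string) bool {
	check := func(bound string, isMin bool) bool {
		switch {
		case bound == "-":
			return isMin // everything is above the minimum
		case bound == "+":
			return !isMin // everything is below the maximum
		case bound[0] == '[':
			if isMin {
				return member >= bound[1:]
			}
			return member <= bound[1:]
		case bound[0] == '(':
			if isMin {
				return member > bound[1:]
			}
			return member < bound[1:]
		}
		return false
	}
	return check(min, true) && check(max, false)
}

func main() {
	for _, m := range []string{"a", "b", "f", "g"} {
		fmt.Println(m, inLexRange(m, "[aaa", "(g")) // a:false b:true f:true g:false
	}
}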
-func checkScanValues(t *testing.T, ay interface{}, values ...interface{}) {
- a, err := goredis.Strings(ay, nil)
- if err != nil {
- t.Error(err)
- }
-
- if len(a) != len(values) {
- t.Error(fmt.Sprintf("len %d != %d", len(a), len(values)))
- }
- for i, v := range a {
- vv := fmt.Sprintf("%v", values[i])
- if string(v) != vv {
- if len(v) == len(vv)+8 {
- if string(v[:len(vv)]) != vv {
- t.Fatal(fmt.Sprintf("%d %s != %v", i, string(v), values[i]))
- }
- } else if len(v)+8 == len(vv) {
- if string(v) != vv[:len(v)] {
- t.Fatal(fmt.Sprintf("%d %s != %v", i, string(v), values[i]))
- }
- } else {
- t.Fatal(fmt.Sprintf("%d %s != %v", i, string(v), values[i]))
- }
- }
- }
-}
-
-func checkAdvanceScan(t *testing.T, c *goredis.PoolConn, tp string) {
- if ay, err := goredis.Values(c.Do("ADVSCAN", "default:testscan:"+"", tp, "count", 5)); err != nil {
- t.Error(err)
- } else if len(ay) != 2 {
- t.Fatal(len(ay))
- //} else if n := ay[0].([]byte); string(n) != "MDpkR1Z6ZEhOallXNDZOQT09Ow==" {
- } else if n := ay[0].([]byte); string(n) != "MDpOQT09Ow==" {
- t.Fatal(string(n))
- } else {
- checkScanValues(t, ay[1], "0", "1", "2", "3", "4")
- }
-
- if ay, err := goredis.Values(c.Do("ADVSCAN", "default:testscan:MDpOQT09Ow==", tp, "count", 6)); err != nil {
- t.Fatal(err)
- } else if len(ay) != 2 {
- t.Fatal(len(ay))
- } else if n := ay[0].([]byte); string(n) != "" {
- t.Fatal(string(n))
- } else {
- checkScanValues(t, ay[1], "5", "6", "7", "8", "9")
- }
-
- if ay, err := goredis.Values(c.Do("ADVSCAN", "default:testscan:MDpkR1Z6ZEhOallXNDZPUT09Ow==", tp, "count", 0)); err != nil {
- t.Fatal(err)
- } else if len(ay) != 2 {
- t.Fatal(len(ay))
- } else if n := ay[0].([]byte); string(n) != "" {
- t.Fatal(string(n))
- } else {
- if len(ay[1].([]interface{})) != 0 {
- t.Fatal(ay[1])
- }
- }
-}
-
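The ADVSCAN cursors asserted above look like nested base64: the outer value decodes to "<partition>:<base64 inner cursor>;", so MDpOQT09Ow== unwraps to partition 0 and inner cursor 4. That layout is only inferred from the literals in this test and should be treated as an implementation detail; the snippet below just decodes them for illustration.

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// decodeCursor unwraps one of the ADVSCAN cursor literals used above.
func decodeCursor(c string) (string, error) {
	outer, err := base64.StdEncoding.DecodeString(c)
	if err != nil {
		return "", err
	}
	s := strings.TrimSuffix(string(outer), ";")
	parts := strings.SplitN(s, ":", 2)
	if len(parts) != 2 {
		return string(outer), nil
	}
	inner, err := base64.StdEncoding.DecodeString(parts[1])
	if err != nil {
		return "", err
	}
	return parts[0] + " -> " + string(inner), nil
}

func main() {
	for _, c := range []string{"MDpOQT09Ow==", "MDpkR1Z6ZEhOallXNDZPUT09Ow=="} {
		s, err := decodeCursor(c)
		fmt.Println(s, err) // "0 -> 4", "0 -> testscan:9"
	}
}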
-func TestScan(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- testKVScan(t, c)
- testHashKeyScan(t, c)
- testListKeyScan(t, c)
- testZSetKeyScan(t, c)
- testSetKeyScan(t, c)
-}
-
-func testKVScan(t *testing.T, c *goredis.PoolConn) {
- for i := 0; i < 10; i++ {
- if _, err := c.Do("set", "default:testscan:"+fmt.Sprintf("%d", i), []byte("value")); err != nil {
- t.Fatal(err)
- }
- }
- checkAdvanceScan(t, c, "KV")
-}
-
-func testHashKeyScan(t *testing.T, c *goredis.PoolConn) {
- for i := 0; i < 10; i++ {
- if _, err := c.Do("hset", "default:testscan:"+fmt.Sprintf("%d", i), fmt.Sprintf("%d", i), []byte("value")); err != nil {
- t.Fatal(err)
- }
- }
-
- checkAdvanceScan(t, c, "HASH")
-}
-
-func testListKeyScan(t *testing.T, c *goredis.PoolConn) {
- for i := 0; i < 10; i++ {
- if _, err := c.Do("lpush", "default:testscan:"+fmt.Sprintf("%d", i), fmt.Sprintf("%d", i)); err != nil {
- t.Fatal(err)
- }
- }
-
- checkAdvanceScan(t, c, "LIST")
-}
-
-func testZSetKeyScan(t *testing.T, c *goredis.PoolConn) {
- for i := 0; i < 10; i++ {
- if _, err := c.Do("zadd", "default:testscan:"+fmt.Sprintf("%d", i), i, []byte("value")); err != nil {
- t.Fatal(err)
- }
- }
-
- checkAdvanceScan(t, c, "ZSET")
-}
-
-func testSetKeyScan(t *testing.T, c *goredis.PoolConn) {
- for i := 0; i < 10; i++ {
- if _, err := c.Do("sadd", "default:testscan:"+fmt.Sprintf("%d", i), fmt.Sprintf("%d", i)); err != nil {
- t.Fatal(err)
- }
- }
-
- checkAdvanceScan(t, c, "SET")
-}
-
-func TestHashScan(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:testscan:scan_hash"
- c.Do("HMSET", key, "a", 1, "b", 2)
-
- if ay, err := goredis.Values(c.Do("HSCAN", key, "")); err != nil {
- t.Fatal(err)
- } else if len(ay) != 2 {
- t.Fatal(len(ay))
- } else {
- checkScanValues(t, ay[1], "a", 1, "b", 2)
- }
-}
-
-func TestSetScan(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:scan_set"
- c.Do("SADD", key, "a", "b")
-
- if ay, err := goredis.Values(c.Do("SSCAN", key, "")); err != nil {
- t.Fatal(err)
- } else if len(ay) != 2 {
- t.Fatal(len(ay))
- } else {
- checkScanValues(t, ay[1], "a", "b")
- }
-}
-
-func TestZSetScan(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:scan_zset"
- c.Do("ZADD", key, 1, "a", 2, "b")
-
- if ay, err := goredis.Values(c.Do("ZSCAN", key, "")); err != nil {
- t.Fatal(err)
- } else if len(ay) != 2 {
- t.Fatal(len(ay))
- } else {
- checkScanValues(t, ay[1], "a", 1, "b", 2)
- }
-}
-
-func TestJSON(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:jsonapi_a"
- n, err := goredis.Int(c.Do("json.keyexists", key))
- assert.Nil(t, err)
- assert.Equal(t, int(0), n)
-
- strRet, err := goredis.String(c.Do("json.set", key, ".a", `"str"`))
- assert.Nil(t, err)
- assert.Equal(t, "OK", strRet)
-
- n, err = goredis.Int(c.Do("json.keyexists", key))
- assert.Nil(t, err)
- assert.Equal(t, int(1), n)
-
- strRets, err := goredis.Strings(c.Do("json.get", key, ".a"))
- assert.Nil(t, err)
- assert.Equal(t, 1, len(strRets))
- assert.Equal(t, "str", strRets[0])
-
- typeStr, err := goredis.String(c.Do("json.type", key, ".a"))
- assert.Nil(t, err)
- assert.Equal(t, "string", typeStr)
-
- typeStr, err = goredis.String(c.Do("json.type", key, ""))
- assert.Nil(t, err)
- assert.Equal(t, "object", typeStr)
-
- strRet, err = goredis.String(c.Do("json.set", key, "1", "3"))
- assert.Nil(t, err)
- assert.Equal(t, "OK", strRet)
-
- strRets, err = goredis.Strings(c.Do("json.get", key, ""))
- assert.Nil(t, err)
- t.Log(strRets)
- assert.Equal(t, 1, len(strRets))
- assert.True(t, strRets[0] != "")
- t.Log(strRets[0])
- assert.True(t, strRets[0] == `{"a":"str","1":3}` || (strRets[0] == `{"1":3,"a":"str"}`))
-
- strRets, err = goredis.Strings(c.Do("json.get", key, "a"))
- assert.Nil(t, err)
- assert.Equal(t, 1, len(strRets))
- assert.Equal(t, "str", strRets[0])
-
- strRets, err = goredis.Strings(c.Do("json.get", key, "1"))
- assert.Nil(t, err)
- assert.Equal(t, 1, len(strRets))
- t.Log(strRets)
- assert.Equal(t, "3", strRets[0])
-
- strRets, err = goredis.Strings(c.Do("json.get", key, "1", "a"))
- assert.Nil(t, err)
- assert.Equal(t, 2, len(strRets))
- t.Log(strRets)
- assert.Equal(t, "3", strRets[0])
- assert.Equal(t, "str", strRets[1])
-
- n, err = goredis.Int(c.Do("json.objlen", key))
- assert.Nil(t, err)
- assert.Equal(t, 2, n)
- strRets, err = goredis.Strings(c.Do("json.objkeys", key))
- assert.Nil(t, err)
- assert.Equal(t, 2, len(strRets))
- for _, s := range strRets {
- assert.True(t, s == "a" || s == "1")
- }
- c.Do("json.del", key, "1")
- strRets, err = goredis.Strings(c.Do("json.get", key, "1"))
- assert.Nil(t, err)
- assert.Equal(t, 1, len(strRets))
- assert.Equal(t, "", strRets[0])
-
- typeStr, err = goredis.String(c.Do("json.type", key, "1"))
- assert.Nil(t, err)
- assert.Equal(t, "null", typeStr)
-
- n, err = goredis.Int(c.Do("json.objlen", key))
- assert.Nil(t, err)
- assert.Equal(t, 1, n)
- strRets, err = goredis.Strings(c.Do("json.objkeys", key))
- assert.Nil(t, err)
- assert.Equal(t, 1, len(strRets))
- for _, s := range strRets {
- assert.True(t, s == "a")
- }
-
- c.Do("json.del", key, "a")
- strRets, err = goredis.Strings(c.Do("json.get", key, ".a"))
- assert.Nil(t, err)
- assert.Equal(t, 1, len(strRets))
- assert.Equal(t, "", strRets[0])
-
- n, err = goredis.Int(c.Do("json.objlen", key))
- assert.Nil(t, err)
- assert.Equal(t, 0, n)
- strRets, err = goredis.Strings(c.Do("json.objkeys", key))
- assert.Nil(t, err)
- assert.Equal(t, 0, len(strRets))
-}
-
-func TestJSONInvalidJSON(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:jsonapi_invalid"
-
- strRet, err := goredis.String(c.Do("json.set", key, ".a", `"str"`))
- assert.Nil(t, err)
- assert.Equal(t, "OK", strRet)
-
- strRet, err = goredis.String(c.Do("json.set", key, "1", "3"))
- assert.Nil(t, err)
- assert.Equal(t, "OK", strRet)
-
- _, err = c.Do("json.set", key, "2", "invalid_str")
- assert.NotNil(t, err)
-}
-
-func TestJSONSetComplexJSON(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:jsonapi_complex"
-
- strRet, err := goredis.String(c.Do("json.set", key, "", `{
- "address": {
- "street": "2 Avenue",
- "zipcode": "10075",
- "building": "1480",
- "coord": [-73.9557413, 40.7720266]
- },
- "borough": "Manhattan",
- "cuisine": "Italian",
- "grades": [
- {
- "date": "2014-10-01",
- "grade": "A",
- "score": 11
- },
- {
- "date": "2014-01-16",
- "grade": "B",
- "score": 17
- }
- ],
- "name": "Vella",
- "restaurant_id": "41704620"
- }`))
-
- assert.Nil(t, err)
- assert.Equal(t, "OK", strRet)
- strRets, err := goredis.Strings(c.Do("json.get", key, "borough"))
- assert.Nil(t, err)
- assert.Equal(t, 1, len(strRets))
- assert.Equal(t, "Manhattan", strRets[0])
- strRets, err = goredis.Strings(c.Do("json.get", key, "address.zipcode"))
- assert.Nil(t, err)
- assert.Equal(t, 1, len(strRets))
- assert.Equal(t, "10075", strRets[0])
- strRets, err = goredis.Strings(c.Do("json.get", key, "grades.0.score"))
- assert.Nil(t, err)
- assert.Equal(t, 1, len(strRets))
- assert.Equal(t, "11", strRets[0])
- c.Do("json.set", key, "cuisine", `"American"`)
- c.Do("json.set", key, "address.street", `"East 31st Street"`)
- strRets, err = goredis.Strings(c.Do("json.get", key, "cuisine"))
- assert.Nil(t, err)
- assert.Equal(t, 1, len(strRets))
- assert.Equal(t, "American", strRets[0])
- strRets, err = goredis.Strings(c.Do("json.get", key, "address.street"))
- assert.Nil(t, err)
- assert.Equal(t, 1, len(strRets))
- assert.Equal(t, "East 31st Street", strRets[0])
-}
-
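json.get accepts dotted paths such as address.zipcode and grades.0.score, where a numeric segment indexes into an array. A minimal resolver over decoded JSON that reproduces the lookups above; the server's own path handling may be stricter (for example around the leading '.').

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

// getPath walks a dotted path through decoded JSON; numeric segments index
// arrays, everything else selects object keys. Returns nil when the path misses.
func getPath(doc interface{}, path string) interface{} {
	cur := doc
	for _, seg := range strings.Split(path, ".") {
		switch v := cur.(type) {
		case map[string]interface{}:
			cur = v[seg]
		case []interface{}:
			i, err := strconv.Atoi(seg)
			if err != nil || i < 0 || i >= len(v) {
				return nil
			}
			cur = v[i]
		default:
			return nil
		}
	}
	return cur
}

func main() {
	var doc interface{}
	json.Unmarshal([]byte(`{"address":{"zipcode":"10075"},"grades":[{"score":11},{"score":17}]}`), &doc)
	fmt.Println(getPath(doc, "address.zipcode")) // 10075
	fmt.Println(getPath(doc, "grades.0.score"))  // 11
}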
-func TestJSONArrayOp(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:json_arrayop_d"
- _, err := c.Do("json.set", key, "", `[1, 2]`)
- assert.Nil(t, err)
- n, err := goredis.Int(c.Do("json.arrappend", key, ".", `{"3":[]}`))
- assert.Nil(t, err)
- assert.Equal(t, 3, n)
-
- n, err = goredis.Int(c.Do("json.arrappend", key, ".", "4", "5"))
- assert.Nil(t, err)
- assert.Equal(t, 5, n)
-
- n, err = goredis.Int(c.Do("json.arrlen", key))
- assert.Nil(t, err)
- assert.Equal(t, 5, n)
-
- n, err = goredis.Int(c.Do("json.arrappend", key, "2.3", "33", "34"))
- assert.Nil(t, err)
- assert.Equal(t, 2, n)
-
- n, err = goredis.Int(c.Do("json.arrlen", key, "2.3"))
- assert.Nil(t, err)
- assert.Equal(t, 2, n)
-
- typeStr, err := goredis.String(c.Do("json.type", key, "2.3"))
- assert.Nil(t, err)
- assert.Equal(t, "array", typeStr)
-
- typeStr, err = goredis.String(c.Do("json.type", key))
- assert.Nil(t, err)
- assert.Equal(t, "array", typeStr)
-
- poped, err := goredis.String(c.Do("json.arrpop", key))
- assert.Nil(t, err)
- assert.Equal(t, "5", poped)
-
- poped, err = goredis.String(c.Do("json.arrpop", key))
- assert.Nil(t, err)
- assert.Equal(t, "4", poped)
-
- n, err = goredis.Int(c.Do("json.arrlen", key))
- assert.Nil(t, err)
- assert.Equal(t, 3, n)
-
- poped, err = goredis.String(c.Do("json.arrpop", key, "2.3"))
- assert.Nil(t, err)
- assert.Equal(t, "34", poped)
-
- n, err = goredis.Int(c.Do("json.arrlen", key, "2.3"))
- assert.Nil(t, err)
- assert.Equal(t, 1, n)
-
- poped, err = goredis.String(c.Do("json.arrpop", key))
- assert.Nil(t, err)
- assert.Equal(t, `{"3":[33]}`, poped)
-
- n, err = goredis.Int(c.Do("json.arrlen", key))
- assert.Nil(t, err)
- assert.Equal(t, 2, n)
-
- poped, err = goredis.String(c.Do("json.arrpop", key))
- assert.Nil(t, err)
- assert.Equal(t, "2", poped)
- poped, err = goredis.String(c.Do("json.arrpop", key))
- assert.Nil(t, err)
- assert.Equal(t, "1", poped)
-
- n, err = goredis.Int(c.Do("json.arrlen", key))
- assert.Nil(t, err)
- assert.Equal(t, 0, n)
-
- poped, err = goredis.String(c.Do("json.arrpop", key))
- assert.Nil(t, err)
- assert.Equal(t, "", poped)
-}
-
-func TestJSONErrorParams(t *testing.T) {
- c := getTestConn(t)
- defer c.Close()
-
- key := "default:test:json_err_param"
- if _, err := c.Do("json.set", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("json.get", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("json.del", key); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("json.arrylen"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("json.arrappend", key, "a"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("json.arrpop"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("json.objkeys"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-
- if _, err := c.Do("json.objlen"); err == nil {
- t.Fatalf("invalid err of %v", err)
- }
-}
diff --git a/server/scan_merge.go b/server/scan_merge.go
index c4c7fea6..06bcdd8a 100644
--- a/server/scan_merge.go
+++ b/server/scan_merge.go
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/base64"
"errors"
+ "fmt"
"reflect"
"strconv"
"strings"
@@ -11,9 +12,9 @@ import (
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/node"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/node"
)
var (
@@ -46,7 +47,7 @@ func (s *Server) doScanCommon(cmd redcon.Command) ([]interface{}, []byte, error)
var err error
rawKey := cmd.Args[1]
- _, rk, err := common.ExtractNamesapce(rawKey)
+ rk, err := common.CutNamesapce(rawKey)
if err != nil {
return nil, nil, err
}
@@ -72,8 +73,11 @@ func (s *Server) doScanCommon(cmd redcon.Command) ([]interface{}, []byte, error)
var wg sync.WaitGroup
var results []interface{}
- handlers, cmds, _, err := s.GetMergeHandlers(cmd)
+ hasWrite, handlers, cmds, _, err := s.GetMergeHandlers(cmd)
if err == nil {
+ if hasWrite && node.IsSyncerOnly() {
+ return nil, nil, fmt.Errorf("The cluster is only allowing syncer write")
+ }
length := len(handlers)
everyCount := count / length
results = make([]interface{}, length)
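doScanCommon above splits the requested count evenly across the merge handlers and runs them concurrently before combining the results. A distilled sketch of that fan-out/merge shape; scanPart and the handlers here are illustrative stand-ins, not the server's types.

package main

import (
	"fmt"
	"sync"
)

// scanPart stands in for a per-partition scan handler.
type scanPart func(count int) []string

// mergeScan gives every partition an even share of the requested count,
// runs the scans concurrently, and concatenates the per-partition results.
func mergeScan(parts []scanPart, count int) []string {
	every := count / len(parts)
	results := make([][]string, len(parts))
	var wg sync.WaitGroup
	for i, p := range parts {
		wg.Add(1)
		go func(i int, p scanPart) {
			defer wg.Done()
			results[i] = p(every)
		}(i, p)
	}
	wg.Wait()
	var merged []string
	for _, r := range results {
		merged = append(merged, r...)
	}
	return merged
}

func main() {
	parts := []scanPart{
		func(n int) []string { return []string{"p0-a", "p0-b"}[:n] },
		func(n int) []string { return []string{"p1-a", "p1-b"}[:n] },
	}
	fmt.Println(mergeScan(parts, 2)) // [p0-a p1-a]
}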
diff --git a/server/secondary_index_merge.go b/server/secondary_index_merge.go
index aa2f102f..603df038 100644
--- a/server/secondary_index_merge.go
+++ b/server/secondary_index_merge.go
@@ -4,8 +4,8 @@ import (
"bytes"
"strconv"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/node"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/node"
"github.com/absolute8511/redcon"
)
diff --git a/server/server.go b/server/server.go
index ddc542b5..75bae11b 100644
--- a/server/server.go
+++ b/server/server.go
@@ -2,6 +2,7 @@ package server
import (
"errors"
+ "fmt"
"io"
"net/http"
"net/url"
@@ -12,18 +13,24 @@ import (
"sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/rockredis"
-
- "github.com/absolute8511/ZanRedisDB/cluster"
- "github.com/absolute8511/ZanRedisDB/cluster/datanode_coord"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/node"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/stats"
- "github.com/absolute8511/ZanRedisDB/transport/rafthttp"
+ _ "net/http/pprof"
+
+ "github.com/youzan/ZanRedisDB/engine"
+ "github.com/youzan/ZanRedisDB/slow"
+
"github.com/absolute8511/redcon"
+ ps "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ "github.com/youzan/ZanRedisDB/cluster"
+ "github.com/youzan/ZanRedisDB/cluster/datanode_coord"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/metric"
+ "github.com/youzan/ZanRedisDB/node"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/stats"
+ "github.com/youzan/ZanRedisDB/transport/rafthttp"
"golang.org/x/net/context"
)
@@ -31,7 +38,12 @@ var (
errRaftGroupNotReady = errors.New("raft group not ready")
)
-var sLog = common.NewLevelLogger(common.LOG_INFO, common.NewDefaultLogger("server"))
+const (
+ slowClusterWriteLogTime = time.Millisecond * 500
+ slowPreWaitQueueTime = time.Second * 2
+)
+
+var sLog = common.NewLevelLogger(common.LOG_INFO, common.NewLogger())
func SetLogger(level int32, logger common.Logger) {
sLog.SetLevel(level)
@@ -42,6 +54,20 @@ func SLogger() *common.LevelLogger {
return sLog
}
+type writeQ struct {
+ q *entryQueue
+ stopC chan struct{}
+ readyC chan struct{}
+}
+
+func newWriteQ(len uint64) *writeQ {
+ return &writeQ{
+ q: newEntryQueue(len, 0),
+ stopC: make(chan struct{}),
+ readyC: make(chan struct{}, 1),
+ }
+}
+
type Server struct {
mutex sync.Mutex
conf ServerConfig
@@ -53,13 +79,13 @@ type Server struct {
nsMgr *node.NamespaceMgr
startTime time.Time
maxScanJob int32
- scanStats common.ScanStats
+ scanStats metric.ScanStats
}
-func NewServer(conf ServerConfig) *Server {
+func NewServer(conf ServerConfig) (*Server, error) {
hname, err := os.Hostname()
if err != nil {
- sLog.Fatal(err)
+ return nil, err
}
if conf.TickMs < 100 {
conf.TickMs = 100
@@ -73,13 +99,18 @@ func NewServer(conf ServerConfig) *Server {
if conf.ProfilePort == 0 {
conf.ProfilePort = 7666
}
+ if conf.DefaultSnapCount > 0 {
+ common.DefaultSnapCount = conf.DefaultSnapCount
+ }
+ if conf.DefaultSnapCatchup > 0 {
+ common.DefaultSnapCatchup = conf.DefaultSnapCatchup
+ }
- if conf.SyncerWriteOnly {
- node.SetSyncerOnly(true)
+ if conf.UseRedisV2 {
+ node.UseRedisV2 = true
}
- if conf.LearnerRole != "" && conf.SyncerNormalInit {
- sLog.Infof("server started as normal init")
- node.SetSyncerNormalInit()
+ if conf.SlowLimiterRefuseCostMs > 0 {
+ node.ChangeSlowRefuseCost(conf.SlowLimiterRefuseCostMs)
}
myNode := &cluster.NodeInfo{
@@ -99,18 +130,23 @@ func NewServer(conf ServerConfig) *Server {
}
if conf.ClusterID == "" {
- sLog.Fatalf("cluster id can not be empty")
+ return nil, errors.New("cluster id can not be empty")
}
if conf.BroadcastInterface != "" {
myNode.NodeIP = common.GetIPv4ForInterfaceName(conf.BroadcastInterface)
}
+ if conf.RsyncLimit > 0 {
+ common.SetRsyncLimit(conf.RsyncLimit)
+ }
if myNode.NodeIP == "" {
myNode.NodeIP = conf.BroadcastAddr
} else {
conf.BroadcastAddr = myNode.NodeIP
}
if myNode.NodeIP == "0.0.0.0" || myNode.NodeIP == "" {
- sLog.Fatalf("can not decide the broadcast ip: %v", myNode.NodeIP)
+ err := fmt.Errorf("can not decide the broadcast ip: %v , %v", myNode.NodeIP, conf.BroadcastInterface)
+ sLog.Errorf(err.Error())
+ return nil, err
}
conf.LocalRaftAddr = strings.Replace(conf.LocalRaftAddr, "0.0.0.0", myNode.NodeIP, 1)
myNode.RaftTransportAddr = conf.LocalRaftAddr
@@ -118,6 +154,7 @@ func NewServer(conf ServerConfig) *Server {
myNode.Tags[k] = tag
}
os.MkdirAll(conf.DataDir, common.DIR_PERM)
+ slow.SetRemoteLogger(conf.RemoteLogAddr)
s := &Server{
conf: conf,
@@ -143,23 +180,44 @@ func NewServer(conf ServerConfig) *Server {
DataRootDir: conf.DataDir,
TickMs: conf.TickMs,
ElectionTick: conf.ElectionTick,
+ KeepBackup: conf.KeepBackup,
+ KeepWAL: conf.KeepWAL,
+ UseRocksWAL: conf.UseRocksWAL,
+ SharedRocksWAL: conf.SharedRocksWAL,
LearnerRole: conf.LearnerRole,
RemoteSyncCluster: conf.RemoteSyncCluster,
StateMachineType: conf.StateMachineType,
RocksDBOpts: conf.RocksDBOpts,
+ WALRocksDBOpts: conf.WALRocksDBOpts,
}
if mconf.RocksDBOpts.UseSharedCache || mconf.RocksDBOpts.AdjustThreadPool || mconf.RocksDBOpts.UseSharedRateLimiter {
- sc := rockredis.NewSharedRockConfig(conf.RocksDBOpts)
+ sc, err := engine.NewSharedEngConfig(conf.RocksDBOpts)
+ if err != nil {
+ return nil, err
+ }
mconf.RocksDBSharedConfig = sc
}
+
+ if mconf.UseRocksWAL {
+ if mconf.WALRocksDBOpts.UseSharedCache || mconf.WALRocksDBOpts.AdjustThreadPool || mconf.WALRocksDBOpts.UseSharedRateLimiter {
+ sc, err := engine.NewSharedEngConfig(conf.WALRocksDBOpts)
+ if err != nil {
+ return nil, err
+ }
+ mconf.WALRocksDBSharedConfig = sc
+ }
+ }
s.nsMgr = node.NewNamespaceMgr(s.raftTransport, mconf)
myNode.RegID = mconf.NodeID
if conf.EtcdClusterAddresses != "" {
- r := cluster.NewDNEtcdRegister(conf.EtcdClusterAddresses)
+ r, err := cluster.NewDNEtcdRegister(conf.EtcdClusterAddresses)
+ if err != nil {
+ return nil, err
+ }
s.dataCoord = datanode_coord.NewDataCoordinator(conf.ClusterID, myNode, s.nsMgr)
if err := s.dataCoord.SetRegister(r); err != nil {
- sLog.Fatalf("failed to init register for coordinator: %v", err)
+ return nil, err
}
s.raftTransport.ID = types.ID(s.dataCoord.GetMyRegID())
s.nsMgr.SetIClusterInfo(s.dataCoord)
@@ -167,11 +225,94 @@ func NewServer(conf ServerConfig) *Server {
s.raftTransport.ID = types.ID(myNode.RegID)
}
- return s
+ metricAddr := conf.MetricAddr
+ if metricAddr == "" {
+ metricAddr = ":8800"
+ }
+ go func() {
+ mux := http.NewServeMux()
+ mux.Handle("/metrics", promhttp.Handler())
+ http.ListenAndServe(metricAddr, mux)
+ }()
+
+ return s, nil
+}
+
+func (s *Server) getOrInitSyncerWriteOnly() error {
+ if s.conf.SyncerWriteOnly {
+ node.SetSyncerOnly(true)
+ }
+ initValue := node.IsSyncerOnly()
+ if s.dataCoord == nil {
+ return nil
+ }
+ // if the etcd key exists, use the value from etcd; otherwise set it to initValue and write it back to etcd
+ origV, err := s.dataCoord.GetSyncerWriteOnly()
+ if err == nil {
+ node.SetSyncerOnly(origV)
+ return nil
+ }
+ if err != cluster.ErrKeyNotFound {
+ return err
+ }
+ err = s.updateSyncerWriteOnlyToRegister(initValue)
+ if err != nil {
+ return err
+ }
+ node.SetSyncerOnly(initValue)
+ return nil
+}
+
+func (s *Server) updateSyncerWriteOnlyToRegister(isSyncerWriteOnly bool) error {
+ if s.dataCoord == nil {
+ return nil
+ }
+ return s.dataCoord.UpdateSyncerWriteOnly(isSyncerWriteOnly)
+}
+
+func (s *Server) getOrInitSyncerNormalInit() error {
+ if s.conf.LearnerRole != "" && s.conf.SyncerNormalInit {
+ sLog.Infof("server started as normal init")
+ node.SetSyncerNormalInit(true)
+ }
+ initValue := node.IsSyncerNormalInit()
+ if s.dataCoord == nil {
+ return nil
+ }
+ if s.conf.LearnerRole == "" {
+ // a non-syncer node does not need to write this state to etcd
+ return nil
+ }
+ // if the etcd key exists, use the value from etcd; otherwise set it to initValue and write it back to etcd
+ origV, err := s.dataCoord.GetSyncerNormalInit()
+ if err == nil {
+ sLog.Infof("server started normal init state is: %v", origV)
+ node.SetSyncerNormalInit(origV)
+ return nil
+ }
+ if err != cluster.ErrKeyNotFound {
+ return err
+ }
+ err = s.updateSyncerNormalInitToRegister(initValue)
+ if err != nil {
+ return err
+ }
+ node.SetSyncerNormalInit(initValue)
+ return nil
+}
+
+func (s *Server) updateSyncerNormalInitToRegister(v bool) error {
+ if s.dataCoord == nil {
+ return nil
+ }
+ return s.dataCoord.UpdateSyncerNormalInit(v)
}
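getOrInitSyncerWriteOnly and getOrInitSyncerNormalInit above follow the same shape: prefer the value already stored in the registry, and only when the key is missing publish the local default and adopt it. A compact sketch of that pattern against a stand-in registry; errKeyNotFound here plays the role of cluster.ErrKeyNotFound.

package main

import (
	"errors"
	"fmt"
)

var errKeyNotFound = errors.New("key not found")

// registry is a stand-in for the etcd-backed coordinator.
type registry struct{ vals map[string]bool }

func (r *registry) get(k string) (bool, error) {
	v, ok := r.vals[k]
	if !ok {
		return false, errKeyNotFound
	}
	return v, nil
}

func (r *registry) set(k string, v bool) error {
	r.vals[k] = v
	return nil
}

// getOrInit returns the registered value if present; otherwise it publishes
// the local default and returns that.
func getOrInit(r *registry, key string, local bool) (bool, error) {
	v, err := r.get(key)
	if err == nil {
		return v, nil
	}
	if err != errKeyNotFound {
		return false, err
	}
	if err := r.set(key, local); err != nil {
		return false, err
	}
	return local, nil
}

func main() {
	r := &registry{vals: map[string]bool{}}
	fmt.Println(getOrInit(r, "syncer_write_only", true))  // publishes and returns true
	fmt.Println(getOrInit(r, "syncer_write_only", false)) // reads the stored true
}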
func (s *Server) Stop() {
sLog.Infof("server begin stopping")
+ s.nsMgr.BackupDB("", true)
+ // wait for the backup to finish
+ time.Sleep(time.Second * 3)
if s.dataCoord != nil {
s.dataCoord.Stop()
} else {
@@ -184,6 +325,7 @@ func (s *Server) Stop() {
default:
}
close(s.stopC)
+
s.raftTransport.Stop()
s.wg.Wait()
sLog.Infof("server stopped")
@@ -200,17 +342,43 @@ func (s *Server) GetNamespaceFromFullName(ns string) *node.NamespaceNode {
return s.nsMgr.GetNamespaceNode(ns)
}
-func (s *Server) GetLogSyncStatsInSyncLearner() ([]common.LogSyncStats, []common.LogSyncStats) {
+func (s *Server) GetLogSyncStatsInSyncLearner() ([]metric.LogSyncStats, []metric.LogSyncStats) {
return s.nsMgr.GetLogSyncStatsInSyncer()
}
-func (s *Server) GetLogSyncStats(leaderOnly bool, srcClusterName string) []common.LogSyncStats {
+func (s *Server) GetLogSyncStats(leaderOnly bool, srcClusterName string) []metric.LogSyncStats {
return s.nsMgr.GetLogSyncStats(leaderOnly, srcClusterName)
}
-func (s *Server) GetStats(leaderOnly bool) common.ServerStats {
- var ss common.ServerStats
- ss.NSStats = s.nsMgr.GetStats(leaderOnly)
+func (s *Server) GetTableStats(leaderOnly bool, table string) map[string]metric.TableStats {
+ var ss metric.ServerStats
+ ss.NSStats = s.nsMgr.GetStats(leaderOnly, table, true)
+ allTbs := make(map[string]metric.TableStats)
+ for _, s := range ss.NSStats {
+ ns, _ := common.GetNamespaceAndPartition(s.Name)
+ var tbs metric.TableStats
+ tbs.Name = table
+ if t, ok := allTbs[ns]; ok {
+ tbs = t
+ }
+ for _, ts := range s.TStats {
+ if ts.Name != table {
+ continue
+ }
+ tbs.KeyNum += ts.KeyNum
+ tbs.DiskBytesUsage += ts.DiskBytesUsage
+ tbs.ApproximateKeyNum += ts.ApproximateKeyNum
+ }
+ if tbs.KeyNum > 0 || tbs.DiskBytesUsage > 0 || tbs.ApproximateKeyNum > 0 {
+ allTbs[ns] = tbs
+ }
+ }
+ return allTbs
+}
+
+func (s *Server) GetStats(leaderOnly bool, tableDetail bool) metric.ServerStats {
+ var ss metric.ServerStats
+ ss.NSStats = s.nsMgr.GetStats(leaderOnly, "", tableDetail)
ss.ScanStats = s.scanStats.Copy()
return ss
}
@@ -219,8 +387,8 @@ func (s *Server) GetDBStats(leaderOnly bool) map[string]string {
return s.nsMgr.GetDBStats(leaderOnly)
}
-func (s *Server) OptimizeDB(ns string, table string) {
- s.nsMgr.OptimizeDB(ns, table)
+func (s *Server) GetWALDBStats(leaderOnly bool) map[string]map[string]interface{} {
+ return s.nsMgr.GetWALDBStats(leaderOnly)
}
func (s *Server) DeleteRange(ns string, dtr node.DeleteTableRange) error {
@@ -239,13 +407,29 @@ func (s *Server) RestartAsStandalone(fullNamespace string) error {
}
func (s *Server) Start() {
+ err := s.getOrInitSyncerWriteOnly()
+ if err != nil {
+ sLog.Panicf("failed to init syncer write only state: %v", err.Error())
+ }
+ err = s.getOrInitSyncerNormalInit()
+ if err != nil {
+ sLog.Panicf("failed to init syncer normal init state: %v", err.Error())
+ }
+
s.raftTransport.Start()
s.stopC = make(chan struct{})
+
s.wg.Add(1)
go func() {
defer s.wg.Done()
s.serveRaft(s.stopC)
}()
+
+ if s.conf.ProfilePort >= 0 {
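+ // with a nil handler this serves http.DefaultServeMux, which is typically where debug/profile handlers are registered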
+ //http.DefaultServeMux.Handle("/debug/fgprof", fgprof.Handler())
+ go http.ListenAndServe(":"+strconv.Itoa(s.conf.ProfilePort), nil)
+ }
+
s.wg.Add(1)
// redis api enable first, because there are many partitions, some partitions may recover first
// and become leader. In this way we need redis api enabled to allow r/w these partitions.
@@ -258,61 +442,96 @@ func (s *Server) Start() {
defer s.wg.Done()
s.serveGRPCAPI(s.conf.GrpcAPIPort, s.stopC)
}()
-
+ // start the http api early so management requests can be served before everything else has started
+ s.wg.Add(1)
+ go func() {
+ defer s.wg.Done()
+ s.serveHttpAPI(s.conf.HttpAPIPort, s.stopC)
+ }()
+ s.wg.Add(1)
+ go func() {
+ defer s.wg.Done()
+ s.metricLoop(s.stopC)
+ }()
if s.dataCoord != nil {
err := s.dataCoord.Start()
if err != nil {
- sLog.Fatalf("data coordinator start failed: %v", err)
+ sLog.Panicf("data coordinator start failed: %v", err)
}
} else {
s.nsMgr.Start()
}
-
- s.wg.Add(1)
- go func() {
- defer s.wg.Done()
- s.serveHttpAPI(s.conf.HttpAPIPort, s.stopC)
- }()
}
-func (s *Server) GetHandler(cmdName string, cmd redcon.Command) (bool, common.CommandFunc, redcon.Command, error) {
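+// GetPKAndHashSum extracts the namespace and primary key from the first command argument and
+// returns the key hash used for partition routing.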
+func GetPKAndHashSum(cmdName string, cmd redcon.Command) (string, []byte, int, error) {
if len(cmd.Args) < 2 {
- return false, nil, cmd, common.ErrInvalidArgs
+ return "", nil, 0, common.ErrInvalidArgs
}
rawKey := cmd.Args[1]
namespace, pk, err := common.ExtractNamesapce(rawKey)
if err != nil {
- sLog.Infof("failed to get the namespace of the redis command:%v", string(rawKey))
- return false, nil, cmd, err
+ return namespace, nil, 0, err
+ }
+ pkSum := node.HashedKey(pk)
+ return namespace, pk, pkSum, nil
+}
+
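+// GetHandleNode resolves the partition node that owns the given primary key hash and rejects
+// the request if that node is stopping.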
+func (s *Server) GetHandleNode(ns string, pk []byte, pkSum int, cmdName string,
+ cmd redcon.Command) (*node.KVNode, error) {
+ if len(cmd.Args) < 2 {
+ return nil, common.ErrInvalidArgs
}
// we need decide the partition id from the primary key
// if the command need cross multi partitions, we need handle separate
- n, err := s.nsMgr.GetNamespaceNodeWithPrimaryKey(namespace, pk)
+ n, err := s.nsMgr.GetNamespaceNodeWithPrimaryKeySum(ns, pk, pkSum)
if err != nil {
- return false, nil, cmd, err
+ return nil, err
+ }
+ if n.Node.IsStopping() {
+ return nil, common.ErrStopped
+ }
+ return n.Node, nil
+}
+
+func isAllowStaleReadCmd(cmdName string) bool {
+ if strings.HasPrefix(cmdName, "stale.") {
+ return true
}
- // TODO: for multi primary keys such as mset, mget, we need make sure they are all in the same partition
- h, isWrite, ok := n.Node.GetHandler(cmdName)
+ return false
+}
+
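+// GetHandler looks up the read handler for cmdName; on non-leader nodes reads are rejected
+// unless stale reads are globally allowed or the command uses the "stale." prefix.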
+func (s *Server) GetHandler(cmdName string,
+ cmd redcon.Command, kvn *node.KVNode) (common.CommandFunc, redcon.Command, error) {
+ // for multiple primary keys such as mset and mget, we need to make sure they are all in the same partition
+ h, ok := kvn.GetHandler(cmdName)
if !ok {
- return isWrite, nil, cmd, common.ErrInvalidCommand
+ return nil, cmd, common.ErrInvalidCommand
}
- if !isWrite && !n.Node.IsLead() && (atomic.LoadInt32(&allowStaleRead) == 0) {
+ if !kvn.IsLead() && (atomic.LoadInt32(&allowStaleRead) == 0) && !isAllowStaleReadCmd(cmdName) {
// read only to leader to avoid stale read
- // TODO: also read command can request the raft read index if not leader
- return isWrite, nil, cmd, node.ErrNamespaceNotLeader
+ return nil, cmd, node.ErrNamespaceNotLeader
+ }
+ return h, cmd, nil
+}
+
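+// GetWriteHandler looks up the write handler for cmdName on the resolved partition node.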
+func (s *Server) GetWriteHandler(cmdName string,
+ cmd redcon.Command, kvn *node.KVNode) (common.WriteCommandFunc, redcon.Command, error) {
+ h, ok := kvn.GetWriteHandler(cmdName)
+ if !ok {
+ return nil, cmd, common.ErrInvalidCommand
}
- return isWrite, h, cmd, nil
+ return h, cmd, nil
}
func (s *Server) serveRaft(stopCh <-chan struct{}) {
url, err := url.Parse(s.conf.LocalRaftAddr)
if err != nil {
- sLog.Fatalf("failed parsing raft url: %v", err)
+ sLog.Panicf("failed parsing raft url: %v", err)
}
ln, err := common.NewStoppableListener(url.Host, stopCh)
if err != nil {
- sLog.Fatalf("failed to listen rafthttp : %v", err)
+ sLog.Panicf("failed to listen rafthttp : %v", err)
}
err = (&http.Server{Handler: s.raftTransport.Handler()}).Serve(ln)
select {
@@ -326,8 +545,35 @@ func (s *Server) serveRaft(stopCh <-chan struct{}) {
// implement the Raft interface for transport
func (s *Server) Process(ctx context.Context, m raftpb.Message) error {
if m.Type == raftpb.MsgVoteResp {
- sLog.Infof("got message from raft transport %v ", m.String())
+ sLog.Debugf("got vote resp message from raft transport %v ", m.String())
+ }
+
+ if len(m.Entries) > 0 {
+ // debug slow raft log transfer when the cost stats level is verbose enough
+ level := atomic.LoadInt32(&costStatsLevel)
+ evnt := m.Entries[0]
+ if evnt.Data != nil && level > 2 {
+ var reqList node.BatchInternalRaftRequest
+ err := reqList.Unmarshal(evnt.Data)
+ if err == nil && reqList.Timestamp > 0 {
+ n := time.Now().UnixNano()
+ rt := n - reqList.Timestamp
+ if rt > int64(time.Millisecond*100) {
+ sLog.Warningf("receive raft request slow cost: %v, src: %v", rt, reqList.ReqNum)
+ if len(m.Entries) > 1 || len(reqList.Reqs) > 1 {
+ oldest := reqList.Reqs[0].Header.Timestamp
+ newest := m.Entries[len(m.Entries)-1]
+ reqList.Unmarshal(newest.Data)
+ if len(reqList.Reqs) > 0 {
+ diff := reqList.Reqs[len(reqList.Reqs)-1].Header.Timestamp - oldest
+ sLog.Infof("recieve raft request slow, max time diff: %v", diff)
+ }
+ }
+ }
+ }
+ }
}
+
kv := s.nsMgr.GetNamespaceNodeFromGID(m.ToGroup.GroupId)
if kv == nil {
sLog.Errorf("from %v, to %v(%v), kv namespace not found while processing %v, %v, %v ",
@@ -381,3 +627,148 @@ func (s *Server) SaveDBFrom(r io.Reader, msg raftpb.Message) (int64, error) {
return kv.Node.SaveDBFrom(r, msg)
}
+
+func (s *Server) GetNsMgr() *node.NamespaceMgr {
+ return s.nsMgr
+}
+
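+// handleRedisSingleCmd dispatches a single-key redis command: it tries the read handler first,
+// falls back to the write handler, and rejects writes while the cluster is in syncer-only mode.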
+func (s *Server) handleRedisSingleCmd(cmdName string, pk []byte, pkSum int, kvn *node.KVNode, conn redcon.Conn, cmd redcon.Command) error {
+ isWrite := false
+ var h common.CommandFunc
+ var wh common.WriteCommandFunc
+ h, cmd, err := s.GetHandler(cmdName, cmd, kvn)
+ if err == common.ErrInvalidCommand {
+ wh, cmd, err = s.GetWriteHandler(cmdName, cmd, kvn)
+ isWrite = (err == nil)
+ }
+ if err != nil {
+ return fmt.Errorf("%s : Err handle command %s", err.Error(), cmdName)
+ }
+
+ if isWrite && node.IsSyncerOnly() {
+ return fmt.Errorf("The cluster is only allowing syncer write : ERR handle command %s ", cmdName)
+ }
+ if isWrite {
+ s.handleRedisWrite(cmdName, kvn, pk, pkSum, wh, conn, cmd)
+ } else {
+ metric.ReadCmdCounter.Inc()
+ h(conn, cmd)
+ }
+ return nil
+}
+
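+// handleRedisWrite runs a write command: on the leader it first checks the slow limiter and the
+// pre-wait queue, then invokes the write handler, waits for the async FutureRsp if one is
+// returned, records write stats and slow logs, and finally writes the redis response.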
+func (s *Server) handleRedisWrite(cmdName string, kvn *node.KVNode,
+ pk []byte, pkSum int, h common.WriteCommandFunc, conn redcon.Conn, cmd redcon.Command) {
+ start := time.Now()
+ table, _, _ := common.ExtractTable(pk)
+ // check whether we need to slow down proposals when the state machine apply cannot catch up
+ // with the raft logs.
+ // only refuse on the leader, since a follower may fall behind while applying and it is
+ // expected that its apply buffer can be full.
+ if kvn.IsLead() && !kvn.CanPass(start.UnixNano(), cmdName, string(table)) {
+ conn.WriteError(node.ErrSlowLimiterRefused.Error())
+ return
+ }
+ var sw *node.SlowWaitDone
+ if kvn.IsLead() {
+ var err error
+ ctx, cancel := context.WithTimeout(context.Background(), slowPreWaitQueueTime)
+ sw, err = kvn.PreWaitQueue(ctx, cmdName, string(table))
+ cancel()
+ if err != nil {
+ conn.WriteError(err.Error())
+ return
+ }
+ }
+
+ rsp, err := h(cmd)
+ cost1 := time.Since(start)
+ var v interface{}
+ v = rsp
+ if err != nil {
+ v = err
+ } else {
+ futureRsp, ok := rsp.(*node.FutureRsp)
+ if ok {
+ // wait and get async response
+ var frsp interface{}
+ frsp, err = futureRsp.WaitRsp()
+ if err != nil {
+ v = err
+ } else {
+ v = frsp
+ }
+ } else {
+ // the command is in sync mode
+ }
+ }
+ cost2 := time.Since(start)
+ kvn.UpdateWriteStats(int64(len(cmd.Raw)), cost2.Microseconds())
+ slow.LogSlowForSteps(
+ slowClusterWriteLogTime,
+ common.LOG_INFO,
+ slow.NewSlowLogInfo(kvn.GetFullName(), string(pk), "write request "+cmdName),
+ cost1,
+ cost2,
+ )
+ kvn.MaybeAddSlow(start.Add(cost2).UnixNano(), cost2, cmdName, string(table))
+ if err == nil && !kvn.IsWriteReady() {
+ sLog.Infof("write request %s on raft success but raft member is less than replicator",
+ cmd.Raw)
+ }
+
+ if sw != nil {
+ sw.Done()
+ }
+ switch rv := v.(type) {
+ case error:
+ conn.WriteError(rv.Error())
+ case string:
+ // note: a simple string should use WriteString, while a binary string should use
+ // WriteBulk or WriteBulkString
+ conn.WriteString(rv)
+ case int64:
+ conn.WriteInt64(rv)
+ case int:
+ conn.WriteInt64(int64(rv))
+ case nil:
+ conn.WriteNull()
+ case []byte:
+ conn.WriteBulk(rv)
+ case [][]byte:
+ conn.WriteArray(len(rv))
+ for _, d := range rv {
+ conn.WriteBulk(d)
+ }
+ default:
+ // do we have any other resp array types for write commands that are not [][]byte?
+ conn.WriteError("Invalid response type")
+ }
+}
+
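+// metricLoop exports per-table key counts and disk usage of leader namespaces to the metric
+// gauges every 10 seconds.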
+func (s *Server) metricLoop(stopC chan struct{}) {
+ ticker := time.NewTicker(time.Second * 10)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-stopC:
+ return
+ case <-ticker.C:
+ stats := s.nsMgr.GetStats(true, "", true)
+ for _, stat := range stats {
+ ns := stat.Name
+ for _, ts := range stat.TStats {
+ metric.TableKeyNum.With(ps.Labels{
+ "table": ts.Name,
+ "group": ns,
+ }).Set(float64(ts.KeyNum))
+
+ metric.TableDiskUsage.With(ps.Labels{
+ "table": ts.Name,
+ "group": ns,
+ }).Set(float64(ts.DiskBytesUsage))
+ }
+ }
+ }
+ }
+}
diff --git a/server/server_test.go b/server/server_test.go
index 4304b438..8d81b185 100644
--- a/server/server_test.go
+++ b/server/server_test.go
@@ -1,25 +1,42 @@
package server
import (
+ "errors"
+ "flag"
"fmt"
"io/ioutil"
"os"
"path"
+ "path/filepath"
"strconv"
"strings"
"sync"
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/node"
- "github.com/absolute8511/ZanRedisDB/rockredis"
+ "github.com/absolute8511/glog"
"github.com/siddontang/goredis"
"github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/engine"
+ "github.com/youzan/ZanRedisDB/node"
+ "github.com/youzan/ZanRedisDB/pkg/fileutil"
+ "github.com/youzan/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/rockredis"
+ "github.com/youzan/ZanRedisDB/slow"
+ "github.com/youzan/ZanRedisDB/transport/rafthttp"
+ "github.com/youzan/ZanRedisDB/wal"
)
var clusterName = "cluster-unittest-server"
+const (
+ //testEngineType = "rocksdb"
+ testEngineType = "mem"
+ testUseRocksWAL = true
+ testSharedRocksWAL = true
+)
+
type fakeClusterInfo struct {
clusterName string
snapSyncs []common.SnapshotSyncInfo
@@ -49,27 +66,34 @@ var kvsCluster []testClusterInfo
var learnerServers []*Server
var gtmpClusterDir string
var seedNodes []node.ReplicaInfo
-var remoteClusterBasePort = 51845
+var remoteClusterBasePort = 22345
+var fullscanTestPortBase = 52345
+var mergeTestPortBase = 42345
+var redisAPITestPortBase = 12345
+var localClusterTestPortBase = 24345
+var tmpUsedClusterPortBase = 23345
//
var testSnap = 10
var testSnapCatchup = 3
func TestMain(m *testing.M) {
- //SetLogger(int32(common.LOG_INFO), newTestLogger(t))
- //rockredis.SetLogger(int32(common.LOG_INFO), newTestLogger(t))
- //node.SetLogger(int32(common.LOG_INFO), newTestLogger(t))
node.EnableForTest()
+ node.EnableSlowLimiterTest(true)
+
+ flag.Parse()
if testing.Verbose() {
- rockredis.SetLogLevel(int32(common.LOG_DETAIL))
- node.SetLogLevel(int(common.LOG_DETAIL))
- sLog.SetLevel(int32(common.LOG_DETAIL))
+ SetLogger(int32(common.LOG_DETAIL), common.NewLogger())
+ rockredis.SetLogger(int32(common.LOG_DEBUG), common.NewLogger())
+ slow.SetLogger(int32(common.LOG_DEBUG), common.NewLogger())
+ node.SetLogger(int32(common.LOG_DEBUG), common.NewLogger())
+ engine.SetLogger(int32(common.LOG_DEBUG), common.NewLogger())
}
ret := m.Run()
- if kvs != nil {
- kvs.Stop()
+ if gkvs != nil {
+ gkvs.Stop()
}
if kvsMerge != nil {
kvsMerge.Stop()
@@ -98,18 +122,30 @@ func TestMain(m *testing.M) {
os.RemoveAll(gtmpDir)
}
}
+ glog.Flush()
os.Exit(ret)
}
func startTestCluster(t *testing.T, replicaNum int, syncLearnerNum int) ([]testClusterInfo, []node.ReplicaInfo, []*Server, string) {
- rport := 52845
- return startTestClusterWithBasePort(t, rport, replicaNum, syncLearnerNum)
+ rport := localClusterTestPortBase
+ kvs, ns, servers, path, err := startTestClusterWithBasePort("unit-test-localcluster", rport, replicaNum, syncLearnerNum, false)
+ if err != nil {
+ t.Fatalf("init cluster failed: %v", err)
+ }
+ return kvs, ns, servers, path
}
-func startTestClusterWithBasePort(t *testing.T, portBase int, replicaNum int, syncLearnerNum int) ([]testClusterInfo, []node.ReplicaInfo, []*Server, string) {
+func startTestClusterWithEmptySM(replicaNum int) ([]testClusterInfo, []node.ReplicaInfo, []*Server, string, error) {
+ rport := tmpUsedClusterPortBase
+ return startTestClusterWithBasePort("unit-test-emptysm", rport, replicaNum, 0, true)
+}
+
+func startTestClusterWithBasePort(cid string, portBase int, replicaNum int,
+ syncLearnerNum int, useEmptySM bool) ([]testClusterInfo, []node.ReplicaInfo, []*Server, string, error) {
ctmpDir, err := ioutil.TempDir("", fmt.Sprintf("rocksdb-test-%d", time.Now().UnixNano()))
- assert.Nil(t, err)
- t.Logf("dir:%v\n", ctmpDir)
+ if err != nil {
+ return nil, nil, nil, "", err
+ }
kvsClusterTmp := make([]testClusterInfo, 0, replicaNum)
learnerServersTmp := make([]*Server, 0, syncLearnerNum)
rport := portBase
@@ -142,27 +178,35 @@ func startTestClusterWithBasePort(t *testing.T, portBase int, replicaNum int, sy
common.FILE_PERM)
raftAddr := "http://127.0.0.1:" + strconv.Itoa(raftPort+index)
redisport := rport + index
- grpcPort := rport - 110 + index
+ grpcPort := rport - 310 + index
httpPort := rport - 210 + index
var replica node.ReplicaInfo
replica.NodeID = uint64(1 + index)
replica.ReplicaID = uint64(1 + index)
replica.RaftAddr = raftAddr
kvOpts := ServerConfig{
- ClusterID: "unit-test-cluster",
- DataDir: tmpDir,
- RedisAPIPort: redisport,
- GrpcAPIPort: grpcPort,
- HttpAPIPort: httpPort,
- LocalRaftAddr: raftAddr,
- BroadcastAddr: "127.0.0.1",
- TickMs: 20,
- ElectionTick: 20,
+ ClusterID: cid,
+ DataDir: tmpDir,
+ RedisAPIPort: redisport,
+ GrpcAPIPort: grpcPort,
+ HttpAPIPort: httpPort,
+ LocalRaftAddr: raftAddr,
+ BroadcastAddr: "127.0.0.1",
+ TickMs: 20,
+ ElectionTick: 20,
+ UseRocksWAL: testUseRocksWAL,
+ SharedRocksWAL: testSharedRocksWAL,
}
+ kvOpts.RocksDBOpts.EnablePartitionedIndexFilter = true
+ kvOpts.RocksDBOpts.EngineType = testEngineType
+
if index >= replicaNum {
kvOpts.LearnerRole = common.LearnerRoleLogSyncer
// use test:// will ignore the remote cluster fail
- kvOpts.RemoteSyncCluster = "test://127.0.0.1:" + strconv.Itoa(remoteClusterBasePort-110)
+ kvOpts.RemoteSyncCluster = "test://127.0.0.1:" + strconv.Itoa(remoteClusterBasePort-310)
+ }
+ if useEmptySM {
+ kvOpts.StateMachineType = "empty_sm"
}
nsConf := node.NewNSConfig()
@@ -175,11 +219,12 @@ func startTestClusterWithBasePort(t *testing.T, portBase int, replicaNum int, sy
nsConf.Replicator = replicaNum
nsConf.RaftGroupConf.GroupID = 1000
nsConf.RaftGroupConf.SeedNodes = tmpSeeds
- nsConf.ExpirationPolicy = "consistency_deletion"
- kv := NewServer(kvOpts)
+ nsConf.ExpirationPolicy = common.WaitCompactExpirationPolicy
+ nsConf.DataVersion = common.ValueHeaderV1Str
+ kv, _ := NewServer(kvOpts)
kv.nsMgr.SetIClusterInfo(fakeCI)
if _, err := kv.InitKVNamespace(replica.ReplicaID, nsConf, false); err != nil {
- t.Fatalf("failed to init namespace: %v", err)
+ return kvsClusterTmp, tmpSeeds, learnerServersTmp, ctmpDir, err
}
kv.Start()
if index >= replicaNum {
@@ -191,7 +236,7 @@ func startTestClusterWithBasePort(t *testing.T, portBase int, replicaNum int, sy
}
time.Sleep(time.Second * 3)
- return kvsClusterTmp, tmpSeeds, learnerServersTmp, ctmpDir
+ return kvsClusterTmp, tmpSeeds, learnerServersTmp, ctmpDir, nil
}
func getTestConnForPort(t *testing.T, port int) *goredis.PoolConn {
@@ -256,11 +301,29 @@ func waitForLeader(t *testing.T, w time.Duration) *node.NamespaceNode {
return nil
}
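+// waitForLeaderForClusters polls the "default-0" namespace on each node of the given cluster
+// until one of them reports itself as leader or the wait duration elapses.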
+func waitForLeaderForClusters(w time.Duration, kvs []testClusterInfo) (*node.NamespaceNode, error) {
+ start := time.Now()
+ for {
+ for _, n := range kvs {
+ replicaNode := n.server.GetNamespaceFromFullName("default-0")
+ if replicaNode.Node.IsLead() {
+ leaderNode := replicaNode
+ return leaderNode, nil
+ }
+ }
+ if time.Since(start) > w {
+ return nil, fmt.Errorf("\033[31m timed out %v for wait leader \033[39m\n", time.Since(start))
+ }
+ time.Sleep(time.Second)
+ }
+ return nil, errors.New("no leader")
+}
+
func waitSyncedWithCommit(t *testing.T, w time.Duration, leaderci uint64, node *node.NamespaceNode, logSyncer bool) {
start := time.Now()
for {
- nsStats := node.Node.GetStats()
- ci := node.Node.GetCommittedIndex()
+ nsStats := node.Node.GetStats("", true)
+ ci := node.Node.GetAppliedIndex()
if ci >= leaderci {
assert.Equal(t, leaderci, ci)
@@ -285,17 +348,19 @@ func waitSyncedWithCommit(t *testing.T, w time.Duration, leaderci uint64, node *
}
}
}
+
func TestStartClusterWithLogSyncer(t *testing.T) {
//SetLogger(int32(common.LOG_INFO), newTestLogger(t))
//rockredis.SetLogger(int32(common.LOG_INFO), newTestLogger(t))
//node.SetLogger(int32(common.LOG_INFO), newTestLogger(t))
- remoteServers, _, _, remoteDir := startTestClusterWithBasePort(t, remoteClusterBasePort, 1, 0)
+ remoteServers, _, _, remoteDir, err := startTestClusterWithBasePort("unit-test-remotecluster", remoteClusterBasePort, 1, 0, false)
defer func() {
for _, remote := range remoteServers {
remote.server.Stop()
}
os.RemoveAll(remoteDir)
}()
+ assert.Nil(t, err)
assert.Equal(t, 1, len(remoteServers))
remoteConn := getTestConnForPort(t, remoteServers[0].redisPort)
defer remoteConn.Close()
@@ -313,7 +378,7 @@ func TestStartClusterWithLogSyncer(t *testing.T) {
learnerNode := learnerServers[0].GetNamespaceFromFullName("default-0")
assert.NotNil(t, learnerNode)
m := learnerNode.Node.GetLocalMemberInfo()
- nsStats := learnerNode.Node.GetStats()
+ nsStats := learnerNode.Node.GetStats("", true)
assert.Equal(t, common.LearnerRoleLogSyncer, nsStats.InternalStats["role"])
raftStats := leaderNode.Node.GetRaftStatus()
@@ -321,7 +386,7 @@ func TestStartClusterWithLogSyncer(t *testing.T) {
assert.Equal(t, false, ok)
node.SetSyncerOnly(true)
- err := leaderNode.Node.ProposeAddLearner(*m)
+ err = leaderNode.Node.ProposeAddLearner(*m)
assert.Nil(t, err)
time.Sleep(time.Second * 3)
assert.Equal(t, true, learnerNode.IsReady())
@@ -341,7 +406,7 @@ func TestStartClusterWithLogSyncer(t *testing.T) {
node.SetSyncerOnly(true)
// wait raft log synced
time.Sleep(time.Second)
- leaderci := leaderNode.Node.GetCommittedIndex()
+ leaderci := leaderNode.Node.GetAppliedIndex()
waitSyncedWithCommit(t, time.Minute, leaderci, learnerNode, true)
_, remoteIndex, ts := remoteNode.Node.GetRemoteClusterSyncedRaft(clusterName)
assert.Equal(t, leaderci, remoteIndex)
@@ -364,12 +429,13 @@ func TestStartClusterWithLogSyncer(t *testing.T) {
writeTs2 := time.Now().UnixNano()
time.Sleep(time.Second * 3)
- newci := leaderNode.Node.GetCommittedIndex()
+ newci := leaderNode.Node.GetAppliedIndex()
assert.Equal(t, leaderci+1, newci)
leaderci = newci
t.Logf("current leader commit: %v", leaderci)
waitSyncedWithCommit(t, time.Minute, leaderci, learnerNode, true)
_, remoteIndex, ts = remoteNode.Node.GetRemoteClusterSyncedRaft(clusterName)
+ t.Logf("remote synced: %v, %v (%v-%v)", remoteIndex, ts, writeTs, writeTs2)
assert.Equal(t, leaderci, remoteIndex)
assert.True(t, ts <= writeTs2)
assert.True(t, ts > writeTs)
@@ -417,18 +483,20 @@ func TestStartClusterWithLogSyncer(t *testing.T) {
nsConf.SnapCount = testSnap
nsConf.SnapCatchup = testSnapCatchup
nsConf.RaftGroupConf.GroupID = 1000
- nsConf.ExpirationPolicy = "consistency_deletion"
+ nsConf.ExpirationPolicy = common.WaitCompactExpirationPolicy
+ nsConf.DataVersion = common.ValueHeaderV1Str
learnerNode, err = learnerServers[0].InitKVNamespace(m.ID, nsConf, true)
assert.Nil(t, err)
node.SetSyncerOnly(true)
err = learnerNode.Start(false)
assert.Nil(t, err)
- leaderci = leaderNode.Node.GetCommittedIndex()
+ leaderci = leaderNode.Node.GetAppliedIndex()
t.Logf("current leader commit: %v", leaderci)
waitSyncedWithCommit(t, time.Minute, leaderci, learnerNode, true)
_, remoteIndex, ts = remoteNode.Node.GetRemoteClusterSyncedRaft(clusterName)
+ t.Logf("remote synced: %v, %v (%v-%v)", remoteIndex, ts, writeTs2, writeTs3)
assert.Equal(t, leaderci, remoteIndex)
assert.True(t, ts <= writeTs3)
assert.True(t, ts > writeTs2)
@@ -451,12 +519,17 @@ func TestStartClusterWithLogSyncer(t *testing.T) {
assert.Equal(t, "12346", v)
}
+func TestRecoveryNewerSnapReplaying(t *testing.T) {
+ // TODO: test recovery from a newer snapshot, where old logs need not be
+ // replayed; check the replaying status
+}
+
func TestRestartFollower(t *testing.T) {
- if testing.Verbose() {
- SetLogger(int32(common.LOG_DETAIL), newTestLogger(t))
- rockredis.SetLogger(int32(common.LOG_DETAIL), newTestLogger(t))
- node.SetLogger(int32(common.LOG_DEBUG), newTestLogger(t))
- }
+ //if testing.Verbose() {
+ // SetLogger(int32(common.LOG_DETAIL), newTestLogger(t))
+ // rockredis.SetLogger(int32(common.LOG_DETAIL), newTestLogger(t))
+ // node.SetLogger(int32(common.LOG_DEBUG), newTestLogger(t))
+ //}
c := getTestClusterConn(t, true)
defer c.Close()
@@ -483,10 +556,11 @@ func TestRestartFollower(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, OK, rsp)
+ time.Sleep(time.Second)
follower, err = followerS.server.InitKVNamespace(m.ID, followerS.nsConf, true)
assert.Nil(t, err)
follower.Start(false)
- leaderci := leaderNode.Node.GetCommittedIndex()
+ leaderci := leaderNode.Node.GetAppliedIndex()
waitSyncedWithCommit(t, time.Second*30, leaderci, follower, false)
}
@@ -500,8 +574,7 @@ func TestRestartCluster(t *testing.T) {
leaderNode := waitForLeader(t, time.Minute)
assert.NotNil(t, leaderNode)
- ci := leaderNode.Node.GetCommittedIndex()
-
+ ci := leaderNode.Node.GetAppliedIndex()
key := "default:test-cluster:a"
rsp, err := goredis.String(c.Do("set", key, "1234"))
assert.Nil(t, err)
@@ -511,6 +584,7 @@ func TestRestartCluster(t *testing.T) {
node := s.server.GetNamespaceFromFullName("default-0")
node.Close()
}
+ time.Sleep(time.Second)
for _, s := range kvsCluster {
node, err := s.server.InitKVNamespace(s.replicaID, s.nsConf, true)
@@ -525,7 +599,7 @@ func TestRestartCluster(t *testing.T) {
for _, s := range kvsCluster {
replicaNode := s.server.GetNamespaceFromFullName("default-0")
assert.NotNil(t, replicaNode)
- newci := replicaNode.Node.GetCommittedIndex()
+ newci := replicaNode.Node.GetAppliedIndex()
assert.Equal(t, ci+1+1, newci)
if replicaNode.Node.IsLead() {
hasLeader = true
@@ -533,3 +607,447 @@ func TestRestartCluster(t *testing.T) {
}
assert.Equal(t, true, hasLeader)
}
+
+func TestReadWriteAfterStopped(t *testing.T) {
+ // TODO: stopping all nodes in the cluster and then sending reads/writes should return errors
+ c := getTestClusterConn(t, true)
+ defer c.Close()
+
+ assert.Equal(t, 3, len(kvsCluster))
+
+ leaderNode := waitForLeader(t, time.Minute)
+ assert.NotNil(t, leaderNode)
+}
+
+func TestCompactCancelAfterStopped(t *testing.T) {
+ // TODO: stopping all nodes in the cluster should cancel the running compaction;
+ // also check that compaction returns an error after close
+ c := getTestClusterConn(t, true)
+ defer c.Close()
+
+ assert.Equal(t, 3, len(kvsCluster))
+
+ leaderNode := waitForLeader(t, time.Minute)
+ assert.NotNil(t, leaderNode)
+}
+
+func TestOptimizeExpireMeta(t *testing.T) {
+ c := getTestClusterConn(t, true)
+ defer c.Close()
+
+ assert.Equal(t, 3, len(kvsCluster))
+
+ leaderNode := waitForLeader(t, time.Minute)
+ assert.NotNil(t, leaderNode)
+ key1 := "default:test:exp_compact"
+ ttl := 2
+
+ for i := 0; i < 1000; i++ {
+ _, err := goredis.String(c.Do("setex", key1+strconv.Itoa(i), ttl, "hello"))
+ assert.Nil(t, err)
+ }
+ for _, s := range kvsCluster {
+ s.server.nsMgr.DisableOptimizeDB(true)
+ s.server.nsMgr.OptimizeDBExpire("default")
+ s.server.nsMgr.OptimizeDBAnyRange("default", node.CompactAPIRange{StartFrom: []byte("start"), EndTo: []byte("end")})
+ }
+ for _, s := range kvsCluster {
+ s.server.nsMgr.DisableOptimizeDB(false)
+ s.server.nsMgr.OptimizeDBExpire("default")
+ s.server.nsMgr.OptimizeDBAnyRange("default", node.CompactAPIRange{StartFrom: []byte("start"), EndTo: []byte("end")})
+ }
+}
+
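+// readWALNames lists the %016x-%016x.wal files under dirpath so the tests below can locate and
+// truncate the newest WAL segment.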
+func readWALNames(dirpath string) []string {
+ names, err := fileutil.ReadDir(dirpath)
+ if err != nil {
+ return nil
+ }
+ wnames := checkWalNames(names)
+ if len(wnames) == 0 {
+ return nil
+ }
+ return wnames
+}
+func checkWalNames(names []string) []string {
+ wnames := make([]string, 0)
+ for _, name := range names {
+ if _, _, err := parseWALName(name); err != nil {
+ // skip anything that is not a wal file, including left over tmp files
+ continue
+ }
+ wnames = append(wnames, name)
+ }
+ return wnames
+}
+
+func parseWALName(str string) (seq, index uint64, err error) {
+ if !strings.HasSuffix(str, ".wal") {
+ return 0, 0, errors.New("bad wal file")
+ }
+ _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index)
+ return seq, index, err
+}
+
+func TestLeaderRestartWithTailWALLogLost(t *testing.T) {
+ // stop all nodes in cluster and start one by one
+ kvs, _, _, path, err := startTestClusterWithBasePort("unit-test-leaderwallost", tmpUsedClusterPortBase, 1, 0, false)
+ if err != nil {
+ t.Fatalf("init cluster failed: %v", err)
+ }
+ if path != "" {
+ defer os.RemoveAll(path)
+ }
+ time.Sleep(time.Second)
+ defer func() {
+ for _, n := range kvs {
+ n.server.Stop()
+ }
+ }()
+ leaderNode, err := waitForLeaderForClusters(time.Minute, kvs)
+ assert.Nil(t, err)
+ assert.NotNil(t, leaderNode)
+ rport := kvs[0].redisPort
+
+ client := goredis.NewClient("127.0.0.1:"+strconv.Itoa(rport), "")
+ client.SetMaxIdleConns(4)
+ defer client.Close()
+ c, err := client.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ assert.Equal(t, 1, len(kvs))
+
+ key := "default:test-cluster:a"
+ rsp, err := goredis.String(c.Do("set", key, "1234"))
+ assert.Nil(t, err)
+ assert.Equal(t, OK, rsp)
+ for i := 0; i < testSnap*100; i++ {
+ rsp, err := goredis.String(c.Do("set", key, strconv.Itoa(i)+"aaaaa"))
+ assert.Nil(t, err)
+ assert.Equal(t, OK, rsp)
+ }
+
+ ci := leaderNode.Node.GetAppliedIndex()
+ for _, s := range kvs {
+ node := s.server.GetNamespaceFromFullName("default-0")
+ node.Close()
+ }
+ // truncate the leader logs
+ // note: we only truncate as much of the log tail as still keeps a recent valid snapshot
+ raftConf := leaderNode.Node.GetRaftConfig()
+ names := readWALNames(raftConf.WALDir)
+ lastWAL := filepath.Join(raftConf.WALDir, names[len(names)-1])
+ snaps, err := wal.ValidSnapshotEntries(raftConf.WALDir)
+ t.Logf("truncating the wal: %s", lastWAL)
+ t.Logf("snaps in the wal: %v", snaps)
+ for i := 140; i > 0; i-- {
+ os.Truncate(lastWAL, int64(i*1000))
+ snaps2, err := wal.ValidSnapshotEntries(raftConf.WALDir)
+ assert.Nil(t, err)
+ if len(snaps2) < len(snaps)-1 {
+ t.Logf("snaps in the wal after truncate: %v", snaps2)
+ break
+ }
+ }
+ // wait for the namespace to be deleted by the callback
+ time.Sleep(time.Second)
+
+ for _, s := range kvs {
+ node, err := s.server.InitKVNamespace(s.replicaID, s.nsConf, true)
+ assert.Nil(t, err)
+ assert.NotNil(t, node)
+ err = node.Start(false)
+ assert.Nil(t, err)
+ if err != nil {
+ return
+ }
+ }
+ time.Sleep(time.Second * 2)
+
+ hasLeader := false
+ newci := uint64(0)
+ for _, s := range kvs {
+ replicaNode := s.server.GetNamespaceFromFullName("default-0")
+ assert.NotNil(t, replicaNode)
+ newci = replicaNode.Node.GetAppliedIndex()
+ t.Logf("restarted ci: %v, old %v", newci, ci)
+ assert.True(t, newci < ci)
+ if replicaNode.Node.IsLead() {
+ hasLeader = true
+ }
+ }
+ assert.Equal(t, true, hasLeader)
+ rsp, err = goredis.String(c.Do("get", key))
+ assert.Nil(t, err)
+ assert.Equal(t, strconv.Itoa(testSnap*100-int(ci+2-newci))+"aaaaa", rsp)
+}
+
+func TestFollowRestartWithTailWALLogLost(t *testing.T) {
+ // stop all nodes in cluster and start one by one
+ kvs, _, _, path, err := startTestClusterWithBasePort("unit-test-follower-wallost", tmpUsedClusterPortBase, 2, 0, false)
+ if err != nil {
+ t.Fatalf("init cluster failed: %v", err)
+ }
+ if path != "" {
+ defer os.RemoveAll(path)
+ }
+ time.Sleep(time.Second)
+ defer func() {
+ for _, n := range kvs {
+ n.server.Stop()
+ }
+ }()
+ leaderNode, err := waitForLeaderForClusters(time.Minute, kvs)
+ assert.Nil(t, err)
+ assert.NotNil(t, leaderNode)
+ var follower *node.NamespaceNode
+ rport := 0
+ for _, n := range kvs {
+ replicaNode := n.server.GetNamespaceFromFullName("default-0")
+ if !replicaNode.Node.IsLead() {
+ follower = replicaNode
+ } else {
+ rport = n.redisPort
+ }
+ }
+
+ client := goredis.NewClient("127.0.0.1:"+strconv.Itoa(rport), "")
+ client.SetMaxIdleConns(4)
+ defer client.Close()
+ c, err := client.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ assert.Equal(t, 2, len(kvs))
+
+ key := "default:test-cluster:a"
+ rsp, err := goredis.String(c.Do("set", key, "1234"))
+ assert.Nil(t, err)
+ assert.Equal(t, OK, rsp)
+ for i := 0; i < testSnap*100; i++ {
+ rsp, err := goredis.String(c.Do("set", key, strconv.Itoa(i)+"aaaaa"))
+ assert.Nil(t, err)
+ assert.Equal(t, OK, rsp)
+ }
+
+ time.Sleep(time.Second)
+ ci := leaderNode.Node.GetAppliedIndex()
+ raftConf := follower.Node.GetRaftConfig()
+ for _, s := range kvs {
+ node := s.server.GetNamespaceFromFullName("default-0")
+ node.Close()
+ }
+ // truncate the logs
+ // note: we only truncate as much of the log tail as still keeps a recent valid snapshot
+ names := readWALNames(raftConf.WALDir)
+ lastWAL := filepath.Join(raftConf.WALDir, names[len(names)-1])
+ t.Logf("truncating the wal: %s", lastWAL)
+ snaps, err := wal.ValidSnapshotEntries(raftConf.WALDir)
+ for i := 140; i > 0; i-- {
+ os.Truncate(lastWAL, int64(i*1000))
+ snaps2, err := wal.ValidSnapshotEntries(raftConf.WALDir)
+ assert.Nil(t, err)
+ if len(snaps2) < len(snaps)-1 {
+ t.Logf("snaps in the wal after truncate: %v", snaps2)
+ break
+ }
+ }
+ // wait for the namespace to be deleted by the callback
+ time.Sleep(time.Second)
+
+ for _, s := range kvs {
+ node, err := s.server.InitKVNamespace(s.replicaID, s.nsConf, true)
+ assert.Nil(t, err)
+ assert.NotNil(t, node)
+ err = node.Start(false)
+ assert.Nil(t, err)
+ if err != nil {
+ return
+ }
+ }
+ time.Sleep(time.Second * 2)
+
+ hasLeader := false
+ leaderNode, err = waitForLeaderForClusters(time.Second*10, kvs)
+ assert.Nil(t, err)
+ assert.NotNil(t, leaderNode)
+ newci := uint64(0)
+ for _, s := range kvs {
+ replicaNode := s.server.GetNamespaceFromFullName("default-0")
+ assert.NotNil(t, replicaNode)
+ newci = replicaNode.Node.GetAppliedIndex()
+ assert.Equal(t, ci+1, newci)
+ if replicaNode.Node.IsLead() {
+ hasLeader = true
+ }
+ }
+ assert.Equal(t, true, hasLeader)
+ rsp, err = goredis.String(c.Do("stale.get", key))
+ assert.Nil(t, err)
+ assert.Equal(t, strconv.Itoa(testSnap*100-1)+"aaaaa", rsp)
+}
+
+func BenchmarkWriteToClusterWithEmptySM(b *testing.B) {
+ costStatsLevel = 3
+ sLog.SetLevel(int32(common.LOG_WARN))
+ rockredis.SetLogger(int32(common.LOG_ERR), nil)
+ node.SetLogLevel(int(common.LOG_WARN))
+ raft.SetLogger(nil)
+ rafthttp.SetLogLevel(0)
+
+ kvs, _, _, dir, err := startTestClusterWithEmptySM(3)
+ if err != nil {
+ panic(err)
+ }
+ defer os.RemoveAll(dir)
+ defer func() {
+ for _, v := range kvs {
+ v.server.Stop()
+ }
+ }()
+ _, err = waitForLeaderForClusters(time.Minute, kvs)
+ if err != nil {
+ panic(err)
+ }
+ rport := 0
+ for _, n := range kvs {
+ replicaNode := n.server.GetNamespaceFromFullName("default-0")
+ if replicaNode.Node.IsLead() {
+ rport = n.redisPort
+ break
+ }
+ }
+ c := goredis.NewClient("127.0.0.1:"+strconv.Itoa(rport), "")
+ c.SetMaxIdleConns(10)
+
+ b.ResetTimer()
+ b.Run("bench-set", func(b *testing.B) {
+ b.ReportAllocs()
+ start := time.Now()
+ var wg sync.WaitGroup
+ for i := 0; i < 30; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ conn, err := c.Get()
+ if err != nil {
+ panic(err)
+ }
+ defer conn.Close()
+ for i := 0; i < b.N; i++ {
+ key := "default:test-cluster:a"
+ _, err := goredis.String(conn.Do("set", key, "1234"))
+ if err != nil {
+ panic(err)
+ }
+ }
+ }()
+ }
+ wg.Wait()
+ cost := time.Since(start)
+ b.Logf("cost time: %v for %v", cost, b.N)
+ })
+
+ b.StopTimer()
+}
+
+func BenchmarkGetOpWithLockAndNoLock(b *testing.B) {
+ costStatsLevel = 3
+ sLog.SetLevel(int32(common.LOG_WARN))
+ rockredis.SetLogger(int32(common.LOG_ERR), nil)
+ node.SetLogLevel(int(common.LOG_WARN))
+ raft.SetLogger(nil)
+ rafthttp.SetLogLevel(0)
+
+ kvs, _, _, dir, err := startTestClusterWithBasePort("bench-test-get", tmpUsedClusterPortBase, 1, 0, false)
+ if err != nil {
+ panic(err)
+ }
+ defer os.RemoveAll(dir)
+ defer func() {
+ for _, v := range kvs {
+ v.server.Stop()
+ }
+ }()
+ _, err = waitForLeaderForClusters(time.Minute, kvs)
+ if err != nil {
+ panic(err)
+ }
+ rport := 0
+ for _, n := range kvs {
+ replicaNode := n.server.GetNamespaceFromFullName("default-0")
+ if replicaNode.Node.IsLead() {
+ rport = n.redisPort
+ break
+ }
+ }
+ c := goredis.NewClient("127.0.0.1:"+strconv.Itoa(rport), "")
+ c.SetMaxIdleConns(10)
+ key := "default:test-cluster:test-get"
+ goredis.String(c.Do("set", key, "1234"))
+
+ b.Run("bench-get-withlock", func(b *testing.B) {
+ b.ReportAllocs()
+ start := time.Now()
+ var wg sync.WaitGroup
+ for i := 0; i < 30; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ conn, err := c.Get()
+ if err != nil {
+ panic(err)
+ }
+ defer conn.Close()
+ for i := 0; i < b.N; i++ {
+ v, err := goredis.String(conn.Do("get", key))
+ if err != nil {
+ panic(err)
+ }
+ if v != "1234" {
+ panic(v)
+ }
+ }
+ }()
+ }
+ wg.Wait()
+ cost := time.Since(start)
+ b.Logf("cost time: %v for %v", cost, b.N)
+ })
+
+ b.Run("bench-get-nolock", func(b *testing.B) {
+ b.ReportAllocs()
+ start := time.Now()
+ var wg sync.WaitGroup
+ for i := 0; i < 30; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ conn, err := c.Get()
+ if err != nil {
+ panic(err)
+ }
+ defer conn.Close()
+ for i := 0; i < b.N; i++ {
+ v, err := goredis.String(conn.Do("getnolock", key))
+ if err != nil {
+ panic(err)
+ }
+ if v != "1234" {
+ panic(v)
+ }
+ }
+ }()
+ }
+ wg.Wait()
+ cost := time.Since(start)
+ b.Logf("get nolock cost time: %v for %v", cost, b.N)
+ })
+}
diff --git a/server/util.go b/server/util.go
index 8d8b053a..a093d7fb 100644
--- a/server/util.go
+++ b/server/util.go
@@ -9,6 +9,7 @@ import (
"strings"
"github.com/absolute8511/redcon"
+ "github.com/youzan/ZanRedisDB/common"
)
var (
@@ -18,22 +19,24 @@ var (
)
func GetIPv4ForInterfaceName(ifname string) string {
- interfaces, _ := net.Interfaces()
- for _, inter := range interfaces {
- //log.Printf("found interface: %s\n", inter.Name)
- if inter.Name == ifname {
- if addrs, err := inter.Addrs(); err == nil {
- for _, addr := range addrs {
- switch ip := addr.(type) {
- case *net.IPNet:
- if ip.IP.DefaultMask() != nil {
- return ip.IP.String()
- }
- }
- }
+ inter, err := net.InterfaceByName(ifname)
+ if err != nil {
+ return ""
+ }
+
+ addrs, err := inter.Addrs()
+ if err != nil {
+ return ""
+ }
+
+ for _, addr := range addrs {
+ if ip, ok := addr.(*net.IPNet); ok {
+ if ip.IP.DefaultMask() != nil {
+ return ip.IP.String()
}
}
}
+
return ""
}
@@ -76,31 +79,9 @@ func pipelineCommand(conn redcon.Conn, cmd redcon.Command) (int, redcon.Command,
ncmd := buildCommand(args)
return len(pcmds) + 1, ncmd, nil
}
-func buildCommand(args [][]byte) redcon.Command {
- // build a pipeline command
- buf := make([]byte, 0, 128)
- buf = append(buf, '*')
- buf = append(buf, strconv.FormatInt(int64(len(args)), 10)...)
- buf = append(buf, '\r', '\n')
-
- poss := make([]int, 0, len(args)*2)
- for _, arg := range args {
- buf = append(buf, '$')
- buf = append(buf, strconv.FormatInt(int64(len(arg)), 10)...)
- buf = append(buf, '\r', '\n')
- poss = append(poss, len(buf), len(buf)+len(arg))
- buf = append(buf, arg...)
- buf = append(buf, '\r', '\n')
- }
- // reformat a new command
- var ncmd redcon.Command
- ncmd.Raw = buf
- ncmd.Args = make([][]byte, len(poss)/2)
- for i, j := 0, 0; i < len(poss); i, j = i+2, j+1 {
- ncmd.Args[j] = ncmd.Raw[poss[i]:poss[i+1]]
- }
- return ncmd
+func buildCommand(args [][]byte) redcon.Command {
+ return common.BuildCommand(args)
}
func parseCommand(raw []byte) (redcon.Command, error) {
diff --git a/settings/overwrite.go b/settings/overwrite.go
new file mode 100644
index 00000000..14721e62
--- /dev/null
+++ b/settings/overwrite.go
@@ -0,0 +1,76 @@
+package settings
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "strings"
+)
+
+// RootSettingDir is the setting config file root directory
+var RootSettingDir string
+
+func getConfig(fn string) map[string]interface{} {
+ if _, err := os.Stat(fn); os.IsNotExist(err) {
+ return nil
+ }
+ m := map[string]interface{}{}
+ b, err := ioutil.ReadFile(filepath.Clean(fn))
+ if err != nil {
+ panic(err)
+ }
+ if err := json.Unmarshal(b, &m); err != nil {
+ panic(err)
+ }
+ return m
+}
+
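+// overwriteSettingsWithFile loads fn (relative to RootSettingDir) and overwrites matching
+// fields of s, matching either the exported field name or its json tag. For example
+// (illustrative), a soft-settings.json containing {"max_inflight_msgs": 512} would override
+// the MaxInflightMsgs default; only uint64, bool and string fields are handled below.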
+func overwriteSettingsWithFile(s interface{}, fn string) {
+ cfg := getConfig(path.Join(RootSettingDir, fn))
+ rd := reflect.Indirect(reflect.ValueOf(s))
+ rt := rd.Type()
+ tagMapper := make(map[string]string, rt.NumField())
+ for i := 0; i < rt.NumField(); i++ {
+ f := rt.Field(i)
+ jn := f.Tag.Get("json")
+ pos := strings.Index(jn, ",")
+ if pos != -1 {
+ jn = jn[:pos]
+ }
+ tagMapper[jn] = f.Name
+ }
+ overwriteSettings(cfg, rd, tagMapper)
+}
+
+func getField(key string, rd reflect.Value, tagMapper map[string]string) reflect.Value {
+ field := rd.FieldByName(key)
+ if field.IsValid() {
+ return field
+ }
+ // find field from json tag
+ tn, ok := tagMapper[key]
+ if ok {
+ field = rd.FieldByName(tn)
+ }
+ return field
+}
+
+func overwriteSettings(cfg map[string]interface{}, rd reflect.Value, tagMapper map[string]string) {
+ for key, val := range cfg {
+ field := getField(key, rd, tagMapper)
+ if field.IsValid() {
+ switch field.Type().String() {
+ case "uint64":
+ field.SetUint(uint64(val.(float64)))
+ case "bool":
+ field.SetBool(val.(bool))
+ case "string":
+ field.SetString(val.(string))
+ default:
+ }
+ }
+ }
+}
diff --git a/settings/overwrite_test.go b/settings/overwrite_test.go
new file mode 100644
index 00000000..f8ad497f
--- /dev/null
+++ b/settings/overwrite_test.go
@@ -0,0 +1,53 @@
+package settings
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestOverwriteSettings(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", fmt.Sprintf("settings-%d", time.Now().UnixNano()))
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ t.Logf("tmp: %v", tmpDir)
+ os.MkdirAll(tmpDir, 0777)
+ RootSettingDir = tmpDir
+ defer os.RemoveAll(tmpDir)
+ testSettings := `{"TestInt":1, "TestStr":"str", "TestBool":true}`
+ err = ioutil.WriteFile(path.Join(tmpDir, "soft-settings.json"), []byte(testSettings), 0777)
+ assert.Nil(t, err)
+ err = ioutil.WriteFile(path.Join(tmpDir, "static-settings.json"), []byte(testSettings), 0777)
+ assert.Nil(t, err)
+
+ s1 := getSoftSettings()
+ s2 := getStaticSettings()
+ assert.Equal(t, uint64(1), s1.TestInt)
+ assert.Equal(t, true, s1.TestBool)
+ assert.Equal(t, "str", s1.TestStr)
+ assert.Equal(t, uint64(1), s2.TestInt)
+ assert.Equal(t, true, s2.TestBool)
+ assert.Equal(t, "str", s2.TestStr)
+
+ testSettings = `{"test_int":1, "test_str":"str", "test_bool":true}`
+ err = ioutil.WriteFile(path.Join(tmpDir, "soft-settings.json"), []byte(testSettings), 0777)
+ assert.Nil(t, err)
+ err = ioutil.WriteFile(path.Join(tmpDir, "static-settings.json"), []byte(testSettings), 0777)
+ assert.Nil(t, err)
+
+ s3 := getSoftSettings()
+ s4 := getStaticSettings()
+ assert.Equal(t, uint64(1), s3.TestInt)
+ assert.Equal(t, true, s3.TestBool)
+ assert.Equal(t, "str", s3.TestStr)
+ assert.Equal(t, uint64(1), s4.TestInt)
+ assert.Equal(t, true, s4.TestBool)
+ assert.Equal(t, "str", s4.TestStr)
+}
diff --git a/settings/soft.go b/settings/soft.go
new file mode 100644
index 00000000..baa9cf50
--- /dev/null
+++ b/settings/soft.go
@@ -0,0 +1,58 @@
+package settings
+
+import "runtime"
+
+// Soft settings are configurations that can be safely changed; the app needs to be
+// restarted for such configuration changes to take effect.
+var Soft = getSoftSettings()
+
+type soft struct {
+ // test
+ TestInt uint64 `json:"test_int,omitempty"`
+ TestBool bool `json:"test_bool"`
+ TestStr string `json:"test_str"`
+ // raft
+ MaxCommittedSizePerReady uint64 `json:"max_committed_size_per_ready"`
+ MaxSizePerMsg uint64 `json:"max_size_per_msg"`
+ MaxInflightMsgs uint64 `json:"max_inflight_msgs"`
+ DefaultSnapCount uint64 `json:"default_snap_count"`
+ MaxInFlightMsgSnap uint64 `json:"max_in_flight_msg_snap"`
+ LeaderTransferLag uint64 `json:"leader_transfer_lag"`
+ // HealthIntervalSec is the minimum time in seconds the cluster should be healthy
+ // before accepting add member requests.
+ HealthIntervalSec uint64 `json:"health_interval_sec"`
+
+ // transport
+
+ // statemachine
+ CommitBufferLen uint64 `json:"commit_buffer_len"`
+
+ // server
+
+ // raft proposal queue length for the client queue loop (default 1024*4; using the default is recommended)
+ ProposalQueueLen uint64 `json:"proposal_queue_len"`
+ // number of queues used for proposals; matching the number of CPUs is recommended
+ ProposalQueueNum uint64 `json:"proposal_queue_num"`
+}
+
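+// getSoftSettings builds the default soft settings and applies any overrides found in
+// soft-settings.json under RootSettingDir.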
+func getSoftSettings() soft {
+ d := defaultSoftSettings()
+ overwriteSettingsWithFile(&d, "soft-settings.json")
+ return d
+}
+
+func defaultSoftSettings() soft {
+ return soft{
+ MaxCommittedSizePerReady: 1024 * 1024 * 16,
+ DefaultSnapCount: 160000,
+ HealthIntervalSec: 5,
+ // max number of in-flight snapshot messages allowed
+ MaxInFlightMsgSnap: 16,
+ MaxSizePerMsg: 512 * 1024,
+ MaxInflightMsgs: 256,
+ LeaderTransferLag: 64,
+ CommitBufferLen: 1024 * 8,
+ ProposalQueueLen: 1024 * 4,
+ ProposalQueueNum: uint64(runtime.NumCPU()),
+ }
+}
diff --git a/settings/static.go b/settings/static.go
new file mode 100644
index 00000000..9eace84e
--- /dev/null
+++ b/settings/static.go
@@ -0,0 +1,26 @@
+package settings
+
+// Static settings must not be changed after deployment (changing them may corrupt data or
+// make the app incompatible), for example:
+// {
+// "xxx": 32,
+// }
+//
+var Static = getStaticSettings()
+
+type static struct {
+ // test
+ TestInt uint64 `json:"test_int"`
+ TestBool bool `json:"test_bool"`
+ TestStr string `json:"test_str,omitempty"`
+}
+
+func getStaticSettings() static {
+ s := defaultStaticSettings()
+ overwriteSettingsWithFile(&s, "static-settings.json")
+ return s
+}
+
+func defaultStaticSettings() static {
+ return static{}
+}
diff --git a/slow/slowlog.go b/slow/slowlog.go
new file mode 100644
index 00000000..3eafd497
--- /dev/null
+++ b/slow/slowlog.go
@@ -0,0 +1,202 @@
+package slow
+
+import (
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/internal/flume_log"
+)
+
+// merged and formatted slow logs for writes or large collections;
+// the slow-log level controls which kinds of slow logs are output
+const (
+ collectionMinLenForLog = 128
+ collectionLargeLen = 5000
+ dbWriteSlow = time.Millisecond * 100
+)
+
+const (
+ appName = "zankv"
+ logName = "zankv_slowlog"
+)
+
+var remoteLog *flume_log.RemoteLogClient
+
+func SetRemoteLogger(remote string) {
+ if remote != "" {
+ remoteLog = flume_log.NewClient(remote, appName, logName)
+ }
+}
+
+var sl = common.NewLevelLogger(common.LOG_INFO, common.NewLogger())
+
+func SetLogger(level int32, logger common.Logger) {
+ sl.SetLevel(level)
+ sl.Logger = logger
+}
+
+var slowLogLevel int32
+
+func ChangeSlowLogLevel(lv int) {
+ atomic.StoreInt32(&slowLogLevel, int32(lv))
+}
+
+func slowLogLv() int32 {
+ return atomic.LoadInt32(&slowLogLevel)
+}
+
+var logCollTimes [32]*int64
+
+func init() {
+ for i := 0; i < len(logCollTimes); i++ {
+ var d int64
+ logCollTimes[i] = &d
+ }
+}
+
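+// getIndex maps a SlowLogInfo scope onto one of the 32 throttle slots above using the last
+// two bytes of the scope, so repeated large-collection logs for the same scope are limited
+// to roughly one per 3 seconds (see checkLastLogCollTime/updateLastLogCollTime).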
+func getIndex(si SlowLogInfo) int {
+ index := 0
+ total := len(si.Scope)
+ if total > 0 {
+ index += int(si.Scope[total-1])
+ }
+ if total > 1 {
+ index += int(si.Scope[total-2])
+ }
+ return index
+}
+
+func checkLastLogCollTime(si SlowLogInfo) bool {
+ index := getIndex(si)
+ t := logCollTimes[index%len(logCollTimes)]
+ return atomic.LoadInt64(t)+3*time.Second.Nanoseconds() <= time.Now().UnixNano()
+}
+
+func updateLastLogCollTime(si SlowLogInfo) {
+ t := logCollTimes[getIndex(si)%len(logCollTimes)]
+ atomic.StoreInt64(t, time.Now().UnixNano())
+}
+
+type SlowLogInfo struct {
+ Scope string
+ Key string
+ Note string
+}
+
+func NewSlowLogInfo(scope string, key string, note string) SlowLogInfo {
+ return SlowLogInfo{
+ Scope: scope,
+ Key: key,
+ Note: note,
+ }
+}
+
+// LogLargeColl
+// LogSlowWrite
+// LogLargeValue
+// LogLargeBatch
+
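+// LogSlowDBWrite logs a db write whose cost reached the slow threshold (lower thresholds
+// apply at more verbose slow-log levels) and returns the formatted line plus whether anything
+// was logged. Typical call (as used in the tests): LogSlowDBWrite(cost, NewSlowLogInfo(scope, key, note)).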
+func LogSlowDBWrite(cost time.Duration, si SlowLogInfo) (string, bool) {
+ if slowLogLv() < 0 {
+ return "", false
+ }
+
+ if cost >= dbWriteSlow || slowLogLv() > common.LOG_DETAIL ||
+ (slowLogLv() >= common.LOG_INFO && cost >= dbWriteSlow/2) {
+ str := fmt.Sprintf("[SLOW_LOGS] db slow write command in scope %v, cost: %v, key: %v, note: %v",
+ si.Scope, cost, si.Key, si.Note)
+
+ sl.InfoDepth(1, str)
+ if remoteLog != nil {
+ remoteLog.Warn(str, &si)
+ }
+ return str, true
+ }
+ return "", false
+}
+
+func LogDebugSlowWrite(cost time.Duration, thres time.Duration, lvFor int32, si SlowLogInfo) (string, bool) {
+ if slowLogLv() < common.LOG_DEBUG {
+ return "", false
+ }
+ if cost >= thres && slowLogLv() >= int32(lvFor) {
+ str := fmt.Sprintf("[SLOW_LOGS] debug slow write in scope %v, cost: %v, note: %v",
+ si.Scope, cost, si.Note)
+ sl.InfoDepth(1, str)
+
+ if remoteLog != nil {
+ remoteLog.Info(str, &si)
+ }
+ return str, true
+ }
+ return "", false
+}
+
+func LogSlowForSteps(thres time.Duration, lvFor int32, si SlowLogInfo, costList ...time.Duration) (string, bool) {
+ if len(costList) == 0 {
+ return "", false
+ }
+ if slowLogLv() < 0 {
+ return "", false
+ }
+ if costList[len(costList)-1] >= thres && slowLogLv() >= int32(lvFor) {
+ str := fmt.Sprintf("[SLOW_LOGS] steps slow in scope %v, cost list: %v, note: %v",
+ si.Scope, costList, si.Note)
+ sl.InfoDepth(1, str)
+
+ if remoteLog != nil {
+ remoteLog.Info(str, &si)
+ }
+ return str, true
+ }
+ return "", false
+}
+
+func LogLargeCollection(sz int, si SlowLogInfo) (string, bool) {
+ if sz < collectionMinLenForLog {
+ return "", false
+ }
+ lv := slowLogLv()
+ if lv < 0 {
+ return "", false
+ }
+ if sz >= collectionLargeLen {
+ if lv >= common.LOG_INFO || checkLastLogCollTime(si) {
+ str := fmt.Sprintf("[SLOW_LOGS] large collection in scope %v, size: %v, key: %v, note: %v",
+ si.Scope, sz, si.Key, si.Note)
+ sl.InfoDepth(1, str)
+ updateLastLogCollTime(si)
+
+ if remoteLog != nil {
+ remoteLog.Info(str, &si)
+ }
+ return str, true
+ }
+ }
+ if lv >= common.LOG_DETAIL ||
+ (lv >= common.LOG_INFO && sz >= collectionMinLenForLog*4 && checkLastLogCollTime(si)) ||
+ (lv >= common.LOG_DEBUG && sz >= collectionMinLenForLog*2) {
+ str := fmt.Sprintf("[SLOW_LOGS] maybe large collection in scope %v, size: %v, key: %v, note: %v",
+ si.Scope, sz, si.Key, si.Note)
+ sl.InfoDepth(1, str)
+ if lv <= common.LOG_INFO {
+ updateLastLogCollTime(si)
+ }
+
+ if remoteLog != nil {
+ remoteLog.Info(str, &si)
+ }
+ return str, true
+ }
+ return "", false
+}
+
+func LogLargeValue() (string, bool) {
+ return "", false
+}
+
+func LogLargeBatchWrite() (string, bool) {
+ return "", false
+}
diff --git a/slow/slowlog_test.go b/slow/slowlog_test.go
new file mode 100644
index 00000000..3847ff51
--- /dev/null
+++ b/slow/slowlog_test.go
@@ -0,0 +1,100 @@
+package slow
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/youzan/ZanRedisDB/common"
+)
+
+func TestSlowLogLevel(t *testing.T) {
+ str, logged := LogLargeCollection(collectionLargeLen, NewSlowLogInfo("", "test", ""))
+ t.Log(str)
+ assert.Equal(t, true, logged)
+ str, logged = LogLargeCollection(collectionMinLenForLog*2, NewSlowLogInfo("", "test", ""))
+ t.Log(str)
+ assert.Equal(t, false, logged)
+ str, logged = LogSlowDBWrite(dbWriteSlow, NewSlowLogInfo("test", "testkey", ""))
+ t.Log(str)
+ assert.Equal(t, true, logged)
+ str, logged = LogSlowForSteps(dbWriteSlow, 0, NewSlowLogInfo("test", "testkey", ""), dbWriteSlow/2, dbWriteSlow)
+ t.Log(str)
+ assert.Equal(t, true, logged)
+
+ str, logged = LogSlowForSteps(dbWriteSlow, 1, NewSlowLogInfo("test", "testkey", ""), dbWriteSlow/2, dbWriteSlow)
+ t.Log(str)
+ assert.Equal(t, false, logged)
+
+ ChangeSlowLogLevel(-1)
+ str, logged = LogLargeCollection(collectionLargeLen, NewSlowLogInfo("", "test", ""))
+ t.Log(str)
+ assert.Equal(t, false, logged)
+ assert.Equal(t, "", str)
+ str, logged = LogSlowDBWrite(dbWriteSlow, NewSlowLogInfo("test", "testkey", ""))
+ t.Log(str)
+ assert.Equal(t, "", str)
+
+ str, logged = LogSlowForSteps(dbWriteSlow, 1, NewSlowLogInfo("test", "testkey", ""), dbWriteSlow/2, dbWriteSlow)
+ t.Log(str)
+ assert.Equal(t, "", str)
+ assert.Equal(t, false, logged)
+
+ ChangeSlowLogLevel(int(common.LOG_DEBUG))
+ str, logged = LogLargeCollection(collectionMinLenForLog*2, NewSlowLogInfo("", "test", ""))
+ t.Log(str)
+ assert.Equal(t, true, logged)
+
+ str, logged = LogSlowDBWrite(dbWriteSlow, NewSlowLogInfo("test", "testkey", ""))
+ t.Log(str)
+ assert.Equal(t, true, logged)
+
+ str, logged = LogSlowForSteps(dbWriteSlow, common.LOG_DEBUG, NewSlowLogInfo("test", "testkey", ""), dbWriteSlow/2, dbWriteSlow)
+ t.Log(str)
+ assert.Equal(t, true, logged)
+ ChangeSlowLogLevel(0)
+ // test that repeated large-collection logs are throttled within the rate-limit window
+ str, logged = LogLargeCollection(collectionLargeLen, NewSlowLogInfo("scope_test", "test", ""))
+ t.Log(str)
+ assert.Equal(t, true, logged)
+ str, logged = LogLargeCollection(collectionLargeLen, NewSlowLogInfo("scope_test", "test", ""))
+ t.Log(str)
+ assert.Equal(t, false, logged)
+ str, logged = LogLargeCollection(collectionLargeLen, NewSlowLogInfo("scope_test", "test", ""))
+ t.Log(str)
+ assert.Equal(t, false, logged)
+ time.Sleep(3 * time.Second)
+ str, logged = LogLargeCollection(collectionLargeLen, NewSlowLogInfo("scope_test", "test", ""))
+ t.Log(str)
+ assert.Equal(t, true, logged)
+ str, logged = LogLargeCollection(collectionLargeLen, NewSlowLogInfo("scope_test", "test", ""))
+ t.Log(str)
+ assert.Equal(t, false, logged)
+}
+
+func TestSlowLogRemote(t *testing.T) {
+ SetRemoteLogger("127.0.0.1:5140")
+ str, logged := LogSlowDBWrite(dbWriteSlow, NewSlowLogInfo("test", "testkey", "slow db write note"))
+ t.Log(str)
+ assert.Equal(t, true, logged)
+ time.Sleep(time.Second * 2)
+ str, logged = LogSlowForSteps(dbWriteSlow/4, common.LOG_ERR, NewSlowLogInfo("test", "testkey", "slow for steps note"), dbWriteSlow/2, dbWriteSlow)
+ t.Log(str)
+ assert.Equal(t, true, logged)
+ time.Sleep(time.Second * 2)
+ str, logged = LogLargeCollection(collectionLargeLen, NewSlowLogInfo("scope_test", "test", "slow for large note"))
+ t.Log(str)
+ assert.Equal(t, true, logged)
+ // wait remote log flush
+ time.Sleep(time.Second * 10)
+}
+
+func BenchmarkLogLarge(b *testing.B) {
+ SetLogger(common.LOG_INFO, common.NewDefaultLogger("test"))
+
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ LogLargeCollection(collectionLargeLen, NewSlowLogInfo("scope_test", "test", ""))
+ }
+}
diff --git a/snap/db.go b/snap/db.go
index f9b6cf49..ca1f5e48 100644
--- a/snap/db.go
+++ b/snap/db.go
@@ -22,8 +22,8 @@ import (
"os"
"path/filepath"
- "github.com/absolute8511/ZanRedisDB/pkg/fileutil"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/pkg/fileutil"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
)
var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist")
diff --git a/snap/message.go b/snap/message.go
index 60a4957e..aa190b66 100644
--- a/snap/message.go
+++ b/snap/message.go
@@ -17,8 +17,8 @@ package snap
import (
"io"
- "github.com/absolute8511/ZanRedisDB/pkg/ioutil"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/pkg/ioutil"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
)
// Message is a struct that contains a raft Message and a ReadCloser. The type
diff --git a/snap/snappb/snap.pb.go b/snap/snappb/snap.pb.go
index 20a44339..6ccda7b2 100644
--- a/snap/snappb/snap.pb.go
+++ b/snap/snappb/snap.pb.go
@@ -1,23 +1,15 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: snap.proto
-/*
- Package snappb is a generated protocol buffer package.
-
- It is generated from these files:
- snap.proto
-
- It has these top-level messages:
- Snapshot
-*/
package snappb
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-
-import io "io"
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -28,22 +20,65 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type Snapshot struct {
- Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"`
- Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"`
+ Data []byte `protobuf:"bytes,2,opt,name=data" json:"data"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f2e3c045ebf84d00, []int{0}
+}
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Snapshot) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Snapshot.Merge(m, src)
+}
+func (m *Snapshot) XXX_Size() int {
+ return m.Size()
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+ xxx_messageInfo_Snapshot.DiscardUnknown(m)
}
-func (m *Snapshot) Reset() { *m = Snapshot{} }
-func (m *Snapshot) String() string { return proto.CompactTextString(m) }
-func (*Snapshot) ProtoMessage() {}
-func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnap, []int{0} }
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
func init() {
proto.RegisterType((*Snapshot)(nil), "snappb.snapshot")
}
+
+func init() { proto.RegisterFile("snap.proto", fileDescriptor_f2e3c045ebf84d00) }
+
+var fileDescriptor_f2e3c045ebf84d00 = []byte{
+ // 136 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c,
+ 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3,
+ 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x86, 0x8b, 0x03, 0x24, 0x5f, 0x9c,
+ 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb,
+ 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 0x48, 0x82, 0x8b, 0x25, 0x25, 0xb1, 0x24,
+ 0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x07, 0x2a, 0x01, 0x16, 0x71, 0x92, 0x39, 0xf1, 0x50, 0x8e,
+ 0xe1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58,
+ 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0x00, 0x01, 0x00, 0x00, 0xff, 0xff,
+ 0x12, 0xb2, 0x34, 0xac, 0x86, 0x00, 0x00, 0x00,
+}
+
func (m *Snapshot) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -68,9 +103,6 @@ func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintSnap(dAtA, i, uint64(len(m.Data)))
i += copy(dAtA[i:], m.Data)
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
return i, nil
}
@@ -84,6 +116,9 @@ func encodeVarintSnap(dAtA []byte, offset int, v uint64) int {
return offset + 1
}
func (m *Snapshot) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
n += 1 + sovSnap(uint64(m.Crc))
@@ -91,9 +126,6 @@ func (m *Snapshot) Size() (n int) {
l = len(m.Data)
n += 1 + l + sovSnap(uint64(l))
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
return n
}
@@ -125,7 +157,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -153,7 +185,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Crc |= (uint32(b) & 0x7F) << shift
+ m.Crc |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -172,7 +204,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -181,6 +213,9 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSnap
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSnap
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -198,10 +233,12 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthSnap
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthSnap
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -265,10 +302,13 @@ func skipSnap(dAtA []byte) (n int, err error) {
break
}
}
- iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthSnap
}
+ iNdEx += length
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthSnap
+ }
return iNdEx, nil
case 3:
for {
@@ -297,6 +337,9 @@ func skipSnap(dAtA []byte) (n int, err error) {
return 0, err
}
iNdEx = start + next
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthSnap
+ }
}
return iNdEx, nil
case 4:
@@ -315,17 +358,3 @@ var (
ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowSnap = fmt.Errorf("proto: integer overflow")
)
-
-func init() { proto.RegisterFile("snap.proto", fileDescriptorSnap) }
-
-var fileDescriptorSnap = []byte{
- // 126 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c,
- 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3,
- 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c,
- 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb,
- 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 0x48, 0x88, 0x8b, 0x25, 0x25, 0xb1, 0x24,
- 0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0x12, 0x39, 0xf1, 0x50, 0x8e, 0xe1,
- 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e,
- 0x01, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x0f, 0x32, 0xb2, 0x78, 0x00, 0x00, 0x00,
-}
diff --git a/snap/snapshotter.go b/snap/snapshotter.go
index ecd40058..c2e197c4 100644
--- a/snap/snapshotter.go
+++ b/snap/snapshotter.go
@@ -23,15 +23,17 @@ import (
"os"
"path/filepath"
"sort"
+ "strconv"
"strings"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- pioutil "github.com/absolute8511/ZanRedisDB/pkg/ioutil"
- "github.com/absolute8511/ZanRedisDB/pkg/pbutil"
- "github.com/absolute8511/ZanRedisDB/raft"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/snap/snappb"
+ "github.com/youzan/ZanRedisDB/common"
+ pioutil "github.com/youzan/ZanRedisDB/pkg/ioutil"
+ "github.com/youzan/ZanRedisDB/pkg/pbutil"
+ "github.com/youzan/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/snap/snappb"
+ "github.com/youzan/ZanRedisDB/wal/walpb"
)
const (
@@ -79,51 +81,67 @@ func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error {
d, err := snap.Marshal()
if err != nil {
return err
- } else {
- marshallingDurations.Observe(float64(time.Since(start)) / float64(time.Second))
}
+ marshallingDurations.Observe(float64(time.Since(start)) / float64(time.Second))
- err = pioutil.WriteAndSyncFile(filepath.Join(s.dir, fname), d, 0666)
- if err == nil {
- saveDurations.Observe(float64(time.Since(start)) / float64(time.Second))
- } else {
- err1 := os.Remove(filepath.Join(s.dir, fname))
+ spath := filepath.Join(s.dir, fname)
+ err = pioutil.WriteAndSyncFile(spath, d, 0666)
+ if err != nil {
+ plog.Warningf("failed to write a snap file: %s", err)
+ err1 := os.Remove(spath)
if err1 != nil {
- plog.Errorf("failed to remove broken snapshot file %s", filepath.Join(s.dir, fname))
+ plog.Errorf("failed to remove broken snapshot file %s, %s", spath, err1)
}
+ return err
}
- return err
+ saveDurations.Observe(time.Since(start).Seconds())
+ return nil
}
-func (s *Snapshotter) RemoveSnap(snapName string) {
- fpath := filepath.Join(s.dir, snapName)
- renameBroken(fpath)
+// Load returns the newest snapshot.
+func (s *Snapshotter) Load() (*raftpb.Snapshot, error) {
+ return s.loadMatching(func(*raftpb.Snapshot) bool { return true })
}
-func (s *Snapshotter) Load() (*raftpb.Snapshot, string, error) {
+// LoadNewestAvailable loads the newest snapshot available that is in walSnaps.
+func (s *Snapshotter) LoadNewestAvailable(walSnaps []walpb.Snapshot) (*raftpb.Snapshot, error) {
+ return s.loadMatching(func(snapshot *raftpb.Snapshot) bool {
+ m := snapshot.Metadata
+ for i := len(walSnaps) - 1; i >= 0; i-- {
+ if m.Term == walSnaps[i].Term && m.Index == walSnaps[i].Index {
+ return true
+ }
+ }
+ return false
+ })
+}
+
+// loadMatching returns the newest snapshot where matchFn returns true.
+func (s *Snapshotter) loadMatching(matchFn func(*raftpb.Snapshot) bool) (*raftpb.Snapshot, error) {
names, err := s.snapNames()
if err != nil {
- return nil, "", err
+ return nil, err
}
var snap *raftpb.Snapshot
- var snapName string
for _, name := range names {
- if snap, err = loadSnap(s.dir, name); err == nil {
- snapName = name
- break
+ if snap, err = loadSnap(s.dir, name); err == nil && matchFn(snap) {
+ return snap, nil
}
}
- if err != nil {
- return nil, "", ErrNoSnapshot
- }
- return snap, snapName, nil
+ return nil, ErrNoSnapshot
}
func loadSnap(dir, name string) (*raftpb.Snapshot, error) {
fpath := filepath.Join(dir, name)
snap, err := Read(fpath)
if err != nil {
- renameBroken(fpath)
+ brokenPath := fpath + ".broken"
+ plog.Warningf("failed to read a snap file %s, %s", fpath, err)
+ if rerr := os.Rename(fpath, brokenPath); rerr != nil {
+ plog.Warningf("failed to rename a broken snap file %s, %s, %s", fpath, brokenPath, rerr)
+ } else {
+ plog.Warningf("renamed to a broken snap file %s, %s", fpath, brokenPath)
+ }
}
return snap, err
}
@@ -132,35 +150,29 @@ func loadSnap(dir, name string) (*raftpb.Snapshot, error) {
func Read(snapname string) (*raftpb.Snapshot, error) {
b, err := ioutil.ReadFile(snapname)
if err != nil {
- plog.Errorf("cannot read file %v: %v", snapname, err)
return nil, err
}
if len(b) == 0 {
- plog.Errorf("unexpected empty snapshot")
return nil, ErrEmptySnapshot
}
var serializedSnap snappb.Snapshot
if err = serializedSnap.Unmarshal(b); err != nil {
- plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
return nil, err
}
if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 {
- plog.Errorf("unexpected empty snapshot")
return nil, ErrEmptySnapshot
}
crc := crc32.Update(0, crcTable, serializedSnap.Data)
if crc != serializedSnap.Crc {
- plog.Errorf("corrupted snapshot file %v: crc mismatch", snapname)
return nil, ErrCRCMismatch
}
var snap raftpb.Snapshot
if err = snap.Unmarshal(serializedSnap.Data); err != nil {
- plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
return nil, err
}
return &snap, nil
@@ -178,7 +190,11 @@ func (s *Snapshotter) snapNames() ([]string, error) {
if err != nil {
return nil, err
}
- snaps := checkSuffix(names)
+ filenames, err := s.cleanupSnapdir(names)
+ if err != nil {
+ return nil, err
+ }
+ snaps := checkSuffix(filenames)
if len(snaps) == 0 {
return nil, ErrNoSnapshot
}
@@ -195,13 +211,59 @@ func checkSuffix(names []string) []string {
// If we find a file which is not a snapshot then check if it's
// a valid file. If not, throw out a warning.
if _, ok := validFiles[names[i]]; !ok {
- plog.Warningf("skipped unexpected non snapshot file %v", names[i])
+ plog.Warningf("found unexpected non-snap file; skipping %s", names[i])
}
}
}
return snaps
}
+// cleanupSnapdir removes any files that should not be in the snapshot directory:
+// - db.tmp prefixed files that can be orphaned by defragmentation
+func (s *Snapshotter) cleanupSnapdir(filenames []string) (names []string, err error) {
+ names = make([]string, 0, len(filenames))
+ for _, filename := range filenames {
+ if strings.HasPrefix(filename, "db.tmp") {
+ plog.Infof("found orphaned defragmentation file; deleting %s", filename)
+ if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
+ return names, fmt.Errorf("failed to remove orphaned defragmentation file %s: %v", filename, rmErr)
+ }
+ } else {
+ names = append(names, filename)
+ }
+ }
+ return names, nil
+}
+
+func (s *Snapshotter) ReleaseSnapDBs(snap raftpb.Snapshot) error {
+ dir, err := os.Open(s.dir)
+ if err != nil {
+ return err
+ }
+ defer dir.Close()
+ filenames, err := dir.Readdirnames(-1)
+ if err != nil {
+ return err
+ }
+ for _, filename := range filenames {
+ if strings.HasSuffix(filename, ".snap.db") {
+ hexIndex := strings.TrimSuffix(filepath.Base(filename), ".snap.db")
+ index, err := strconv.ParseUint(hexIndex, 16, 64)
+ if err != nil {
+ plog.Errorf("failed to parse index from filename %s, %s", filename, err.Error())
+ continue
+ }
+ if index < snap.Metadata.Index {
+ plog.Infof("found orphaned .snap.db file; deleting %s", filename)
+ if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
+ plog.Errorf("failed to remove orphaned .snap.db file %s, %s", filename, rmErr.Error())
+ }
+ }
+ }
+ }
+ return nil
+}
+
func renameBroken(path string) {
brokenPath := path + ".broken"
if err := os.Rename(path, brokenPath); err != nil {
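Reviewer note on the snapshotter API rework above: `Load()` now returns only `(*raftpb.Snapshot, error)`, `LoadNewestAvailable(walSnaps)` returns the newest snapshot whose (term, index) is also recorded in the WAL, and `ReleaseSnapDBs(snap)` prunes `*.snap.db` files older than the retained snapshot. The following is a minimal caller sketch only — the `restoreFromDisk` helper, its package name, and the `walSnaps` argument are hypothetical and are not taken from this patch's call sites.

```go
package raftnode // hypothetical package, for illustration only

import (
	"log"

	"github.com/youzan/ZanRedisDB/raft/raftpb"
	"github.com/youzan/ZanRedisDB/snap"
	"github.com/youzan/ZanRedisDB/wal/walpb"
)

// restoreFromDisk sketches how the reworked snapshotter API fits together.
func restoreFromDisk(snapDir string, walSnaps []walpb.Snapshot) *raftpb.Snapshot {
	ss := snap.New(snapDir)

	// Prefer the newest snapshot that the WAL also records, so WAL replay
	// resumes from a matching (term, index).
	raftSnap, err := ss.LoadNewestAvailable(walSnaps)
	if err == snap.ErrNoSnapshot {
		// Fall back to the newest snapshot on disk (the old Load behavior).
		raftSnap, err = ss.Load()
	}
	if err != nil {
		if err != snap.ErrNoSnapshot {
			log.Fatalf("load snapshot: %v", err)
		}
		return nil // nothing to restore from
	}

	// Drop *.snap.db files whose index is older than the snapshot we keep.
	if rerr := ss.ReleaseSnapDBs(*raftSnap); rerr != nil {
		log.Printf("release old snap dbs: %v", rerr)
	}
	return raftSnap
}
```

The fallback to `Load()` mirrors the pre-change behavior for callers that do not yet track WAL snapshot entries.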
diff --git a/snap/snapshotter_test.go b/snap/snapshotter_test.go
index 890b3050..a1d7ac74 100644
--- a/snap/snapshotter_test.go
+++ b/snap/snapshotter_test.go
@@ -23,7 +23,9 @@ import (
"reflect"
"testing"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/pkg/fileutil"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/wal/walpb"
)
var testSnap = &raftpb.Snapshot{
@@ -50,7 +52,7 @@ func TestSaveAndLoad(t *testing.T) {
t.Fatal(err)
}
- g, _, err := ss.Load()
+ g, err := ss.Load()
if err != nil {
t.Errorf("err = %v, want nil", err)
}
@@ -102,7 +104,7 @@ func TestFailback(t *testing.T) {
t.Fatal(err)
}
- g, _, err := ss.Load()
+ g, err := ss.Load()
if err != nil {
t.Errorf("err = %v, want nil", err)
}
@@ -165,12 +167,47 @@ func TestLoadNewestSnap(t *testing.T) {
t.Fatal(err)
}
- g, _, err := ss.Load()
- if err != nil {
- t.Errorf("err = %v, want nil", err)
+ cases := []struct {
+ name string
+ availableWalSnaps []walpb.Snapshot
+ expected *raftpb.Snapshot
+ }{
+ {
+ name: "load-newest",
+ expected: &newSnap,
+ },
+ {
+ name: "loadnewestavailable-newest",
+ availableWalSnaps: []walpb.Snapshot{{Index: 0, Term: 0}, {Index: 1, Term: 1}, {Index: 5, Term: 1}},
+ expected: &newSnap,
+ },
+ {
+ name: "loadnewestavailable-newest-unsorted",
+ availableWalSnaps: []walpb.Snapshot{{Index: 5, Term: 1}, {Index: 1, Term: 1}, {Index: 0, Term: 0}},
+ expected: &newSnap,
+ },
+ {
+ name: "loadnewestavailable-previous",
+ availableWalSnaps: []walpb.Snapshot{{Index: 0, Term: 0}, {Index: 1, Term: 1}},
+ expected: testSnap,
+ },
}
- if !reflect.DeepEqual(g, &newSnap) {
- t.Errorf("snap = %#v, want %#v", g, &newSnap)
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ var err error
+ var g *raftpb.Snapshot
+ if tc.availableWalSnaps != nil {
+ g, err = ss.LoadNewestAvailable(tc.availableWalSnaps)
+ } else {
+ g, err = ss.Load()
+ }
+ if err != nil {
+ t.Errorf("err = %v, want nil", err)
+ }
+ if !reflect.DeepEqual(g, tc.expected) {
+ t.Errorf("snap = %#v, want %#v", g, tc.expected)
+ }
+ })
}
}
@@ -182,7 +219,7 @@ func TestNoSnapshot(t *testing.T) {
}
defer os.RemoveAll(dir)
ss := New(dir)
- _, _, err = ss.Load()
+ _, err = ss.Load()
if err != ErrNoSnapshot {
t.Errorf("err = %v, want %v", err, ErrNoSnapshot)
}
@@ -223,8 +260,47 @@ func TestAllSnapshotBroken(t *testing.T) {
}
ss := New(dir)
- _, _, err = ss.Load()
+ _, err = ss.Load()
if err != ErrNoSnapshot {
t.Errorf("err = %v, want %v", err, ErrNoSnapshot)
}
}
+
+func TestReleaseSnapDBs(t *testing.T) {
+ dir := filepath.Join(os.TempDir(), "snapshot")
+ err := os.Mkdir(dir, 0700)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ snapIndices := []uint64{100, 200, 300, 400}
+ for _, index := range snapIndices {
+ filename := filepath.Join(dir, fmt.Sprintf("%016x.snap.db", index))
+ if err := ioutil.WriteFile(filename, []byte("snap file\n"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ ss := New(dir)
+
+ if err := ss.ReleaseSnapDBs(raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{Index: 300}}); err != nil {
+ t.Fatal(err)
+ }
+
+ deleted := []uint64{100, 200}
+ for _, index := range deleted {
+ filename := filepath.Join(dir, fmt.Sprintf("%016x.snap.db", index))
+ if fileutil.Exist(filename) {
+ t.Errorf("expected %s (index: %d) to be deleted, but it still exists", filename, index)
+ }
+ }
+
+ retained := []uint64{300, 400}
+ for _, index := range retained {
+ filename := filepath.Join(dir, fmt.Sprintf("%016x.snap.db", index))
+ if !fileutil.Exist(filename) {
+ t.Errorf("expected %s (index: %d) to be retained, but it no longer exists", filename, index)
+ }
+ }
+}
diff --git a/stats/server.go b/stats/server.go
index 2d0e37fe..177b6263 100644
--- a/stats/server.go
+++ b/stats/server.go
@@ -20,7 +20,7 @@ import (
"sync"
"time"
- "github.com/absolute8511/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/raft"
)
// TransportStats encapsulates various statistics about a raft Server and its
diff --git a/syncerpb/syncer.pb.go b/syncerpb/syncer.pb.go
index cc5e2d59..5fe62455 100644
--- a/syncerpb/syncer.pb.go
+++ b/syncerpb/syncer.pb.go
@@ -1,33 +1,17 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: syncer.proto
-/*
- Package syncerpb is a generated protocol buffer package.
-
- It is generated from these files:
- syncer.proto
-
- It has these top-level messages:
- RpcErr
- RaftLogData
- RaftReqs
- RaftApplySnapReq
- RaftApplySnapStatusReq
- RaftApplySnapStatusRsp
- SyncedRaftReq
- SyncedRaftRsp
-*/
package syncerpb
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-
-import context "golang.org/x/net/context"
-import grpc "google.golang.org/grpc"
-
-import io "io"
+import (
+ context "context"
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ io "io"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -38,7 +22,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type RaftLogType int32
@@ -52,6 +36,7 @@ var RaftLogType_name = map[int32]string{
0: "EntryNormalRaw",
1: "EntrySnapshotRawKV",
}
+
var RaftLogType_value = map[string]int32{
"EntryNormalRaw": 0,
"EntrySnapshotRawKV": 1,
@@ -60,7 +45,10 @@ var RaftLogType_value = map[string]int32{
func (x RaftLogType) String() string {
return proto.EnumName(RaftLogType_name, int32(x))
}
-func (RaftLogType) EnumDescriptor() ([]byte, []int) { return fileDescriptorSyncer, []int{0} }
+
+func (RaftLogType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_9577b640f2aab197, []int{0}
+}
type RaftApplySnapStatus int32
@@ -73,6 +61,7 @@ const (
ApplyFailed RaftApplySnapStatus = 5
ApplyMissing RaftApplySnapStatus = 6
ApplyOutofdate RaftApplySnapStatus = 7
+ ApplyWaitingBegin RaftApplySnapStatus = 8
)
var RaftApplySnapStatus_name = map[int32]string{
@@ -84,7 +73,9 @@ var RaftApplySnapStatus_name = map[int32]string{
5: "ApplyFailed",
6: "ApplyMissing",
7: "ApplyOutofdate",
+ 8: "ApplyWaitingBegin",
}
+
var RaftApplySnapStatus_value = map[string]int32{
"ApplyUnknown": 0,
"ApplyWaitingTransfer": 1,
@@ -94,12 +85,16 @@ var RaftApplySnapStatus_value = map[string]int32{
"ApplyFailed": 5,
"ApplyMissing": 6,
"ApplyOutofdate": 7,
+ "ApplyWaitingBegin": 8,
}
func (x RaftApplySnapStatus) String() string {
return proto.EnumName(RaftApplySnapStatus_name, int32(x))
}
-func (RaftApplySnapStatus) EnumDescriptor() ([]byte, []int) { return fileDescriptorSyncer, []int{1} }
+
+func (RaftApplySnapStatus) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_9577b640f2aab197, []int{1}
+}
type RaftApplySnapType int32
@@ -112,6 +107,7 @@ var RaftApplySnapType_name = map[int32]string{
0: "NormalSnap",
1: "SkippedSnap",
}
+
var RaftApplySnapType_value = map[string]int32{
"NormalSnap": 0,
"SkippedSnap": 1,
@@ -120,7 +116,10 @@ var RaftApplySnapType_value = map[string]int32{
func (x RaftApplySnapType) String() string {
return proto.EnumName(RaftApplySnapType_name, int32(x))
}
-func (RaftApplySnapType) EnumDescriptor() ([]byte, []int) { return fileDescriptorSyncer, []int{2} }
+
+func (RaftApplySnapType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_9577b640f2aab197, []int{2}
+}
type RpcErr struct {
ErrType int32 `protobuf:"varint,1,opt,name=err_type,json=errType,proto3" json:"err_type,omitempty"`
@@ -128,10 +127,38 @@ type RpcErr struct {
ErrMsg string `protobuf:"bytes,3,opt,name=err_msg,json=errMsg,proto3" json:"err_msg,omitempty"`
}
-func (m *RpcErr) Reset() { *m = RpcErr{} }
-func (m *RpcErr) String() string { return proto.CompactTextString(m) }
-func (*RpcErr) ProtoMessage() {}
-func (*RpcErr) Descriptor() ([]byte, []int) { return fileDescriptorSyncer, []int{0} }
+func (m *RpcErr) Reset() { *m = RpcErr{} }
+func (m *RpcErr) String() string { return proto.CompactTextString(m) }
+func (*RpcErr) ProtoMessage() {}
+func (*RpcErr) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9577b640f2aab197, []int{0}
+}
+func (m *RpcErr) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RpcErr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_RpcErr.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *RpcErr) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RpcErr.Merge(m, src)
+}
+func (m *RpcErr) XXX_Size() int {
+ return m.Size()
+}
+func (m *RpcErr) XXX_DiscardUnknown() {
+ xxx_messageInfo_RpcErr.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RpcErr proto.InternalMessageInfo
type RaftLogData struct {
Type RaftLogType `protobuf:"varint,1,opt,name=type,proto3,enum=syncerpb.RaftLogType" json:"type,omitempty"`
@@ -148,19 +175,75 @@ type RaftLogData struct {
Data []byte `protobuf:"bytes,15,opt,name=data,proto3" json:"data,omitempty"`
}
-func (m *RaftLogData) Reset() { *m = RaftLogData{} }
-func (m *RaftLogData) String() string { return proto.CompactTextString(m) }
-func (*RaftLogData) ProtoMessage() {}
-func (*RaftLogData) Descriptor() ([]byte, []int) { return fileDescriptorSyncer, []int{1} }
+func (m *RaftLogData) Reset() { *m = RaftLogData{} }
+func (m *RaftLogData) String() string { return proto.CompactTextString(m) }
+func (*RaftLogData) ProtoMessage() {}
+func (*RaftLogData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9577b640f2aab197, []int{1}
+}
+func (m *RaftLogData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RaftLogData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_RaftLogData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *RaftLogData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RaftLogData.Merge(m, src)
+}
+func (m *RaftLogData) XXX_Size() int {
+ return m.Size()
+}
+func (m *RaftLogData) XXX_DiscardUnknown() {
+ xxx_messageInfo_RaftLogData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RaftLogData proto.InternalMessageInfo
type RaftReqs struct {
- RaftLog []*RaftLogData `protobuf:"bytes,1,rep,name=raft_log,json=raftLog" json:"raft_log,omitempty"`
+ RaftLog []RaftLogData `protobuf:"bytes,1,rep,name=raft_log,json=raftLog,proto3" json:"raft_log"`
+}
+
+func (m *RaftReqs) Reset() { *m = RaftReqs{} }
+func (m *RaftReqs) String() string { return proto.CompactTextString(m) }
+func (*RaftReqs) ProtoMessage() {}
+func (*RaftReqs) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9577b640f2aab197, []int{2}
+}
+func (m *RaftReqs) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RaftReqs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_RaftReqs.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *RaftReqs) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RaftReqs.Merge(m, src)
+}
+func (m *RaftReqs) XXX_Size() int {
+ return m.Size()
+}
+func (m *RaftReqs) XXX_DiscardUnknown() {
+ xxx_messageInfo_RaftReqs.DiscardUnknown(m)
}
-func (m *RaftReqs) Reset() { *m = RaftReqs{} }
-func (m *RaftReqs) String() string { return proto.CompactTextString(m) }
-func (*RaftReqs) ProtoMessage() {}
-func (*RaftReqs) Descriptor() ([]byte, []int) { return fileDescriptorSyncer, []int{2} }
+var xxx_messageInfo_RaftReqs proto.InternalMessageInfo
type RaftApplySnapReq struct {
Type RaftApplySnapType `protobuf:"varint,1,opt,name=type,proto3,enum=syncerpb.RaftApplySnapType" json:"type,omitempty"`
@@ -179,10 +262,38 @@ type RaftApplySnapReq struct {
Data []byte `protobuf:"bytes,9,opt,name=data,proto3" json:"data,omitempty"`
}
-func (m *RaftApplySnapReq) Reset() { *m = RaftApplySnapReq{} }
-func (m *RaftApplySnapReq) String() string { return proto.CompactTextString(m) }
-func (*RaftApplySnapReq) ProtoMessage() {}
-func (*RaftApplySnapReq) Descriptor() ([]byte, []int) { return fileDescriptorSyncer, []int{3} }
+func (m *RaftApplySnapReq) Reset() { *m = RaftApplySnapReq{} }
+func (m *RaftApplySnapReq) String() string { return proto.CompactTextString(m) }
+func (*RaftApplySnapReq) ProtoMessage() {}
+func (*RaftApplySnapReq) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9577b640f2aab197, []int{3}
+}
+func (m *RaftApplySnapReq) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RaftApplySnapReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_RaftApplySnapReq.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *RaftApplySnapReq) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RaftApplySnapReq.Merge(m, src)
+}
+func (m *RaftApplySnapReq) XXX_Size() int {
+ return m.Size()
+}
+func (m *RaftApplySnapReq) XXX_DiscardUnknown() {
+ xxx_messageInfo_RaftApplySnapReq.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RaftApplySnapReq proto.InternalMessageInfo
type RaftApplySnapStatusReq struct {
// the name of the source cluster; if there are multiple source clusters for the syncer,
@@ -196,10 +307,38 @@ type RaftApplySnapStatusReq struct {
Index uint64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"`
}
-func (m *RaftApplySnapStatusReq) Reset() { *m = RaftApplySnapStatusReq{} }
-func (m *RaftApplySnapStatusReq) String() string { return proto.CompactTextString(m) }
-func (*RaftApplySnapStatusReq) ProtoMessage() {}
-func (*RaftApplySnapStatusReq) Descriptor() ([]byte, []int) { return fileDescriptorSyncer, []int{4} }
+func (m *RaftApplySnapStatusReq) Reset() { *m = RaftApplySnapStatusReq{} }
+func (m *RaftApplySnapStatusReq) String() string { return proto.CompactTextString(m) }
+func (*RaftApplySnapStatusReq) ProtoMessage() {}
+func (*RaftApplySnapStatusReq) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9577b640f2aab197, []int{4}
+}
+func (m *RaftApplySnapStatusReq) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RaftApplySnapStatusReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_RaftApplySnapStatusReq.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *RaftApplySnapStatusReq) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RaftApplySnapStatusReq.Merge(m, src)
+}
+func (m *RaftApplySnapStatusReq) XXX_Size() int {
+ return m.Size()
+}
+func (m *RaftApplySnapStatusReq) XXX_DiscardUnknown() {
+ xxx_messageInfo_RaftApplySnapStatusReq.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RaftApplySnapStatusReq proto.InternalMessageInfo
type RaftApplySnapStatusRsp struct {
Status RaftApplySnapStatus `protobuf:"varint,1,opt,name=status,proto3,enum=syncerpb.RaftApplySnapStatus" json:"status,omitempty"`
@@ -207,20 +346,76 @@ type RaftApplySnapStatusRsp struct {
StatusMsg string `protobuf:"bytes,3,opt,name=status_msg,json=statusMsg,proto3" json:"status_msg,omitempty"`
}
-func (m *RaftApplySnapStatusRsp) Reset() { *m = RaftApplySnapStatusRsp{} }
-func (m *RaftApplySnapStatusRsp) String() string { return proto.CompactTextString(m) }
-func (*RaftApplySnapStatusRsp) ProtoMessage() {}
-func (*RaftApplySnapStatusRsp) Descriptor() ([]byte, []int) { return fileDescriptorSyncer, []int{5} }
+func (m *RaftApplySnapStatusRsp) Reset() { *m = RaftApplySnapStatusRsp{} }
+func (m *RaftApplySnapStatusRsp) String() string { return proto.CompactTextString(m) }
+func (*RaftApplySnapStatusRsp) ProtoMessage() {}
+func (*RaftApplySnapStatusRsp) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9577b640f2aab197, []int{5}
+}
+func (m *RaftApplySnapStatusRsp) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RaftApplySnapStatusRsp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_RaftApplySnapStatusRsp.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *RaftApplySnapStatusRsp) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RaftApplySnapStatusRsp.Merge(m, src)
+}
+func (m *RaftApplySnapStatusRsp) XXX_Size() int {
+ return m.Size()
+}
+func (m *RaftApplySnapStatusRsp) XXX_DiscardUnknown() {
+ xxx_messageInfo_RaftApplySnapStatusRsp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RaftApplySnapStatusRsp proto.InternalMessageInfo
type SyncedRaftReq struct {
ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
RaftGroupName string `protobuf:"bytes,2,opt,name=raft_group_name,json=raftGroupName,proto3" json:"raft_group_name,omitempty"`
}
-func (m *SyncedRaftReq) Reset() { *m = SyncedRaftReq{} }
-func (m *SyncedRaftReq) String() string { return proto.CompactTextString(m) }
-func (*SyncedRaftReq) ProtoMessage() {}
-func (*SyncedRaftReq) Descriptor() ([]byte, []int) { return fileDescriptorSyncer, []int{6} }
+func (m *SyncedRaftReq) Reset() { *m = SyncedRaftReq{} }
+func (m *SyncedRaftReq) String() string { return proto.CompactTextString(m) }
+func (*SyncedRaftReq) ProtoMessage() {}
+func (*SyncedRaftReq) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9577b640f2aab197, []int{6}
+}
+func (m *SyncedRaftReq) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SyncedRaftReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SyncedRaftReq.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SyncedRaftReq) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SyncedRaftReq.Merge(m, src)
+}
+func (m *SyncedRaftReq) XXX_Size() int {
+ return m.Size()
+}
+func (m *SyncedRaftReq) XXX_DiscardUnknown() {
+ xxx_messageInfo_SyncedRaftReq.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SyncedRaftReq proto.InternalMessageInfo
type SyncedRaftRsp struct {
Term uint64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"`
@@ -228,12 +423,43 @@ type SyncedRaftRsp struct {
Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
}
-func (m *SyncedRaftRsp) Reset() { *m = SyncedRaftRsp{} }
-func (m *SyncedRaftRsp) String() string { return proto.CompactTextString(m) }
-func (*SyncedRaftRsp) ProtoMessage() {}
-func (*SyncedRaftRsp) Descriptor() ([]byte, []int) { return fileDescriptorSyncer, []int{7} }
+func (m *SyncedRaftRsp) Reset() { *m = SyncedRaftRsp{} }
+func (m *SyncedRaftRsp) String() string { return proto.CompactTextString(m) }
+func (*SyncedRaftRsp) ProtoMessage() {}
+func (*SyncedRaftRsp) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9577b640f2aab197, []int{7}
+}
+func (m *SyncedRaftRsp) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SyncedRaftRsp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SyncedRaftRsp.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SyncedRaftRsp) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SyncedRaftRsp.Merge(m, src)
+}
+func (m *SyncedRaftRsp) XXX_Size() int {
+ return m.Size()
+}
+func (m *SyncedRaftRsp) XXX_DiscardUnknown() {
+ xxx_messageInfo_SyncedRaftRsp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SyncedRaftRsp proto.InternalMessageInfo
func init() {
+ proto.RegisterEnum("syncerpb.RaftLogType", RaftLogType_name, RaftLogType_value)
+ proto.RegisterEnum("syncerpb.RaftApplySnapStatus", RaftApplySnapStatus_name, RaftApplySnapStatus_value)
+ proto.RegisterEnum("syncerpb.RaftApplySnapType", RaftApplySnapType_name, RaftApplySnapType_value)
proto.RegisterType((*RpcErr)(nil), "syncerpb.RpcErr")
proto.RegisterType((*RaftLogData)(nil), "syncerpb.RaftLogData")
proto.RegisterType((*RaftReqs)(nil), "syncerpb.RaftReqs")
@@ -242,9 +468,62 @@ func init() {
proto.RegisterType((*RaftApplySnapStatusRsp)(nil), "syncerpb.RaftApplySnapStatusRsp")
proto.RegisterType((*SyncedRaftReq)(nil), "syncerpb.SyncedRaftReq")
proto.RegisterType((*SyncedRaftRsp)(nil), "syncerpb.SyncedRaftRsp")
- proto.RegisterEnum("syncerpb.RaftLogType", RaftLogType_name, RaftLogType_value)
- proto.RegisterEnum("syncerpb.RaftApplySnapStatus", RaftApplySnapStatus_name, RaftApplySnapStatus_value)
- proto.RegisterEnum("syncerpb.RaftApplySnapType", RaftApplySnapType_name, RaftApplySnapType_value)
+}
+
+func init() { proto.RegisterFile("syncer.proto", fileDescriptor_9577b640f2aab197) }
+
+var fileDescriptor_9577b640f2aab197 = []byte{
+ // 800 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x55, 0xdd, 0x6e, 0x23, 0x35,
+ 0x14, 0x1e, 0xe7, 0x3f, 0x27, 0x7f, 0xb3, 0xa6, 0xbb, 0x3b, 0x64, 0xd9, 0x61, 0x18, 0x09, 0x14,
+ 0x7a, 0xd1, 0x95, 0x96, 0x3f, 0x71, 0x99, 0x64, 0x97, 0x0a, 0xc1, 0x96, 0xd5, 0xa4, 0x50, 0xa9,
+ 0x37, 0x91, 0x9b, 0x71, 0xa6, 0xa3, 0x26, 0x63, 0xd7, 0x76, 0x54, 0xf2, 0x0a, 0x48, 0x48, 0xbc,
+ 0x08, 0xef, 0xd1, 0x1b, 0xa4, 0x5e, 0x72, 0x85, 0x68, 0x7b, 0xc3, 0x03, 0xf0, 0x00, 0xc8, 0x9e,
+ 0x64, 0x92, 0xa8, 0x29, 0x3f, 0x57, 0x68, 0xef, 0xec, 0xef, 0x7c, 0xfe, 0x8e, 0xcf, 0xe7, 0x63,
+ 0x1b, 0xea, 0x72, 0x9e, 0x8c, 0xa8, 0xd8, 0xe3, 0x82, 0x29, 0x86, 0x2b, 0xe9, 0x8c, 0x9f, 0xb4,
+ 0x77, 0x22, 0x16, 0x31, 0x03, 0x3e, 0xd3, 0xa3, 0x34, 0xee, 0x1f, 0x41, 0x29, 0xe0, 0xa3, 0x97,
+ 0x42, 0xe0, 0xb7, 0xa1, 0x42, 0x85, 0x18, 0xaa, 0x39, 0xa7, 0x0e, 0xf2, 0x50, 0xa7, 0x18, 0x94,
+ 0xa9, 0x10, 0x87, 0x73, 0x4e, 0x97, 0xa1, 0x11, 0x0b, 0xa9, 0x93, 0xcb, 0x42, 0x7d, 0x16, 0x52,
+ 0xfc, 0x18, 0xf4, 0x70, 0x38, 0x95, 0x91, 0x93, 0xf7, 0x50, 0xa7, 0x1a, 0x94, 0xa8, 0x10, 0xaf,
+ 0x64, 0xe4, 0xff, 0x81, 0xa0, 0x16, 0x90, 0xb1, 0xfa, 0x9a, 0x45, 0x2f, 0x88, 0x22, 0xf8, 0x43,
+ 0x28, 0x64, 0xd2, 0xcd, 0xe7, 0x0f, 0xf7, 0x96, 0xfb, 0xda, 0x5b, 0x90, 0x74, 0xa2, 0xc0, 0x50,
+ 0xf0, 0x7b, 0x50, 0x1f, 0x4d, 0x66, 0x52, 0x51, 0x31, 0x4c, 0xc8, 0x34, 0x4d, 0x59, 0x0d, 0x6a,
+ 0x0b, 0xec, 0x80, 0x4c, 0x29, 0xfe, 0x00, 0x5a, 0x82, 0x8c, 0xd5, 0x30, 0x12, 0x6c, 0xc6, 0x53,
+ 0x56, 0x9a, 0xbe, 0xa1, 0xe1, 0x7d, 0x8d, 0x1a, 0x1e, 0x86, 0x82, 0xa2, 0x62, 0xea, 0x14, 0x3c,
+ 0xd4, 0x29, 0x04, 0x66, 0x8c, 0x77, 0xa0, 0x18, 0x27, 0x21, 0xfd, 0xde, 0x29, 0x1a, 0x30, 0x9d,
+ 0xe0, 0xf7, 0xa1, 0x69, 0x14, 0x55, 0x3c, 0xa5, 0x52, 0x91, 0x29, 0x77, 0x4a, 0x1e, 0xea, 0xe4,
+ 0x53, 0xc1, 0xc3, 0x25, 0xa8, 0x05, 0x43, 0xa2, 0x88, 0xd3, 0xf2, 0x50, 0xa7, 0x1e, 0x98, 0xb1,
+ 0xdf, 0x83, 0x8a, 0x2e, 0x22, 0xa0, 0xe7, 0x12, 0x7f, 0x0a, 0x15, 0x23, 0x33, 0x61, 0x91, 0x83,
+ 0xbc, 0x7c, 0xa7, 0xb6, 0xa5, 0x54, 0xed, 0x47, 0xaf, 0x70, 0xf9, 0xdb, 0xbb, 0x56, 0x50, 0x16,
+ 0x29, 0xe4, 0xff, 0x9c, 0x03, 0x5b, 0x87, 0xbb, 0x9c, 0x4f, 0xe6, 0x83, 0x84, 0xf0, 0x80, 0x9e,
+ 0xe3, 0x67, 0x1b, 0x9e, 0x3d, 0xd9, 0x14, 0xca, 0x98, 0x6f, 0x88, 0x73, 0x4f, 0xa0, 0xaa, 0xf7,
+ 0x3f, 0x24, 0x61, 0x28, 0x9c, 0xb2, 0x49, 0x69, 0x9a, 0xb3, 0x1b, 0x86, 0x22, 0x0b, 0x72, 0xa2,
+ 0x4e, 0x9d, 0xca, 0x2a, 0xf8, 0x9a, 0xa8, 0xd3, 0xcc, 0xf3, 0xea, 0x9a, 0xe7, 0x3f, 0x22, 0x78,
+ 0xb4, 0xe1, 0xc2, 0x40, 0x11, 0x35, 0x93, 0xda, 0xb5, 0xff, 0xc3, 0x04, 0xff, 0x87, 0x7b, 0xf6,
+ 0x23, 0x39, 0xfe, 0x04, 0x4a, 0xd2, 0x4c, 0x16, 0xe7, 0xf8, 0xf4, 0x9e, 0x73, 0x5c, 0xac, 0x58,
+ 0x90, 0x71, 0x1b, 0x2a, 0x5c, 0xb0, 0x48, 0x50, 0x29, 0x4d, 0x09, 0x85, 0x20, 0x9b, 0xe3, 0xa7,
+ 0x00, 0x29, 0x6b, 0xed, 0xe2, 0x55, 0x53, 0x44, 0xdf, 0xbd, 0x63, 0x68, 0x0c, 0x74, 0x8a, 0x70,
+ 0xd1, 0x96, 0x77, 0x2c, 0x41, 0xff, 0xca, 0x92, 0xdc, 0x16, 0x4b, 0xfc, 0xa3, 0x0d, 0x6d, 0xc9,
+ 0x33, 0x8f, 0xd0, 0x36, 0x8f, 0x72, 0xeb, 0x8d, 0xf2, 0x0e, 0x54, 0x57, 0x3d, 0x92, 0x37, 0x3d,
+ 0xb2, 0x02, 0x76, 0x3f, 0xcf, 0xde, 0x0b, 0xf3, 0xe6, 0x60, 0x68, 0xbe, 0x4c, 0x94, 0x98, 0x1f,
+ 0x30, 0x31, 0x25, 0x93, 0x80, 0x5c, 0xd8, 0x16, 0x7e, 0x04, 0xd8, 0x60, 0xda, 0x2d, 0x79, 0xca,
+ 0x54, 0x40, 0x2e, 0xbe, 0xfa, 0xce, 0x46, 0xbb, 0xbf, 0x20, 0x78, 0x6b, 0x8b, 0x95, 0xd8, 0x86,
+ 0xba, 0x81, 0xbe, 0x4d, 0xce, 0x12, 0x76, 0x91, 0xd8, 0x16, 0x76, 0x60, 0xc7, 0x20, 0x47, 0x24,
+ 0x56, 0x71, 0x12, 0x1d, 0x0a, 0x92, 0xc8, 0x31, 0x15, 0x36, 0xca, 0x22, 0x4b, 0x68, 0x30, 0x1b,
+ 0x8d, 0xa8, 0x94, 0x76, 0x2e, 0x53, 0x59, 0xac, 0xb1, 0xf3, 0x19, 0xb2, 0xe4, 0x14, 0x70, 0x0b,
+ 0x6a, 0x06, 0xf9, 0x82, 0xc4, 0x13, 0x1a, 0xda, 0xc5, 0x8c, 0xf2, 0x2a, 0x96, 0x52, 0x2f, 0x2a,
+ 0xe9, 0x82, 0x0c, 0xf2, 0xcd, 0x4c, 0xb1, 0x71, 0x48, 0x14, 0xb5, 0xcb, 0xf8, 0x21, 0x3c, 0x58,
+ 0x97, 0xee, 0xd1, 0x28, 0x4e, 0xec, 0xca, 0xee, 0xc7, 0xf0, 0xe0, 0xce, 0x0d, 0xc7, 0x4d, 0x80,
+ 0xd4, 0x0b, 0x8d, 0xd8, 0x96, 0x4e, 0x39, 0x38, 0x8b, 0x39, 0xa7, 0xa1, 0x01, 0xd0, 0xf3, 0x3f,
+ 0x73, 0xd0, 0xea, 0x0b, 0x26, 0x65, 0x3f, 0x3d, 0xd6, 0xee, 0xeb, 0x2f, 0xf1, 0x67, 0xd0, 0x30,
+ 0x2a, 0xd9, 0xfb, 0x84, 0x37, 0x9b, 0x4f, 0x63, 0x6d, 0x7b, 0x0d, 0x33, 0x7f, 0x81, 0x6f, 0xe1,
+ 0x3e, 0x34, 0xf6, 0xa9, 0x5a, 0x9d, 0x34, 0x7e, 0xbc, 0x22, 0x6d, 0xf4, 0x56, 0x7b, 0x7b, 0x40,
+ 0x72, 0xdf, 0xc2, 0x2f, 0x00, 0x1f, 0x30, 0x15, 0x8f, 0x57, 0xa6, 0x26, 0x84, 0xe3, 0xf6, 0x3d,
+ 0xfd, 0xaf, 0xc5, 0xb6, 0x6d, 0xa5, 0x0b, 0xad, 0x54, 0x25, 0x63, 0xfe, 0x67, 0x89, 0x63, 0xc0,
+ 0xfb, 0xf4, 0x4e, 0x7b, 0x78, 0x7f, 0x7f, 0x11, 0xe9, 0x79, 0xfb, 0x1f, 0x18, 0xba, 0xc8, 0x9e,
+ 0x77, 0x79, 0xed, 0x5a, 0x57, 0xd7, 0xae, 0x75, 0x79, 0xe3, 0xa2, 0xab, 0x1b, 0x17, 0xfd, 0x7e,
+ 0xe3, 0xa2, 0x9f, 0x6e, 0x5d, 0xeb, 0xea, 0xd6, 0xb5, 0x7e, 0xbd, 0x75, 0xad, 0x93, 0x92, 0xf9,
+ 0x6a, 0x3f, 0xfa, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x13, 0xd9, 0x12, 0x33, 0x9a, 0x07, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -255,8 +534,9 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
-// Client API for CrossClusterAPI service
-
+// CrossClusterAPIClient is the client API for CrossClusterAPI service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type CrossClusterAPIClient interface {
ApplyRaftReqs(ctx context.Context, in *RaftReqs, opts ...grpc.CallOption) (*RpcErr, error)
GetSyncedRaft(ctx context.Context, in *SyncedRaftReq, opts ...grpc.CallOption) (*SyncedRaftRsp, error)
@@ -275,7 +555,7 @@ func NewCrossClusterAPIClient(cc *grpc.ClientConn) CrossClusterAPIClient {
func (c *crossClusterAPIClient) ApplyRaftReqs(ctx context.Context, in *RaftReqs, opts ...grpc.CallOption) (*RpcErr, error) {
out := new(RpcErr)
- err := grpc.Invoke(ctx, "/syncerpb.CrossClusterAPI/ApplyRaftReqs", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/syncerpb.CrossClusterAPI/ApplyRaftReqs", in, out, opts...)
if err != nil {
return nil, err
}
@@ -284,7 +564,7 @@ func (c *crossClusterAPIClient) ApplyRaftReqs(ctx context.Context, in *RaftReqs,
func (c *crossClusterAPIClient) GetSyncedRaft(ctx context.Context, in *SyncedRaftReq, opts ...grpc.CallOption) (*SyncedRaftRsp, error) {
out := new(SyncedRaftRsp)
- err := grpc.Invoke(ctx, "/syncerpb.CrossClusterAPI/GetSyncedRaft", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/syncerpb.CrossClusterAPI/GetSyncedRaft", in, out, opts...)
if err != nil {
return nil, err
}
@@ -293,7 +573,7 @@ func (c *crossClusterAPIClient) GetSyncedRaft(ctx context.Context, in *SyncedRaf
func (c *crossClusterAPIClient) NotifyTransferSnap(ctx context.Context, in *RaftApplySnapReq, opts ...grpc.CallOption) (*RpcErr, error) {
out := new(RpcErr)
- err := grpc.Invoke(ctx, "/syncerpb.CrossClusterAPI/NotifyTransferSnap", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/syncerpb.CrossClusterAPI/NotifyTransferSnap", in, out, opts...)
if err != nil {
return nil, err
}
@@ -302,7 +582,7 @@ func (c *crossClusterAPIClient) NotifyTransferSnap(ctx context.Context, in *Raft
func (c *crossClusterAPIClient) NotifyApplySnap(ctx context.Context, in *RaftApplySnapReq, opts ...grpc.CallOption) (*RpcErr, error) {
out := new(RpcErr)
- err := grpc.Invoke(ctx, "/syncerpb.CrossClusterAPI/NotifyApplySnap", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/syncerpb.CrossClusterAPI/NotifyApplySnap", in, out, opts...)
if err != nil {
return nil, err
}
@@ -311,15 +591,14 @@ func (c *crossClusterAPIClient) NotifyApplySnap(ctx context.Context, in *RaftApp
func (c *crossClusterAPIClient) GetApplySnapStatus(ctx context.Context, in *RaftApplySnapStatusReq, opts ...grpc.CallOption) (*RaftApplySnapStatusRsp, error) {
out := new(RaftApplySnapStatusRsp)
- err := grpc.Invoke(ctx, "/syncerpb.CrossClusterAPI/GetApplySnapStatus", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/syncerpb.CrossClusterAPI/GetApplySnapStatus", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-// Server API for CrossClusterAPI service
-
+// CrossClusterAPIServer is the server API for CrossClusterAPI service.
type CrossClusterAPIServer interface {
ApplyRaftReqs(context.Context, *RaftReqs) (*RpcErr, error)
GetSyncedRaft(context.Context, *SyncedRaftReq) (*SyncedRaftRsp, error)
@@ -786,6 +1065,9 @@ func encodeVarintSyncer(dAtA []byte, offset int, v uint64) int {
return offset + 1
}
func (m *RpcErr) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
if m.ErrType != 0 {
@@ -802,6 +1084,9 @@ func (m *RpcErr) Size() (n int) {
}
func (m *RaftLogData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
if m.Type != 0 {
@@ -832,6 +1117,9 @@ func (m *RaftLogData) Size() (n int) {
}
func (m *RaftReqs) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
if len(m.RaftLog) > 0 {
@@ -844,6 +1132,9 @@ func (m *RaftReqs) Size() (n int) {
}
func (m *RaftApplySnapReq) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
if m.Type != 0 {
@@ -882,6 +1173,9 @@ func (m *RaftApplySnapReq) Size() (n int) {
}
func (m *RaftApplySnapStatusReq) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
l = len(m.ClusterName)
@@ -902,6 +1196,9 @@ func (m *RaftApplySnapStatusReq) Size() (n int) {
}
func (m *RaftApplySnapStatusRsp) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
if m.Status != 0 {
@@ -918,6 +1215,9 @@ func (m *RaftApplySnapStatusRsp) Size() (n int) {
}
func (m *SyncedRaftReq) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
l = len(m.ClusterName)
@@ -932,6 +1232,9 @@ func (m *SyncedRaftReq) Size() (n int) {
}
func (m *SyncedRaftRsp) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
if m.Term != 0 {
@@ -974,7 +1277,7 @@ func (m *RpcErr) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1002,7 +1305,7 @@ func (m *RpcErr) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.ErrType |= (int32(b) & 0x7F) << shift
+ m.ErrType |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1021,7 +1324,7 @@ func (m *RpcErr) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.ErrCode |= (int32(b) & 0x7F) << shift
+ m.ErrCode |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1040,7 +1343,7 @@ func (m *RpcErr) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1050,6 +1353,9 @@ func (m *RpcErr) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSyncer
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSyncer
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1064,6 +1370,9 @@ func (m *RpcErr) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthSyncer
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthSyncer
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -1091,7 +1400,7 @@ func (m *RaftLogData) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1119,7 +1428,7 @@ func (m *RaftLogData) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Type |= (RaftLogType(b) & 0x7F) << shift
+ m.Type |= RaftLogType(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1138,7 +1447,7 @@ func (m *RaftLogData) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1148,6 +1457,9 @@ func (m *RaftLogData) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSyncer
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSyncer
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1167,7 +1479,7 @@ func (m *RaftLogData) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1177,6 +1489,9 @@ func (m *RaftLogData) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSyncer
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSyncer
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1196,7 +1511,7 @@ func (m *RaftLogData) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Term |= (uint64(b) & 0x7F) << shift
+ m.Term |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1215,7 +1530,7 @@ func (m *RaftLogData) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Index |= (uint64(b) & 0x7F) << shift
+ m.Index |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1234,7 +1549,7 @@ func (m *RaftLogData) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.RaftTimestamp |= (int64(b) & 0x7F) << shift
+ m.RaftTimestamp |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1253,7 +1568,7 @@ func (m *RaftLogData) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1262,6 +1577,9 @@ func (m *RaftLogData) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSyncer
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSyncer
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1279,6 +1597,9 @@ func (m *RaftLogData) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthSyncer
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthSyncer
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -1306,7 +1627,7 @@ func (m *RaftReqs) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1334,7 +1655,7 @@ func (m *RaftReqs) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1343,10 +1664,13 @@ func (m *RaftReqs) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSyncer
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSyncer
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.RaftLog = append(m.RaftLog, &RaftLogData{})
+ m.RaftLog = append(m.RaftLog, RaftLogData{})
if err := m.RaftLog[len(m.RaftLog)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@@ -1360,6 +1684,9 @@ func (m *RaftReqs) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthSyncer
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthSyncer
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -1387,7 +1714,7 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1415,7 +1742,7 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Type |= (RaftApplySnapType(b) & 0x7F) << shift
+ m.Type |= RaftApplySnapType(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1434,7 +1761,7 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1444,6 +1771,9 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSyncer
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSyncer
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1463,7 +1793,7 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1473,6 +1803,9 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSyncer
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSyncer
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1492,7 +1825,7 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Term |= (uint64(b) & 0x7F) << shift
+ m.Term |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1511,7 +1844,7 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Index |= (uint64(b) & 0x7F) << shift
+ m.Index |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1530,7 +1863,7 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.RaftTimestamp |= (int64(b) & 0x7F) << shift
+ m.RaftTimestamp |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1549,7 +1882,7 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1559,6 +1892,9 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSyncer
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSyncer
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1578,7 +1914,7 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1588,6 +1924,9 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSyncer
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSyncer
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1607,7 +1946,7 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1616,6 +1955,9 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSyncer
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSyncer
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1633,6 +1975,9 @@ func (m *RaftApplySnapReq) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthSyncer
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthSyncer
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -1660,7 +2005,7 @@ func (m *RaftApplySnapStatusReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1688,7 +2033,7 @@ func (m *RaftApplySnapStatusReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1698,6 +2043,9 @@ func (m *RaftApplySnapStatusReq) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSyncer
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSyncer
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1717,7 +2065,7 @@ func (m *RaftApplySnapStatusReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1727,6 +2075,9 @@ func (m *RaftApplySnapStatusReq) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSyncer
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSyncer
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1746,7 +2097,7 @@ func (m *RaftApplySnapStatusReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Term |= (uint64(b) & 0x7F) << shift
+ m.Term |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1765,7 +2116,7 @@ func (m *RaftApplySnapStatusReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Index |= (uint64(b) & 0x7F) << shift
+ m.Index |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1779,6 +2130,9 @@ func (m *RaftApplySnapStatusReq) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthSyncer
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthSyncer
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -1806,7 +2160,7 @@ func (m *RaftApplySnapStatusRsp) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1834,7 +2188,7 @@ func (m *RaftApplySnapStatusRsp) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Status |= (RaftApplySnapStatus(b) & 0x7F) << shift
+ m.Status |= RaftApplySnapStatus(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1853,7 +2207,7 @@ func (m *RaftApplySnapStatusRsp) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Progress |= (uint64(b) & 0x7F) << shift
+ m.Progress |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1872,7 +2226,7 @@ func (m *RaftApplySnapStatusRsp) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1882,6 +2236,9 @@ func (m *RaftApplySnapStatusRsp) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSyncer
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSyncer
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1896,6 +2253,9 @@ func (m *RaftApplySnapStatusRsp) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthSyncer
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthSyncer
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -1923,7 +2283,7 @@ func (m *SyncedRaftReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1951,7 +2311,7 @@ func (m *SyncedRaftReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1961,6 +2321,9 @@ func (m *SyncedRaftReq) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSyncer
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSyncer
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1980,7 +2343,7 @@ func (m *SyncedRaftReq) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1990,6 +2353,9 @@ func (m *SyncedRaftReq) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthSyncer
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSyncer
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -2004,6 +2370,9 @@ func (m *SyncedRaftReq) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthSyncer
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthSyncer
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -2031,7 +2400,7 @@ func (m *SyncedRaftRsp) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -2059,7 +2428,7 @@ func (m *SyncedRaftRsp) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Term |= (uint64(b) & 0x7F) << shift
+ m.Term |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -2078,7 +2447,7 @@ func (m *SyncedRaftRsp) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Index |= (uint64(b) & 0x7F) << shift
+ m.Index |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -2097,7 +2466,7 @@ func (m *SyncedRaftRsp) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Timestamp |= (int64(b) & 0x7F) << shift
+ m.Timestamp |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -2111,6 +2480,9 @@ func (m *SyncedRaftRsp) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthSyncer
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthSyncer
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -2177,10 +2549,13 @@ func skipSyncer(dAtA []byte) (n int, err error) {
break
}
}
- iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthSyncer
}
+ iNdEx += length
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthSyncer
+ }
return iNdEx, nil
case 3:
for {
@@ -2209,6 +2584,9 @@ func skipSyncer(dAtA []byte) (n int, err error) {
return 0, err
}
iNdEx = start + next
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthSyncer
+ }
}
return iNdEx, nil
case 4:
@@ -2227,58 +2605,3 @@ var (
ErrInvalidLengthSyncer = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowSyncer = fmt.Errorf("proto: integer overflow")
)
-
-func init() { proto.RegisterFile("syncer.proto", fileDescriptorSyncer) }
-
-var fileDescriptorSyncer = []byte{
- // 776 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x55, 0xcd, 0x6e, 0x2b, 0x35,
- 0x14, 0x1e, 0xe7, 0x3f, 0x27, 0x7f, 0x83, 0x29, 0xed, 0x90, 0xd2, 0x28, 0x8c, 0x04, 0x0a, 0x5d,
- 0xb4, 0xa8, 0x80, 0x10, 0x12, 0x9b, 0x90, 0x96, 0x0a, 0x41, 0x4b, 0x35, 0x29, 0x54, 0xea, 0x26,
- 0x72, 0x33, 0xce, 0x74, 0xd4, 0x64, 0xec, 0xda, 0x8e, 0x4a, 0x5e, 0x01, 0x89, 0x3d, 0x2f, 0xc1,
- 0x96, 0x67, 0xe8, 0xb2, 0x8f, 0x70, 0xdb, 0xbb, 0xb9, 0x0f, 0x70, 0x1f, 0xe0, 0xca, 0x9e, 0x64,
- 0x92, 0xa8, 0xe9, 0xfd, 0x59, 0x5d, 0xdd, 0x9d, 0xcf, 0x77, 0xbe, 0xf9, 0x8e, 0xcf, 0xe7, 0x63,
- 0x0f, 0x94, 0xe5, 0x24, 0xea, 0x53, 0xb1, 0xc3, 0x05, 0x53, 0x0c, 0x17, 0xe2, 0x88, 0x5f, 0xd4,
- 0xd7, 0x02, 0x16, 0x30, 0x03, 0xee, 0xea, 0x55, 0x9c, 0x77, 0xcf, 0x20, 0xe7, 0xf1, 0xfe, 0x81,
- 0x10, 0xf8, 0x53, 0x28, 0x50, 0x21, 0x7a, 0x6a, 0xc2, 0xa9, 0x83, 0x9a, 0xa8, 0x95, 0xf5, 0xf2,
- 0x54, 0x88, 0xd3, 0x09, 0xa7, 0xb3, 0x54, 0x9f, 0xf9, 0xd4, 0x49, 0x25, 0xa9, 0x0e, 0xf3, 0x29,
- 0xde, 0x00, 0xbd, 0xec, 0x8d, 0x64, 0xe0, 0xa4, 0x9b, 0xa8, 0x55, 0xf4, 0x72, 0x54, 0x88, 0x23,
- 0x19, 0xb8, 0x2f, 0x10, 0x94, 0x3c, 0x32, 0x50, 0xbf, 0xb1, 0x60, 0x9f, 0x28, 0x82, 0xbf, 0x82,
- 0x4c, 0x22, 0x5d, 0xdd, 0xfb, 0x64, 0x67, 0xb6, 0xaf, 0x9d, 0x29, 0x49, 0x17, 0xf2, 0x0c, 0x05,
- 0x7f, 0x0e, 0xe5, 0xfe, 0x70, 0x2c, 0x15, 0x15, 0xbd, 0x88, 0x8c, 0xe2, 0x92, 0x45, 0xaf, 0x34,
- 0xc5, 0x8e, 0xc9, 0x88, 0xe2, 0x2f, 0xa1, 0x26, 0xc8, 0x40, 0xf5, 0x02, 0xc1, 0xc6, 0x3c, 0x66,
- 0xc5, 0xe5, 0x2b, 0x1a, 0x3e, 0xd4, 0xa8, 0xe1, 0x61, 0xc8, 0x28, 0x2a, 0x46, 0x4e, 0xa6, 0x89,
- 0x5a, 0x19, 0xcf, 0xac, 0xf1, 0x1a, 0x64, 0xc3, 0xc8, 0xa7, 0x7f, 0x39, 0x59, 0x03, 0xc6, 0x01,
- 0xfe, 0x02, 0xaa, 0x46, 0x51, 0x85, 0x23, 0x2a, 0x15, 0x19, 0x71, 0x27, 0xd7, 0x44, 0xad, 0x74,
- 0x2c, 0x78, 0x3a, 0x03, 0xb5, 0xa0, 0x4f, 0x14, 0x71, 0x6a, 0x4d, 0xd4, 0x2a, 0x7b, 0x66, 0xed,
- 0xfe, 0x08, 0x05, 0xdd, 0x84, 0x47, 0xaf, 0x25, 0xfe, 0x1a, 0x0a, 0x46, 0x66, 0xc8, 0x02, 0x07,
- 0x35, 0xd3, 0xad, 0xd2, 0x8a, 0x56, 0xb5, 0x1f, 0x5e, 0x5e, 0xc4, 0x81, 0xfb, 0x5f, 0x0a, 0x6c,
- 0x9d, 0x68, 0x73, 0x3e, 0x9c, 0x74, 0x23, 0xc2, 0x3d, 0x7a, 0x8d, 0x77, 0x97, 0xdc, 0xda, 0x5c,
- 0x96, 0x48, 0x98, 0x1f, 0x88, 0x67, 0x9b, 0x50, 0xd4, 0xfb, 0xef, 0x11, 0xdf, 0x17, 0x4e, 0xde,
- 0x94, 0x34, 0x63, 0xd9, 0xf6, 0x7d, 0x91, 0x24, 0x39, 0x51, 0x97, 0x4e, 0x61, 0x9e, 0x3c, 0x21,
- 0xea, 0x32, 0x71, 0xbb, 0xb8, 0xe0, 0xf6, 0x3f, 0x08, 0xd6, 0x97, 0x5c, 0xe8, 0x2a, 0xa2, 0xc6,
- 0x52, 0xbb, 0xf6, 0x3e, 0x4c, 0x70, 0xff, 0x7e, 0x62, 0x3f, 0x92, 0xe3, 0xef, 0x20, 0x27, 0x4d,
- 0x30, 0x3d, 0xc7, 0xad, 0x27, 0xce, 0x71, 0xfa, 0xc5, 0x94, 0x8c, 0xeb, 0x50, 0xe0, 0x82, 0x05,
- 0x82, 0x4a, 0x69, 0x5a, 0xc8, 0x78, 0x49, 0x8c, 0xb7, 0x00, 0x62, 0xd6, 0xc2, 0x95, 0x2b, 0xc6,
- 0x88, 0xbe, 0x75, 0xe7, 0x50, 0xe9, 0xea, 0x12, 0xfe, 0x74, 0x20, 0x1f, 0x59, 0x82, 0xde, 0xca,
- 0x92, 0xd4, 0x0a, 0x4b, 0xdc, 0xb3, 0x25, 0x6d, 0xc9, 0x13, 0x8f, 0xd0, 0x2a, 0x8f, 0x52, 0x8b,
- 0x83, 0xf2, 0x19, 0x14, 0xe7, 0x33, 0x92, 0x36, 0x33, 0x32, 0x07, 0xb6, 0x7f, 0x48, 0x5e, 0x0a,
- 0xf3, 0xda, 0x60, 0xa8, 0x1e, 0x44, 0x4a, 0x4c, 0x8e, 0x99, 0x18, 0x91, 0xa1, 0x47, 0x6e, 0x6c,
- 0x0b, 0xaf, 0x03, 0x36, 0x98, 0x76, 0x4b, 0x5e, 0x32, 0xe5, 0x91, 0x9b, 0x5f, 0xff, 0xb4, 0xd1,
- 0xf6, 0xff, 0x08, 0x3e, 0x5e, 0x61, 0x25, 0xb6, 0xa1, 0x6c, 0xa0, 0x3f, 0xa2, 0xab, 0x88, 0xdd,
- 0x44, 0xb6, 0x85, 0x1d, 0x58, 0x33, 0xc8, 0x19, 0x09, 0x55, 0x18, 0x05, 0xa7, 0x82, 0x44, 0x72,
- 0x40, 0x85, 0x8d, 0x92, 0xcc, 0x0c, 0xea, 0x8e, 0xfb, 0x7d, 0x2a, 0xa5, 0x9d, 0x4a, 0x54, 0xa6,
- 0xdf, 0xd8, 0xe9, 0x04, 0x99, 0x71, 0x32, 0xb8, 0x06, 0x25, 0x83, 0xfc, 0x4c, 0xc2, 0x21, 0xf5,
- 0xed, 0x6c, 0x42, 0x39, 0x0a, 0xa5, 0xd4, 0x1f, 0xe5, 0x74, 0x43, 0x06, 0xf9, 0x7d, 0xac, 0xd8,
- 0xc0, 0x27, 0x8a, 0xda, 0xf9, 0xed, 0x6f, 0xe1, 0xa3, 0x47, 0x57, 0x19, 0x57, 0x01, 0xe2, 0xa6,
- 0x35, 0x62, 0x5b, 0x5a, 0xbb, 0x7b, 0x15, 0x72, 0x4e, 0x7d, 0x03, 0xa0, 0xbd, 0x97, 0x29, 0xa8,
- 0x75, 0x04, 0x93, 0xb2, 0x13, 0x9f, 0x5f, 0xfb, 0xe4, 0x17, 0xfc, 0x3d, 0x54, 0x8c, 0x4a, 0xf2,
- 0x04, 0xe1, 0xe5, 0x29, 0xd3, 0x58, 0xdd, 0x5e, 0xc0, 0xcc, 0x73, 0xef, 0x5a, 0xb8, 0x03, 0x95,
- 0x43, 0xaa, 0xe6, 0x47, 0x8a, 0x37, 0xe6, 0xa4, 0xa5, 0x21, 0xaa, 0xaf, 0x4e, 0x48, 0xee, 0x5a,
- 0x78, 0x1f, 0xf0, 0x31, 0x53, 0xe1, 0x60, 0xee, 0x5e, 0x44, 0x38, 0xae, 0x3f, 0x31, 0xe8, 0x5a,
- 0x6c, 0xd5, 0x56, 0xda, 0x50, 0x8b, 0x55, 0x12, 0xe6, 0x3b, 0x4b, 0x9c, 0x03, 0x3e, 0xa4, 0x8f,
- 0xe6, 0xa0, 0xf9, 0xfa, 0x1b, 0x47, 0xaf, 0xeb, 0x6f, 0x60, 0xe8, 0x26, 0x7f, 0x72, 0x6e, 0xef,
- 0x1b, 0xd6, 0xdd, 0x7d, 0xc3, 0xba, 0x7d, 0x68, 0xa0, 0xbb, 0x87, 0x06, 0x7a, 0xf6, 0xd0, 0x40,
- 0xff, 0x3e, 0x6f, 0x58, 0x17, 0x39, 0xf3, 0x17, 0xfd, 0xe6, 0x55, 0x00, 0x00, 0x00, 0xff, 0xff,
- 0xd1, 0x15, 0x01, 0x7f, 0x75, 0x07, 0x00, 0x00,
-}
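
The regenerated syncer.pb.go above mostly tightens the skip logic: besides dropping the registered file descriptor, every skip now rejects an offset that wraps negative before comparing it against the buffer length. A minimal sketch of that guard pattern, with illustrative names standing in for the generated identifiers:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// skipField mirrors the generated skip logic: advance past a field of length
// skippy starting at offset iNdEx in a buffer of length l, rejecting negative
// lengths, offsets that overflow to a negative value, and offsets past the end.
func skipField(iNdEx, skippy, l int) (int, error) {
	if skippy < 0 {
		return 0, errInvalidLength
	}
	if iNdEx+skippy < 0 {
		return 0, errInvalidLength // the newly added overflow guard
	}
	if iNdEx+skippy > l {
		return 0, io.ErrUnexpectedEOF
	}
	return iNdEx + skippy, nil
}

func main() {
	fmt.Println(skipField(4, 8, 16)) // 12 <nil>
}
```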
diff --git a/syncerpb/syncer.proto b/syncerpb/syncer.proto
index 2ea4b5c7..6f328bc7 100644
--- a/syncerpb/syncer.proto
+++ b/syncerpb/syncer.proto
@@ -38,6 +38,7 @@ enum RaftApplySnapStatus {
ApplyFailed = 5;
ApplyMissing = 6;
ApplyOutofdate = 7;
+ ApplyWaitingBegin = 8;
}
message RaftLogData {
@@ -56,7 +57,7 @@ message RaftLogData {
}
message RaftReqs {
- repeated RaftLogData raft_log = 1;
+ repeated RaftLogData raft_log = 1 [(gogoproto.nullable) = false];
}
enum RaftApplySnapType {
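
Marking raft_log as non-nullable changes the generated field from a slice of pointers to a slice of values, which removes one allocation per log entry when batching. A rough sketch of the difference, with a stub RaftLogData standing in for the generated struct:

```go
package main

import "fmt"

// RaftLogData is a stub for the generated message; the real fields are elided.
type RaftLogData struct {
	Type uint32
	Data []byte
}

// Before: repeated RaftLogData raft_log = 1;
type RaftReqsNullable struct {
	RaftLog []*RaftLogData // each entry is a separate heap allocation
}

// After: repeated RaftLogData raft_log = 1 [(gogoproto.nullable) = false];
type RaftReqs struct {
	RaftLog []RaftLogData // entries are stored inline in the slice
}

func main() {
	var r RaftReqs
	r.RaftLog = append(r.RaftLog, RaftLogData{Type: 1, Data: []byte("op")})
	fmt.Println(len(r.RaftLog))
}
```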
diff --git a/test.sh b/test.sh
index a152adbe..0f603382 100755
--- a/test.sh
+++ b/test.sh
@@ -2,21 +2,31 @@
set -e
echo "" > coverage.txt
echo $TEST_RACE
+os=$(go env GOOS)
if [ "$TEST_PD" = "true" ]; then
- TESTDIRS=`go list ./... | grep -v vendor`
+ TESTDIRS=$(go list ./... | grep -v vendor)
else
- TESTDIRS=`go list ./... | grep -v pdserver | grep -v vendor`
+ TESTDIRS=$(go list ./... | grep -v pdserver | grep -v vendor)
fi
echo $TESTDIRS
+CGO_CFLAGS="-I${ROCKSDB}/include"
+CGO_LDFLAGS="-L${ROCKSDB} -lrocksdb -lstdc++ -lm -lsnappy -ljemalloc"
+
+if [ "$os" == "linux" ]; then
+ CGO_LDFLAGS="-L${ROCKSDB} -lrocksdb -lstdc++ -lm -lsnappy -lrt -ljemalloc -ldl"
+fi
+
+echo $CGO_LDFLAGS
+
if [ "$TEST_RACE" = "false" ]; then
- GOMAXPROCS=1 go test -timeout 900s $TESTDIRS
+ GOMAXPROCS=1 CGO_CFLAGS=${CGO_CFLAGS} CGO_LDFLAGS=${CGO_LDFLAGS} go test -timeout 1500s $TESTDIRS
else
- GOMAXPROCS=4 go test -i -timeout 900s -race $TESTDIRS
+ GOMAXPROCS=4 CGO_CFLAGS=${CGO_CFLAGS} CGO_LDFLAGS=${CGO_LDFLAGS} go test -timeout 1500s -race $TESTDIRS
for d in $TESTDIRS; do
- GOMAXPROCS=4 go test -timeout 900s -race -coverprofile=profile.out -covermode=atomic $d
+ GOMAXPROCS=4 CGO_CFLAGS=${CGO_CFLAGS} CGO_LDFLAGS=${CGO_LDFLAGS} go test -timeout 1500s -race -coverprofile=profile.out -covermode=atomic $d
if [ -f profile.out ]; then
cat profile.out >> coverage.txt
rm profile.out
@@ -28,7 +38,7 @@ fi
for dir in $(find apps tools -maxdepth 1 -type d) ; do
if grep -q '^package main$' $dir/*.go 2>/dev/null; then
echo "building $dir"
- go build -o $dir/$(basename $dir) ./$dir
+ CGO_CFLAGS=${CGO_CFLAGS} CGO_LDFLAGS=${CGO_LDFLAGS} go build -o $dir/$(basename $dir) ./$dir
else
echo "(skipped $dir)"
fi
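
test.sh now wires the RocksDB include and link flags into every go test/go build invocation via CGO_CFLAGS/CGO_LDFLAGS, adding -lrt and -ldl only on Linux. As a sanity check for the same flags, a small cgo program can encode them with #cgo directives instead of environment variables (paths assume ROCKSDB=/tmp/rocksdb as in CI; this is an illustrative sketch, not part of the build):

```go
package main

/*
#cgo CFLAGS: -I/tmp/rocksdb/include
#cgo LDFLAGS: -L/tmp/rocksdb -lrocksdb -lstdc++ -lm -lsnappy -ljemalloc
#cgo linux LDFLAGS: -lrt -ldl
#include "rocksdb/c.h"
*/
import "C"

import "fmt"

func main() {
	// Creating and destroying an options handle is enough to prove that the
	// headers resolve and the static library links.
	opts := C.rocksdb_options_create()
	C.rocksdb_options_destroy(opts)
	fmt.Println("rocksdb linked successfully")
}
```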
diff --git a/tools/bench/main.go b/tools/bench/main.go
index 8317c3c4..b28a8fac 100644
--- a/tools/bench/main.go
+++ b/tools/bench/main.go
@@ -11,6 +11,7 @@ import (
"sync/atomic"
"time"
+ "github.com/absolute8511/redigo/redis"
"github.com/siddontang/goredis"
)
@@ -20,7 +21,7 @@ var number = flag.Int("n", 1000, "request number")
var clients = flag.Int("c", 50, "number of clients")
var round = flag.Int("r", 1, "benchmark round number")
var valueSize = flag.Int("vsize", 100, "kv value size")
-var tests = flag.String("t", "set,get,randget,del,lpush,lrange,lpop,hset,hget,hdel,zadd,zrange,zrevrange,zdel", "only run the comma separated list of tests")
+var tests = flag.String("t", "set,incr,get,randget,del,lpush,lrange,lpop,hset,hget,hdel,zadd,zrange,zrevrange,zdel", "only run the comma separated list of tests")
var primaryKeyCnt = flag.Int("pkn", 100, "primary key count for hash,list,set,zset")
var namespace = flag.String("namespace", "default", "the prefix namespace")
var table = flag.String("table", "test", "the table to write")
@@ -30,8 +31,13 @@ var wg sync.WaitGroup
var client *goredis.Client
var loop int
+var latencyDistribute []int64
-func waitBench(c *goredis.PoolConn, cmd string, args ...interface{}) error {
+func init() {
+ latencyDistribute = make([]int64, 32)
+}
+
+func waitBench(c redis.Conn, cmd string, args ...interface{}) error {
v := args[0]
prefix := *namespace + ":" + *table + ":"
switch vt := v.(type) {
@@ -44,36 +50,54 @@ func waitBench(c *goredis.PoolConn, cmd string, args ...interface{}) error {
case int64:
args[0] = prefix + strconv.Itoa(int(vt))
}
+ s := time.Now()
_, err := c.Do(strings.ToUpper(cmd), args...)
if err != nil {
fmt.Printf("do %s error %s\n", cmd, err.Error())
return err
}
+ cost := time.Since(s).Nanoseconds()
+ index := cost / 1000 / 1000
+ if index < 100 {
+ index = index / 10
+ } else if index < 1000 {
+ index = 9 + index/100
+ } else if index < 10000 {
+ index = 19 + index/1000
+ } else {
+ index = 29
+ }
+ atomic.AddInt64(&latencyDistribute[index], 1)
return nil
}
-func bench(cmd string, f func(c *goredis.PoolConn, cindex int, loopIter int) error) {
+func bench(cmd string, f func(c redis.Conn, cindex int, loopIter int) error) {
wg.Add(*clients)
done := int32(0)
+ addr := fmt.Sprintf("%s:%d", *ip, *port)
currentNumList := make([]int64, *clients)
errCnt := int64(0)
t1 := time.Now()
for i := 0; i < *clients; i++ {
go func(clientIndex int) {
- var err error
- c, _ := client.Get()
+ defer wg.Done()
+ c, err := redis.Dial("tcp", addr, redis.DialConnectTimeout(time.Second*3),
+ redis.DialReadTimeout(time.Second),
+ redis.DialWriteTimeout(time.Second),
+ )
+ if err != nil {
+ fmt.Printf("failed to dial: %v\n", err.Error())
+ return
+ }
for j := 0; j < loop; j++ {
err = f(c, clientIndex, j)
if err != nil {
- if atomic.AddInt64(&errCnt, 1) > int64(*clients)*100 {
- break
- }
+ atomic.AddInt64(&errCnt, 1)
}
atomic.AddInt64(¤tNumList[clientIndex], 1)
}
c.Close()
- wg.Done()
}(i)
}
@@ -106,16 +130,27 @@ func bench(cmd string, f func(c *goredis.PoolConn, cindex int, loopIter int) err
wg.Wait()
atomic.StoreInt32(&done, 1)
-
t2 := time.Now()
-
d := t2.Sub(t1)
- fmt.Printf("%s: %s %0.3f micros/op, %0.2fop/s\n",
+ fmt.Printf("%s: %s %0.3f micros/op, %0.2fop/s, err: %v, num:%v\n",
cmd,
d.String(),
float64(d.Nanoseconds()/1e3)/float64(*number),
- float64(*number)/d.Seconds())
+ float64(*number)/d.Seconds(),
+ atomic.LoadInt64(&errCnt),
+ *number,
+ )
+ for i, v := range latencyDistribute {
+ if i == 0 {
+ fmt.Printf("latency below 100ms\n")
+ } else if i == 10 {
+ fmt.Printf("latency between 100ms ~ 999ms\n")
+ } else if i == 20 {
+ fmt.Printf("latency above 1s\n")
+ }
+ fmt.Printf("latency interval %d: %v\n", i, v)
+ }
}
var kvSetBase int64
@@ -136,10 +171,10 @@ func benchSet() {
magicIdentify[i] = byte(i % 3)
}
}
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ f := func(c redis.Conn, cindex int, loopi int) error {
value := make([]byte, *valueSize)
copy(value, valueSample)
- n := atomic.AddInt64(&kvSetBase, 1)
+ n := atomic.AddInt64(&kvSetBase, 1) % int64(*primaryKeyCnt)
tmp := fmt.Sprintf("%010d", int(n))
ts := time.Now().String()
index := 0
@@ -151,6 +186,7 @@ func benchSet() {
}
if index < *valueSize {
copy(value[index:], tmp)
+ index += len(tmp)
}
if *valueSize > len(magicIdentify) {
copy(value[len(value)-len(magicIdentify):], magicIdentify)
@@ -176,10 +212,10 @@ func benchSetEx() {
magicIdentify[i] = byte(i % 3)
}
}
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ f := func(c redis.Conn, cindex int, loopi int) error {
value := make([]byte, *valueSize)
copy(value, valueSample)
- n := atomic.AddInt64(&kvSetBase, 1)
+ n := atomic.AddInt64(&kvSetBase, 1) % int64(*primaryKeyCnt)
ttl := rand.Int31n(int32(*maxExpireSecs-*minExpireSecs)) + int32(*minExpireSecs)
tmp := fmt.Sprintf("%010d-%d-%s", int(n), ttl, time.Now().String())
ts := time.Now().String()
@@ -192,6 +228,7 @@ func benchSetEx() {
}
if index < *valueSize {
copy(value[index:], tmp)
+ index += len(tmp)
}
if *valueSize > len(magicIdentify) {
copy(value[len(value)-len(magicIdentify):], magicIdentify)
@@ -202,28 +239,41 @@ func benchSetEx() {
bench("setex", f)
}
+func benchIncr() {
+ f := func(c redis.Conn, cindex int, loopi int) error {
+ n := atomic.AddInt64(&kvSetBase, 1) % int64(*primaryKeyCnt)
+ tmp := fmt.Sprintf("%010d", int(n))
+ return waitBench(c, "incr", tmp)
+ }
+
+ bench("incr", f)
+}
+
func benchGet() {
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
- n := atomic.AddInt64(&kvGetBase, 1)
- return waitBench(c, "GET", n)
+ f := func(c redis.Conn, cindex int, loopi int) error {
+ n := atomic.AddInt64(&kvGetBase, 1) % int64(*primaryKeyCnt)
+ k := fmt.Sprintf("%010d", int(n))
+ return waitBench(c, "GET", k)
}
bench("get", f)
}
func benchRandGet() {
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
- n := rand.Int() % *number
- return waitBench(c, "GET", n)
+ f := func(c redis.Conn, cindex int, loopi int) error {
+ n := rand.Int() % int(*primaryKeyCnt)
+ k := fmt.Sprintf("%010d", int(n))
+ return waitBench(c, "GET", k)
}
bench("randget", f)
}
func benchDel() {
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
- n := atomic.AddInt64(&kvDelBase, 1)
- return waitBench(c, "DEL", n)
+ f := func(c redis.Conn, cindex int, loopi int) error {
+ n := atomic.AddInt64(&kvDelBase, 1) % int64(*primaryKeyCnt)
+ k := fmt.Sprintf("%010d", int(n))
+ return waitBench(c, "DEL", k)
}
bench("del", f)
@@ -235,62 +285,103 @@ var listRange50Base int64
var listRange100Base int64
var listPopBase int64
-func benchPushList() {
+func benchLPushList() {
+ benchPushList("lpush")
+}
+func benchRPushList() {
+ benchPushList("rpush")
+}
+
+func benchPushList(pushCmd string) {
valueSample := make([]byte, *valueSize)
for i := 0; i < len(valueSample); i++ {
valueSample[i] = byte(i % 255)
}
-
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ magicIdentify := make([]byte, 9+3+3)
+ for i := 0; i < len(magicIdentify); i++ {
+ if i < 3 || i > len(magicIdentify)-3 {
+ magicIdentify[i] = 0
+ } else {
+ magicIdentify[i] = byte(i % 3)
+ }
+ }
+ f := func(c redis.Conn, cindex int, loopi int) error {
value := make([]byte, *valueSize)
copy(value, valueSample)
n := atomic.AddInt64(&listPushBase, 1) % int64(*primaryKeyCnt)
+ tmp := fmt.Sprintf("%010d", int(n))
ts := time.Now().String()
- copy(value[0:], ts)
- copy(value[len(ts):], strconv.Itoa(int(n)))
- return waitBench(c, "RPUSH", "mytestlist"+strconv.Itoa(int(n)), value)
+ index := 0
+ copy(value[index:], magicIdentify)
+ index += len(magicIdentify)
+ if index < *valueSize {
+ copy(value[index:], ts)
+ index += len(ts)
+ }
+ if index < *valueSize {
+ copy(value[index:], tmp)
+ index += len(tmp)
+ }
+ if *valueSize > len(magicIdentify) {
+ copy(value[len(value)-len(magicIdentify):], magicIdentify)
+ }
+ return waitBench(c, pushCmd, "mytestlist"+tmp, value)
}
- bench("rpush", f)
+ bench(pushCmd, f)
}
func benchRangeList10() {
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ f := func(c redis.Conn, cindex int, loopi int) error {
n := atomic.AddInt64(&listRange10Base, 1) % int64(*primaryKeyCnt)
- return waitBench(c, "LRANGE", "mytestlist"+strconv.Itoa(int(n)), 0, 10)
+ tmp := fmt.Sprintf("%010d", int(n))
+ return waitBench(c, "LRANGE", "mytestlist"+tmp, 0, 10)
}
bench("lrange10", f)
}
func benchRangeList50() {
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ f := func(c redis.Conn, cindex int, loopi int) error {
n := atomic.AddInt64(&listRange50Base, 1) % int64(*primaryKeyCnt)
if n%10 != 0 {
return nil
}
- return waitBench(c, "LRANGE", "mytestlist"+strconv.Itoa(int(n)), 0, 50)
+ tmp := fmt.Sprintf("%010d", int(n))
+ return waitBench(c, "LRANGE", "mytestlist"+tmp, 0, 50)
}
bench("lrange50", f)
}
func benchRangeList100() {
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ f := func(c redis.Conn, cindex int, loopi int) error {
n := atomic.AddInt64(&listRange100Base, 1) % int64(*primaryKeyCnt)
if n%10 != 0 {
return nil
}
- return waitBench(c, "LRANGE", "mytestlist"+strconv.Itoa(int(n)), 0, 100)
+ tmp := fmt.Sprintf("%010d", int(n))
+ return waitBench(c, "LRANGE", "mytestlist"+tmp, 0, 100)
}
bench("lrange100", f)
}
-func benchPopList() {
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+func benchRPopList() {
+ f := func(c redis.Conn, cindex int, loopi int) error {
+ n := atomic.AddInt64(&listPopBase, 1) % int64(*primaryKeyCnt)
+ tmp := fmt.Sprintf("%010d", int(n))
+ return waitBench(c, "RPOP", "mytestlist"+tmp)
+ }
+
+ bench("rpop", f)
+}
+
+func benchLPopList() {
+ f := func(c redis.Conn, cindex int, loopi int) error {
n := atomic.AddInt64(&listPopBase, 1) % int64(*primaryKeyCnt)
- return waitBench(c, "LPOP", "mytestlist"+strconv.Itoa(int(n)))
+ tmp := fmt.Sprintf("%010d", int(n))
+ return waitBench(c, "LPOP", "mytestlist"+tmp)
}
bench("lpop", f)
@@ -307,33 +398,55 @@ func benchHset() {
for i := 0; i < len(valueSample); i++ {
valueSample[i] = byte(i % 255)
}
-
+ magicIdentify := make([]byte, 9+3+3)
+ for i := 0; i < len(magicIdentify); i++ {
+ if i < 3 || i > len(magicIdentify)-3 {
+ magicIdentify[i] = 0
+ } else {
+ magicIdentify[i] = byte(i % 3)
+ }
+ }
atomic.StoreInt64(&hashPKBase, 0)
subKeyCnt := int64(*number / (*primaryKeyCnt))
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ f := func(c redis.Conn, cindex int, loopi int) error {
value := make([]byte, *valueSize)
copy(value, valueSample)
n := atomic.AddInt64(&hashSetBase, 1)
pk := n / subKeyCnt
+ tmp := fmt.Sprintf("%010d", int(pk))
subkey := n - pk*subKeyCnt
ts := time.Now().String()
- copy(value[0:], ts)
- copy(value[len(ts):], strconv.Itoa(int(n)))
- return waitBench(c, "HSET", "myhashkey"+strconv.Itoa(int(pk)), subkey, value)
+
+ index := 0
+ copy(value[index:], magicIdentify)
+ index += len(magicIdentify)
+ if index < *valueSize {
+ copy(value[index:], ts)
+ index += len(ts)
+ }
+ if index < *valueSize {
+ copy(value[index:], tmp)
+ index += len(tmp)
+ }
+ if *valueSize > len(magicIdentify) {
+ copy(value[len(value)-len(magicIdentify):], magicIdentify)
+ }
+ return waitBench(c, "HMSET", "myhashkey"+tmp, subkey, value, "intv", subkey, "strv", tmp)
}
- bench("hset", f)
+ bench("hmset", f)
}
func benchHGet() {
atomic.StoreInt64(&hashPKBase, 0)
subKeyCnt := int64(*number / (*primaryKeyCnt))
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ f := func(c redis.Conn, cindex int, loopi int) error {
n := atomic.AddInt64(&hashGetBase, 1)
pk := n / subKeyCnt
+ tmp := fmt.Sprintf("%010d", int(pk))
subkey := n - pk*subKeyCnt
- return waitBench(c, "HGET", "myhashkey"+strconv.Itoa(int(pk)), subkey)
+ return waitBench(c, "HGET", "myhashkey"+tmp, subkey)
}
bench("hget", f)
@@ -342,11 +455,12 @@ func benchHGet() {
func benchHRandGet() {
atomic.StoreInt64(&hashPKBase, 0)
subKeyCnt := int64(*number / (*primaryKeyCnt))
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ f := func(c redis.Conn, cindex int, loopi int) error {
n := int64(rand.Int() % *number)
pk := n / subKeyCnt
+ tmp := fmt.Sprintf("%010d", int(pk))
subkey := n - pk*subKeyCnt
- return waitBench(c, "HGET", "myhashkey"+strconv.Itoa(int(pk)), subkey)
+ return waitBench(c, "HGET", "myhashkey"+tmp, subkey)
}
bench("hrandget", f)
@@ -355,11 +469,12 @@ func benchHRandGet() {
func benchHDel() {
atomic.StoreInt64(&hashPKBase, 0)
subKeyCnt := int64(*number / (*primaryKeyCnt))
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ f := func(c redis.Conn, cindex int, loopi int) error {
n := atomic.AddInt64(&hashDelBase, 1)
pk := n / subKeyCnt
+ tmp := fmt.Sprintf("%010d", int(pk))
subkey := n - pk*subKeyCnt
- return waitBench(c, "HDEL", "myhashkey"+strconv.Itoa(int(pk)), subkey)
+ return waitBench(c, "HDEL", "myhashkey"+tmp, subkey)
}
bench("hdel", f)
@@ -372,14 +487,16 @@ var zsetDelBase int64
func benchZAdd() {
atomic.StoreInt64(&zsetPKBase, 0)
subKeyCnt := int64(*number / (*primaryKeyCnt))
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ f := func(c redis.Conn, cindex int, loopi int) error {
n := atomic.AddInt64(&zsetAddBase, 1)
pk := n / subKeyCnt
+ tmp := fmt.Sprintf("%010d", int(pk))
subkey := n - pk*subKeyCnt
member := strconv.Itoa(int(subkey))
+ member += tmp
ts := time.Now().String()
member = member + ts
- return waitBench(c, "ZADD", "myzsetkey"+strconv.Itoa(int(pk)), subkey, member)
+ return waitBench(c, "ZADD", "myzsetkey"+tmp, subkey, member)
}
bench("zadd", f)
@@ -388,11 +505,12 @@ func benchZAdd() {
func benchZDel() {
atomic.StoreInt64(&zsetPKBase, 0)
subKeyCnt := int64(*number / (*primaryKeyCnt))
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ f := func(c redis.Conn, cindex int, loopi int) error {
n := atomic.AddInt64(&zsetDelBase, 1)
pk := n / subKeyCnt
+ tmp := fmt.Sprintf("%010d", int(pk))
subkey := n - pk*subKeyCnt
- return waitBench(c, "ZREM", "myzsetkey"+strconv.Itoa(int(pk)), subkey)
+ return waitBench(c, "ZREM", "myzsetkey"+tmp, subkey)
}
bench("zrem", f)
@@ -401,13 +519,14 @@ func benchZDel() {
func benchZRangeByScore() {
atomic.StoreInt64(&zsetPKBase, 0)
subKeyCnt := int64(*number / (*primaryKeyCnt))
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ f := func(c redis.Conn, cindex int, loopi int) error {
n := atomic.AddInt64(&zsetPKBase, 1)
pk := n / subKeyCnt
if n%5 != 0 {
return nil
}
- return waitBench(c, "ZRANGEBYSCORE", "myzsetkey"+strconv.Itoa(int(pk)), 0, rand.Int(), "limit", rand.Int()%100, 100)
+ tmp := fmt.Sprintf("%010d", int(pk))
+ return waitBench(c, "ZRANGEBYSCORE", "myzsetkey"+tmp, 0, rand.Int(), "limit", rand.Int()%100, 100)
}
bench("zrangebyscore", f)
@@ -416,13 +535,14 @@ func benchZRangeByScore() {
func benchZRangeByRank() {
atomic.StoreInt64(&zsetPKBase, 0)
subKeyCnt := int64(*number / (*primaryKeyCnt))
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ f := func(c redis.Conn, cindex int, loopi int) error {
n := atomic.AddInt64(&zsetPKBase, 1)
pk := n / subKeyCnt
if n%5 != 0 {
return nil
}
- return waitBench(c, "ZRANGE", "myzsetkey"+strconv.Itoa(int(pk)), 0, rand.Int()%100)
+ tmp := fmt.Sprintf("%010d", int(pk))
+ return waitBench(c, "ZRANGE", "myzsetkey"+tmp, 0, rand.Int()%100)
}
bench("zrange", f)
@@ -431,13 +551,14 @@ func benchZRangeByRank() {
func benchZRevRangeByScore() {
atomic.StoreInt64(&zsetPKBase, 0)
subKeyCnt := int64(*number / (*primaryKeyCnt))
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ f := func(c redis.Conn, cindex int, loopi int) error {
n := atomic.AddInt64(&zsetPKBase, 1)
pk := n / subKeyCnt
if n%5 != 0 {
return nil
}
- return waitBench(c, "ZREVRANGEBYSCORE", "myzsetkey"+strconv.Itoa(int(pk)), 0, rand.Int(), "limit", rand.Int()%100, 100)
+ tmp := fmt.Sprintf("%010d", int(pk))
+ return waitBench(c, "ZREVRANGEBYSCORE", "myzsetkey"+tmp, 0, rand.Int(), "limit", rand.Int()%100, 100)
}
bench("zrevrangebyscore", f)
@@ -446,13 +567,14 @@ func benchZRevRangeByScore() {
func benchZRevRangeByRank() {
atomic.StoreInt64(&zsetPKBase, 0)
subKeyCnt := int64(*number / (*primaryKeyCnt))
- f := func(c *goredis.PoolConn, cindex int, loopi int) error {
+ f := func(c redis.Conn, cindex int, loopi int) error {
n := atomic.AddInt64(&zsetPKBase, 1)
pk := n / subKeyCnt
if n%5 != 0 {
return nil
}
- return waitBench(c, "ZREVRANGE", "myzsetkey"+strconv.Itoa(int(pk)), 0, rand.Int()%100)
+ tmp := fmt.Sprintf("%010d", int(pk))
+ return waitBench(c, "ZREVRANGE", "myzsetkey"+tmp, 0, rand.Int()%100)
}
bench("zrevrange", f)
@@ -472,19 +594,6 @@ func main() {
}
loop = *number / *clients
-
- addr := fmt.Sprintf("%s:%d", *ip, *port)
-
- client = goredis.NewClient(addr, "")
- client.SetReadBufferSize(10240)
- client.SetWriteBufferSize(10240)
- client.SetMaxIdleConns(16)
-
- for i := 0; i < *clients; i++ {
- c, _ := client.Get()
- c.Close()
- }
-
if *round <= 0 {
*round = 1
}
@@ -498,6 +607,8 @@ func main() {
benchSet()
case "setex":
benchSetEx()
+ case "incr":
+ benchIncr()
case "get":
benchGet()
case "randget":
@@ -505,13 +616,17 @@ func main() {
case "del":
benchDel()
case "lpush":
- benchPushList()
+ benchLPushList()
+ case "rpush":
+ benchRPushList()
case "lrange":
benchRangeList10()
benchRangeList50()
benchRangeList100()
case "lpop":
- benchPopList()
+ benchLPopList()
+ case "rpop":
+ benchRPopList()
case "hset":
benchHset()
case "hget":
diff --git a/tools/clustersync/main.go b/tools/clustersync/main.go
new file mode 100644
index 00000000..70e401e8
--- /dev/null
+++ b/tools/clustersync/main.go
@@ -0,0 +1,193 @@
+package main
+
+import (
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "log"
+ "net/http"
+ "time"
+
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/metric"
+)
+
+var ip = flag.String("ip", "127.0.0.1", "server ip")
+var port = flag.Int("port", 6380, "server port")
+var task = flag.String("t", "", "check task type, supported: check-sync-normal|check-sync-init")
+var ignoreNamespace = flag.String("ignore_namespace", "", "namespace to ignore while checking")
+var stopWriteTs = flag.Int64("stopts", 0, "stop timestamp for checking the write syncer; it should be recorded when writes actually stopped")
+var minRaftGroups = flag.Int64("min_groups", 0, "the minimum expected number of raft groups, summed over all namespace partitions")
+
+type RaftProgress struct {
+ Match uint64 `json:"match"`
+ Next uint64 `json:"next"`
+ State string `json:"state"`
+}
+
+// the raft status type in the raft package cannot be marshaled/unmarshaled correctly, so we redefine it here
+type CustomRaftStatus struct {
+ ID uint64 `json:"id,omitempty"`
+ Term uint64 `json:"term,omitempty"`
+ Vote uint64 `json:"vote"`
+ Commit uint64 `json:"commit"`
+ Lead uint64 `json:"lead"`
+ RaftState string `json:"raft_state"`
+ Applied uint64 `json:"applied"`
+ Progress map[uint64]RaftProgress `json:"progress,omitempty"`
+ LeadTransferee uint64 `json:"lead_transferee"`
+}
+
+type LogSyncStats struct {
+ SyncRecvLatency *metric.WriteStats `json:"sync_net_latency"`
+ SyncAllLatency *metric.WriteStats `json:"sync_all_latency"`
+ LogReceived []metric.LogSyncStats `json:"log_received,omitempty"`
+ LogSynced []metric.LogSyncStats `json:"log_synced,omitempty"`
+ LeaderRaftStats map[string]CustomRaftStatus `json:"leader_raft_stats,omitempty"`
+}
+
+func JsonString(v interface{}) string {
+ d, _ := json.MarshalIndent(v, "", " ")
+ return string(d)
+}
+
+func checkLogSync(ss LogSyncStats) (int64, bool) {
+ // note: a namespace may still be waiting for init, so the sync stats of
+ // such an uninitialized namespace may be missing from the reported stats.
+ // We should wait until all namespaces are initialized.
+ checkOK := true
+ maxTs := int64(0)
+ if *ignoreNamespace != "" {
+ for nspid, _ := range ss.LeaderRaftStats {
+ ns, _ := common.GetNamespaceAndPartition(nspid)
+ if ns == *ignoreNamespace {
+ delete(ss.LeaderRaftStats, nspid)
+ }
+ }
+ }
+ if len(ss.LogSynced) != len(ss.LeaderRaftStats) {
+ log.Printf("namespace partitions not match: %v, %v\n", len(ss.LeaderRaftStats), len(ss.LogSynced))
+ checkOK = false
+ }
+ if len(ss.LogSynced) < int(*minRaftGroups) {
+ log.Printf("namespace partitions too less: %v, %v\n", len(ss.LogSynced), *minRaftGroups)
+ checkOK = false
+ }
+ for _, logsynced := range ss.LogSynced {
+ leaderRS, ok := ss.LeaderRaftStats[logsynced.Name]
+ if !ok {
+ log.Printf("namespace missing in syncer leader: %v\n", JsonString(logsynced))
+ checkOK = false
+ continue
+ }
+ delete(ss.LeaderRaftStats, logsynced.Name)
+ if leaderRS.Term != logsynced.Term || leaderRS.Commit != logsynced.Index {
+ log.Printf("namespace %v not synced with leader: %v, %v\n", logsynced.Name, JsonString(logsynced), JsonString(leaderRS))
+ checkOK = false
+ continue
+ }
+ if logsynced.Timestamp > maxTs {
+ maxTs = logsynced.Timestamp
+ }
+ log.Printf("namespace %v synced(%v-%v-%v)\n", logsynced.Name, logsynced.Term, logsynced.Index, logsynced.Timestamp)
+ }
+ if *stopWriteTs > 0 && maxTs <= *stopWriteTs {
+ log.Printf("get log sync stats timestamps old %v, %v\n", maxTs, *stopWriteTs)
+ checkOK = false
+ }
+ if len(ss.LeaderRaftStats) > 0 {
+ log.Printf("namespace partitions not match: %v\n", ss.LeaderRaftStats)
+ checkOK = false
+ }
+ return maxTs, checkOK
+}
+
+func getLogSyncStats() (LogSyncStats, error) {
+ url := fmt.Sprintf("http://%s:%v/logsync/stats", *ip, *port)
+ var ss LogSyncStats
+ code, err := common.APIRequest("GET", url, nil, time.Second, &ss)
+ if err != nil {
+ log.Printf("get log sync stats err %v\n", err)
+ return ss, err
+ }
+ if code != http.StatusOK {
+ log.Printf("get log sync stats err code %v\n", code)
+ return ss, errors.New("error code for request")
+ }
+ return ss, nil
+}
+
+func main() {
+ flag.Parse()
+
+ ss, err := getLogSyncStats()
+ if err != nil {
+ return
+ }
+
+ checkOK := true
+ switch *task {
+ case "check-sync-normal":
+ syncTs, ok := checkLogSync(ss)
+ if !ok {
+ checkOK = false
+ break
+ }
+ // fetch the log sync stats again to check whether any timestamps changed.
+ time.Sleep(time.Second / 2)
+ ss, err := getLogSyncStats()
+ if err != nil {
+ log.Printf("get log sync stats err %v\n", err)
+ checkOK = false
+ return
+ }
+ syncTs2, ok := checkLogSync(ss)
+ if !ok {
+ checkOK = false
+ break
+ }
+
+ if syncTs2 != syncTs {
+ log.Printf("log sync stats timestamps changed %v, %v\n", syncTs, syncTs2)
+ checkOK = false
+ break
+ }
+ log.Printf("log sync max timestamps %v\n", syncTs)
+ case "check-sync-init":
+ syncTs, ok := checkLogSync(ss)
+ if !ok {
+ checkOK = false
+ break
+ }
+
+ // fetch the log sync stats again to check whether any timestamps changed.
+ time.Sleep(time.Second / 2)
+ ss, err := getLogSyncStats()
+ if err != nil {
+ log.Printf("get log sync stats err %v\n", err)
+ checkOK = false
+ return
+ }
+
+ syncTs2, ok := checkLogSync(ss)
+ if !ok {
+ checkOK = false
+ break
+ }
+
+ if syncTs2 != syncTs {
+ log.Printf("log sync stats timestamps changed %v, %v\n", syncTs, syncTs2)
+ checkOK = false
+ break
+ }
+ log.Printf("log sync max timestamps %v\n", syncTs)
+ default:
+ log.Printf("unknown task %v\n", *task)
+ return
+ }
+ if !checkOK {
+ return
+ }
+ log.Printf("all check are ok, cluster synced!!\n")
+}
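
Both check tasks sample /logsync/stats twice: the cluster is considered synced only when every namespace's synced term/index matches the leader raft state and the maximum synced timestamp does not advance between the two samples. A condensed sketch of that decision, with simplified types standing in for the real stats structures:

```go
package main

import "fmt"

// syncSample is a simplified stand-in for one /logsync/stats response:
// per-namespace synced (term, index), the leader raft state, and the max
// synced timestamp seen in this sample.
type syncSample struct {
	synced map[string][2]uint64 // namespace -> {term, index}
	leader map[string][2]uint64 // namespace -> leader {term, commit}
	maxTs  int64
}

// clusterSynced condenses the check-sync rule: every namespace must match its
// leader, no leader namespace may be missing, and the timestamp must be
// unchanged across the two samples.
func clusterSynced(a, b syncSample) bool {
	for ns, s := range b.synced {
		if b.leader[ns] != s {
			return false
		}
	}
	return len(b.synced) == len(b.leader) && a.maxTs == b.maxTs
}

func main() {
	s := syncSample{
		synced: map[string][2]uint64{"ns-0": {7, 120}},
		leader: map[string][2]uint64{"ns-0": {7, 120}},
		maxTs:  1000,
	}
	fmt.Println(clusterSynced(s, s)) // true: matched and timestamp unchanged
}
```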
diff --git a/tools/data_tool/main.go b/tools/data_tool/main.go
new file mode 100644
index 00000000..4702eeef
--- /dev/null
+++ b/tools/data_tool/main.go
@@ -0,0 +1,219 @@
+package main
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "log"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/absolute8511/redigo/redis"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/rockredis"
+)
+
+var ipProxy = flag.String("ip_proxy", "127.0.0.1", "server ip")
+var portProxy = flag.Int("port_proxy", 3803, "server port")
+var toolType = flag.String("type", "", "data tool type: gen_delrange/compact_db/scan_localttl")
+var ns = flag.String("ns", "", "namespace for data")
+var table = flag.String("table", "", "table for data")
+var key = flag.String("key", "", "key for action")
+var cleanMatched = flag.Bool("clean_matched", false, "do del to proxy if matched")
+var maxScan = flag.Int("max_scan", 10000, "max scan db keys")
+var sleepBetween = flag.Duration("sleep_between", time.Millisecond, "sleep between delete operations when cleaning matched keys")
+var startFrom = flag.String("start_from", "", "key range for data")
+var endTo = flag.String("end_to", "", "key range for data")
+var dbFile = flag.String("dbfile", "", "parent directory of the rocksdb data; the final path will be dbFile/rocksdb")
+
+type DeleteTableRange struct {
+ Table string `json:"table,omitempty"`
+ StartFrom []byte `json:"start_from,omitempty"`
+ EndTo []byte `json:"end_to,omitempty"`
+ // to avoid drop all table data, this is needed to delete all data in table
+ DeleteAll bool `json:"delete_all,omitempty"`
+ Dryrun bool `json:"dryrun,omitempty"`
+}
+
+func getRangeStr(dr DeleteTableRange) string {
+ b, _ := json.Marshal(dr)
+ return string(b)
+}
+
+func main() {
+ flag.Parse()
+ defer common.FlushZapDefault()
+
+ switch *toolType {
+ case "gen_delrange":
+ log.Printf("test gen del range")
+ dr := DeleteTableRange{
+ StartFrom: []byte(*startFrom),
+ EndTo: []byte(*endTo),
+ }
+ log.Printf("%s", getRangeStr(dr))
+ case "compact_db":
+ compactDB()
+ case "scan_localttl":
+ checkLocalTTL()
+ default:
+ log.Printf("unknown action: %v", *toolType)
+ }
+}
+
+var (
+ errExpTimeKey = errors.New("invalid expire time key")
+)
+
+const (
+ logTimeFormatStr = "2006-01-02 15:04:05"
+)
+
+/*
+the coded format of expire time key:
+bytes: -0-|-1-2-3-4-5-6-7-8-|----9---|-10-11--------x-|
+data : 103| when |dataType| key |
+*/
+func expEncodeTimeKey(dataType byte, key []byte, when int64) []byte {
+ buf := make([]byte, len(key)+1+8+1)
+
+ pos := 0
+ buf[pos] = rockredis.ExpTimeType
+ pos++
+
+ binary.BigEndian.PutUint64(buf[pos:], uint64(when))
+ pos += 8
+
+ buf[pos] = dataType
+ pos++
+
+ copy(buf[pos:], key)
+
+ return buf
+}
+
+// decode the expire time key; the return values are: dataType, key, whenToExpire, error
+func expDecodeTimeKey(tk []byte) (byte, []byte, int64, error) {
+ pos := 0
+ if pos+10 > len(tk) || tk[pos] != rockredis.ExpTimeType {
+ return 0, nil, 0, errExpTimeKey
+ }
+
+ return tk[pos+9], tk[pos+10:], int64(binary.BigEndian.Uint64(tk[pos+1:])), nil
+}
+
+func checkLocalTTL() {
+ log.Printf("begin check ttl")
+ now := time.Now().Unix() - 3600
+
+ cfg := rockredis.NewRockRedisDBConfig()
+ cfg.DataDir = *dbFile
+ cfg.ReadOnly = true
+ cfg.DataTool = true
+ db, err := rockredis.OpenRockDB(cfg)
+ if err != nil {
+ log.Printf("open db failed: %s", err.Error())
+ return
+ }
+ defer db.Close()
+ minKey := expEncodeTimeKey(rockredis.NoneType, nil, 0)
+ maxKey := expEncodeTimeKey(100, nil, now)
+
+ var eCount int64
+ var scanned int64
+ checkStart := time.Now()
+
+ it, err := db.NewDBRangeLimitIterator(minKey, maxKey,
+ common.RangeROpen, 0, -1, false)
+ if err != nil {
+ log.Printf("open db iterator failed: %s", err.Error())
+ return
+ }
+ defer it.Close()
+
+ clientConn, err := redis.Dial("tcp", net.JoinHostPort(*ipProxy, strconv.Itoa(*portProxy)), redis.DialConnectTimeout(time.Second))
+ if *cleanMatched {
+ if err != nil {
+ log.Printf("connect redis failed: %s", err.Error())
+ return
+ }
+ }
+
+ missed := 0
+ cleaned := 0
+ for ; it.Valid(); it.Next() {
+ if scanned > int64(*maxScan) {
+ break
+ }
+ tk := it.Key()
+ if tk == nil {
+ continue
+ }
+
+ dt, k, nt, dErr := expDecodeTimeKey(tk)
+ if dErr != nil {
+ continue
+ }
+
+ scanned += 1
+ if nt > now {
+ // the next ttl check time is nt
+ log.Printf("ttl check end at key:[%s] of type:%s whose expire time is: %s", string(k),
+ rockredis.TypeName[dt], time.Unix(nt, 0).Format(logTimeFormatStr))
+ break
+ }
+
+ eCount += 1
+
+ if *key != "" {
+ if strings.HasPrefix(string(k), *key) {
+ log.Printf("found key %s(type: %v), expire time: %s\n", string(k), rockredis.TypeName[dt],
+ time.Unix(nt, 0).Format(logTimeFormatStr))
+ if *cleanMatched && dt == rockredis.KVType {
+ delKey := fmt.Sprintf("%s:%s", *ns, string(k))
+ n, err := redis.Int(clientConn.Do("exists", delKey))
+ if err != nil {
+ log.Printf("del %s failed: %s", delKey, err.Error())
+ } else if n == 1 {
+ n, err := redis.Int(clientConn.Do("del", delKey))
+ if err != nil {
+ log.Printf("del %s failed: %s", delKey, err.Error())
+ } else if n == 0 {
+ log.Printf("del %s not exist: %v", delKey, n)
+ missed++
+ } else if n == 1 {
+ cleaned++
+ }
+ if *sleepBetween > 0 {
+ time.Sleep(*sleepBetween)
+ }
+ } else if n == 0 {
+ missed++
+ }
+ }
+ }
+ continue
+ }
+ log.Printf("scanned ttl: key %s(type: %v), expire time: %s\n", string(k), rockredis.TypeName[dt],
+ time.Unix(nt, 0).Format(logTimeFormatStr))
+ }
+
+ checkCost := time.Since(checkStart)
+ log.Printf("[%d/%d] keys have expired during ttl checking, cost:%s, clean %v, %v", eCount, scanned, checkCost, cleaned, missed)
+}
+
+func compactDB() {
+ cfg := rockredis.NewRockRedisDBConfig()
+ cfg.DataDir = *dbFile
+ db, err := rockredis.OpenRockDB(cfg)
+ if err != nil {
+ log.Printf("open db failed: %s", err.Error())
+ return
+ }
+ defer db.Close()
+ db.CompactAllRange()
+}
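
The expire time key layout documented above (type byte, 8-byte big-endian timestamp, data type byte, raw key) can be exercised with a small roundtrip. This sketch re-implements the two helpers standalone, with 103 taken from the layout comment as a placeholder for rockredis.ExpTimeType and validation omitted for brevity:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const expTimeType byte = 103 // placeholder for rockredis.ExpTimeType

// encodeTimeKey lays out: |type|when (8 bytes, big endian)|dataType|key...|
func encodeTimeKey(dataType byte, key []byte, when int64) []byte {
	buf := make([]byte, 1+8+1+len(key))
	buf[0] = expTimeType
	binary.BigEndian.PutUint64(buf[1:], uint64(when))
	buf[9] = dataType
	copy(buf[10:], key)
	return buf
}

// decodeTimeKey returns dataType, key, whenToExpire (no validation here).
func decodeTimeKey(tk []byte) (byte, []byte, int64) {
	return tk[9], tk[10:], int64(binary.BigEndian.Uint64(tk[1:]))
}

func main() {
	tk := encodeTimeKey(1, []byte("test:k1"), 1700000000)
	dt, k, when := decodeTimeKey(tk)
	fmt.Println(dt, string(k), when) // 1 test:k1 1700000000
}
```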
diff --git a/tools/data_tool/main_test.go b/tools/data_tool/main_test.go
new file mode 100644
index 00000000..4c14ff8e
--- /dev/null
+++ b/tools/data_tool/main_test.go
@@ -0,0 +1,20 @@
+package main
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestRangeMarshal(t *testing.T) {
+ dt := DeleteTableRange{
+ StartFrom: []byte("teststart"),
+ EndTo: []byte("testend"),
+ }
+ t.Logf("%s", getRangeStr(dt))
+ out := DeleteTableRange{}
+ json.Unmarshal([]byte(getRangeStr(dt)), &out)
+ assert.Equal(t, dt, out)
+ assert.Equal(t, []byte("teststart"), out.StartFrom)
+}
diff --git a/transport/rafthttp/coder.go b/transport/rafthttp/coder.go
index 7076b2ad..182cbb9e 100644
--- a/transport/rafthttp/coder.go
+++ b/transport/rafthttp/coder.go
@@ -14,7 +14,7 @@
package rafthttp
-import "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+import "github.com/youzan/ZanRedisDB/raft/raftpb"
type encoder interface {
// encode encodes the given message to an output stream.
diff --git a/transport/rafthttp/functional_test.go b/transport/rafthttp/functional_test.go
index d7c5df9c..4289c847 100644
--- a/transport/rafthttp/functional_test.go
+++ b/transport/rafthttp/functional_test.go
@@ -20,10 +20,10 @@ import (
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/stats"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/stats"
"golang.org/x/net/context"
)
diff --git a/transport/rafthttp/http.go b/transport/rafthttp/http.go
index 4d150b50..594344a0 100644
--- a/transport/rafthttp/http.go
+++ b/transport/rafthttp/http.go
@@ -22,11 +22,13 @@ import (
"net/http"
"path"
"strings"
+ "sync"
+ "time"
- pioutil "github.com/absolute8511/ZanRedisDB/pkg/ioutil"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
"github.com/coreos/etcd/version"
+ pioutil "github.com/youzan/ZanRedisDB/pkg/ioutil"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
"golang.org/x/net/context"
)
@@ -41,10 +43,11 @@ const (
)
var (
- RaftPrefix = "/raft"
- ProbingPrefix = path.Join(RaftPrefix, "probing")
- RaftStreamPrefix = path.Join(RaftPrefix, "stream")
- RaftSnapshotPrefix = path.Join(RaftPrefix, "snapshot")
+ RaftPrefix = "/raft"
+ ProbingPrefix = path.Join(RaftPrefix, "probing")
+ RaftStreamPrefix = path.Join(RaftPrefix, "stream")
+ RaftSnapshotPrefix = path.Join(RaftPrefix, "snapshot")
+ RaftSnapshotCheckPrefix = path.Join(RaftPrefix, "snapshot_check")
errIncompatibleVersion = errors.New("incompatible version")
errClusterIDMismatch = errors.New("cluster ID mismatch")
@@ -141,6 +144,8 @@ type snapshotHandler struct {
r Raft
snapshotter ISnapSaver
cid string
+ m sync.Mutex
+ snapStatus map[uint64]raftpb.Message
}
func newSnapshotHandler(tr Transporter, r Raft, snapshotter ISnapSaver, cid string) http.Handler {
@@ -149,6 +154,7 @@ func newSnapshotHandler(tr Transporter, r Raft, snapshotter ISnapSaver, cid stri
r: r,
snapshotter: snapshotter,
cid: cid,
+ snapStatus: make(map[uint64]raftpb.Message),
}
}
@@ -177,6 +183,11 @@ func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
addRemoteFromRequest(h.tr, r)
+ if r.URL.Path == RaftSnapshotCheckPrefix {
+ h.handleSnapshotCheck(w, r)
+ return
+ }
+
dec := &messageDecoder{r: r.Body}
m, err := dec.decode()
if err != nil {
@@ -196,35 +207,91 @@ func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
plog.Infof("receiving database snapshot [index:%d, from %s] ...", m.Snapshot.Metadata.Index, types.ID(m.From))
+ if !h.insertSnapshotState(m) {
+ http.Error(w, "already running another snapshot", http.StatusBadRequest)
+ // FIXME: older versions may send multiple snapshots, so this needs to be ignored for lower versions
+ return
+ }
+
// save incoming database snapshot.
n, err := h.snapshotter.SaveDBFrom(r.Body, m)
if err != nil {
msg := fmt.Sprintf("failed to save KV snapshot (%v)", err)
plog.Error(msg)
http.Error(w, msg, http.StatusInternalServerError)
+ h.removeSnapshotState(m)
return
}
receivedBytes.WithLabelValues(m.FromGroup.String()).Add(float64(n))
- plog.Infof("received and saved database snapshot [index: %d, from: %s] successfully", m.Snapshot.Metadata.Index, types.ID(m.From))
-
- if err := h.r.Process(context.TODO(), m); err != nil {
- switch v := err.(type) {
- // Process may return writerToResponse error when doing some
- // additional checks before calling raft.Node.Step.
- case writerToResponse:
- v.WriteTo(w)
- default:
+ go func() {
+ defer h.removeSnapshotState(m)
+ if err := h.r.Process(context.TODO(), m); err != nil {
msg := fmt.Sprintf("failed to process raft message (%v)", err)
plog.Warningf(msg)
- http.Error(w, msg, http.StatusInternalServerError)
}
- return
- }
+ plog.Infof("received and saved database snapshot [index: %d, from: %s] successfully", m.Snapshot.Metadata.Index, types.ID(m.From))
+ }()
+ // In old versions the sender reports the snapshot as finished as soon as we respond OK.
+ // Here we wait for the snapshot transfer state to change, so that we do not
+ // send a msgapp response to the leader, which could make the sender send another snapshot.
+ time.Sleep(time.Second)
+
// Write StatusNoContent header after the message has been processed by
// raft, which facilitates the client to report MsgSnap status.
w.WriteHeader(http.StatusNoContent)
}
+func (h *snapshotHandler) insertSnapshotState(m raftpb.Message) bool {
+ h.m.Lock()
+ defer h.m.Unlock()
+ _, ok := h.snapStatus[m.ToGroup.GroupId]
+ if ok {
+ return false
+ }
+ h.snapStatus[m.ToGroup.GroupId] = m
+ return true
+}
+
+func (h *snapshotHandler) removeSnapshotState(m raftpb.Message) {
+ h.m.Lock()
+ defer h.m.Unlock()
+ delete(h.snapStatus, m.ToGroup.GroupId)
+}
+
+func (h *snapshotHandler) checkSnapshotState(m raftpb.Message) bool {
+ h.m.Lock()
+ defer h.m.Unlock()
+ _, ok := h.snapStatus[m.ToGroup.GroupId]
+ return ok
+}
+
+func (h *snapshotHandler) handleSnapshotCheck(w http.ResponseWriter, r *http.Request) {
+ dec := &messageDecoder{r: r.Body}
+ m, err := dec.decode()
+ if err != nil {
+ msg := fmt.Sprintf("failed to decode raft message (%v)", err)
+ plog.Errorf(msg)
+ http.Error(w, msg, http.StatusBadRequest)
+ recvFailures.WithLabelValues(r.RemoteAddr).Inc()
+ return
+ }
+ receivedBytes.WithLabelValues(m.FromGroup.String()).Add(float64(m.Size()))
+
+ if m.Type != raftpb.MsgSnap {
+ plog.Errorf("unexpected raft message type %s on snapshot path", m.Type)
+ http.Error(w, "wrong raft message type", http.StatusBadRequest)
+ return
+ }
+ running := h.checkSnapshotState(m)
+ if running {
+ plog.Infof("snapshot check for [index: %d, from: %s-%v] still waiting transfer", m.Snapshot.Metadata.Index, types.ID(m.From), m.ToGroup)
+ http.Error(w, "snapshot transfer still running", http.StatusAlreadyReported)
+ return
+ }
+ w.WriteHeader(http.StatusNoContent)
+ plog.Infof("snapshot check for [index: %d, from: %s-%v] success", m.Snapshot.Metadata.Index, types.ID(m.From), m.ToGroup)
+}
+
type streamHandler struct {
tr *Transport
peerGetter peerGetter
diff --git a/transport/rafthttp/http_test.go b/transport/rafthttp/http_test.go
index be6e8227..ccfaeba8 100644
--- a/transport/rafthttp/http_test.go
+++ b/transport/rafthttp/http_test.go
@@ -26,10 +26,10 @@ import (
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/pkg/pbutil"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/snap"
+ "github.com/youzan/ZanRedisDB/pkg/pbutil"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/snap"
"github.com/coreos/etcd/version"
)
diff --git a/transport/rafthttp/msg_codec.go b/transport/rafthttp/msg_codec.go
index d3334c5e..dd562519 100644
--- a/transport/rafthttp/msg_codec.go
+++ b/transport/rafthttp/msg_codec.go
@@ -19,8 +19,8 @@ import (
"errors"
"io"
- "github.com/absolute8511/ZanRedisDB/pkg/pbutil"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/pkg/pbutil"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
)
// messageEncoder is a encoder that can encode all kinds of messages.
@@ -39,7 +39,8 @@ func (enc *messageEncoder) encode(m *raftpb.Message) error {
// messageDecoder is a decoder that can decode all kinds of messages.
type messageDecoder struct {
- r io.Reader
+ r io.Reader
+ buf []byte
}
var (
@@ -47,6 +48,12 @@ var (
ErrExceedSizeLimit = errors.New("rafthttp: error limit exceeded")
)
+func newMessageDecoder(r io.Reader) *messageDecoder {
+ return &messageDecoder{
+ r: r,
+ buf: make([]byte, msgAppV2BufSize),
+ }
+}
func (dec *messageDecoder) decode() (raftpb.Message, error) {
var m raftpb.Message
var l uint64
@@ -56,7 +63,12 @@ func (dec *messageDecoder) decode() (raftpb.Message, error) {
if l > readBytesLimit {
return m, ErrExceedSizeLimit
}
- buf := make([]byte, int(l))
+ var buf []byte
+ if l < uint64(len(dec.buf)) {
+ buf = dec.buf[:l]
+ } else {
+ buf = make([]byte, int(l))
+ }
if _, err := io.ReadFull(dec.r, buf); err != nil {
return m, err
}
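
The decoder now carries a reusable buffer so that messages no larger than the preallocated size are read without a per-message allocation; only oversized payloads fall back to make. A minimal sketch of the reuse pattern (the buffer size here is illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

const smallBufSize = 4096 // stands in for msgAppV2BufSize

type reusingReader struct {
	r   io.Reader
	buf []byte // reused for payloads that fit
}

func newReusingReader(r io.Reader) *reusingReader {
	return &reusingReader{r: r, buf: make([]byte, smallBufSize)}
}

// readPayload returns a buffer holding exactly n bytes, reusing the internal
// buffer when the payload fits and allocating only for oversized payloads.
func (d *reusingReader) readPayload(n int) ([]byte, error) {
	var buf []byte
	if n <= len(d.buf) {
		buf = d.buf[:n]
	} else {
		buf = make([]byte, n)
	}
	_, err := io.ReadFull(d.r, buf)
	return buf, err
}

func main() {
	d := newReusingReader(bytes.NewReader([]byte("hello world")))
	p, _ := d.readPayload(5)
	fmt.Println(string(p)) // hello
}
```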
diff --git a/transport/rafthttp/msg_codec_test.go b/transport/rafthttp/msg_codec_test.go
index dc810b6b..8e479534 100644
--- a/transport/rafthttp/msg_codec_test.go
+++ b/transport/rafthttp/msg_codec_test.go
@@ -19,7 +19,7 @@ import (
"reflect"
"testing"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
)
func TestMessage(t *testing.T) {
diff --git a/transport/rafthttp/msgappv2_codec.go b/transport/rafthttp/msgappv2_codec.go
index 00b293b6..1ee1fea6 100644
--- a/transport/rafthttp/msgappv2_codec.go
+++ b/transport/rafthttp/msgappv2_codec.go
@@ -20,10 +20,10 @@ import (
"io"
"time"
- "github.com/absolute8511/ZanRedisDB/pkg/pbutil"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/stats"
+ "github.com/youzan/ZanRedisDB/pkg/pbutil"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/stats"
)
const (
@@ -118,7 +118,7 @@ func (enc *msgAppV2Encoder) encode(m *raftpb.Message) error {
if _, err := enc.w.Write(enc.uint64buf); err != nil {
return err
}
- if n := m.Entries[i].Size(); n < msgAppV2BufSize {
+ if n := m.Entries[i].Size(); n <= msgAppV2BufSize {
if _, err := m.Entries[i].MarshalTo(enc.buf[:n]); err != nil {
return err
}
@@ -143,12 +143,22 @@ func (enc *msgAppV2Encoder) encode(m *raftpb.Message) error {
return err
}
// write size of message
- if err := binary.Write(enc.w, binary.BigEndian, uint64(m.Size())); err != nil {
+ ns := m.Size()
+ if err := binary.Write(enc.w, binary.BigEndian, uint64(ns)); err != nil {
return err
}
// write message
- if _, err := enc.w.Write(pbutil.MustMarshal(m)); err != nil {
- return err
+ if ns <= msgAppV2BufSize {
+ if _, err := m.MarshalTo(enc.buf[:ns]); err != nil {
+ return err
+ }
+ if _, err := enc.w.Write(enc.buf[:ns]); err != nil {
+ return err
+ }
+ } else {
+ if _, err := enc.w.Write(pbutil.MustMarshal(m)); err != nil {
+ return err
+ }
}
enc.term = m.Term
@@ -228,7 +238,7 @@ func (dec *msgAppV2Decoder) decode() (raftpb.Message, error) {
}
size := binary.BigEndian.Uint64(dec.uint64buf)
var buf []byte
- if size < msgAppV2BufSize {
+ if size <= msgAppV2BufSize {
buf = dec.buf[:size]
if _, err := io.ReadFull(dec.r, buf); err != nil {
return m, err
@@ -241,7 +251,10 @@ func (dec *msgAppV2Decoder) decode() (raftpb.Message, error) {
}
dec.index++
// 1 alloc
- pbutil.MustUnmarshal(&m.Entries[i], buf)
+ err := pbutil.MaybeUnmarshal(&m.Entries[i], buf)
+ if err != nil {
+ return m, err
+ }
}
// decode commit index
if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil {
@@ -253,11 +266,19 @@ func (dec *msgAppV2Decoder) decode() (raftpb.Message, error) {
if err := binary.Read(dec.r, binary.BigEndian, &size); err != nil {
return m, err
}
- buf := make([]byte, int(size))
+ var buf []byte
+ if size <= msgAppV2BufSize {
+ buf = dec.buf[:size]
+ } else {
+ buf = make([]byte, int(size))
+ }
if _, err := io.ReadFull(dec.r, buf); err != nil {
return m, err
}
- pbutil.MustUnmarshal(&m, buf)
+ err := pbutil.MaybeUnmarshal(&m, buf)
+ if err != nil {
+ return m, err
+ }
dec.term = m.Term
dec.index = m.Index
diff --git a/transport/rafthttp/msgappv2_codec_test.go b/transport/rafthttp/msgappv2_codec_test.go
index cdfb4fe3..f53a0c43 100644
--- a/transport/rafthttp/msgappv2_codec_test.go
+++ b/transport/rafthttp/msgappv2_codec_test.go
@@ -19,9 +19,9 @@ import (
"reflect"
"testing"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/stats"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/stats"
)
func TestMsgAppV2(t *testing.T) {
diff --git a/transport/rafthttp/peer.go b/transport/rafthttp/peer.go
index e97ea8f9..3eed1778 100644
--- a/transport/rafthttp/peer.go
+++ b/transport/rafthttp/peer.go
@@ -16,14 +16,14 @@ package rafthttp
import (
"sync"
+ "sync/atomic"
"time"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/snap"
- "github.com/absolute8511/ZanRedisDB/stats"
- "golang.org/x/net/context"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/snap"
+ "github.com/youzan/ZanRedisDB/stats"
)
const (
@@ -109,14 +109,10 @@ type peer struct {
msgAppV2Reader *streamReader
msgAppReader *streamReader
- recvc chan raftpb.Message
- propc chan raftpb.Message
-
mu sync.Mutex
- paused bool
+ paused int32
- cancel context.CancelFunc // cancel pending works in go routine created by peer.
- stopc chan struct{}
+ stopc chan struct{}
}
func startPeer(transport *Transport, urls types.URLs, peerID types.ID, ps *stats.PeerStats) *peer {
@@ -147,72 +143,18 @@ func startPeer(transport *Transport, urls types.URLs, peerID types.ID, ps *stats
writer: startStreamWriter(peerID, status, ps, r),
pipeline: pipeline,
snapSender: newSnapshotSender(transport, picker, peerID, status),
- recvc: make(chan raftpb.Message, recvBufSize),
- propc: make(chan raftpb.Message, maxPendingProposals),
stopc: make(chan struct{}),
}
- ctx, cancel := context.WithCancel(context.Background())
- p.cancel = cancel
- go func() {
- for {
- select {
- case mm := <-p.recvc:
- if err := r.Process(ctx, mm); err != nil {
- plog.Warningf("failed to process raft message (%v)", err)
- }
- case <-p.stopc:
- return
- }
- }
- }()
-
- // r.Process might block for processing proposal when there is no leader.
- // Thus propc must be put into a separate routine with recvc to avoid blocking
- // processing other raft messages.
- go func() {
- for {
- select {
- case mm := <-p.propc:
- if err := r.Process(ctx, mm); err != nil {
- plog.Warningf("failed to process raft message (%v)", err)
- }
- case <-p.stopc:
- return
- }
- }
- }()
-
- p.msgAppV2Reader = &streamReader{
- peerID: peerID,
- typ: streamTypeMsgAppV2,
- tr: transport,
- picker: picker,
- status: status,
- recvc: p.recvc,
- propc: p.propc,
- }
- p.msgAppReader = &streamReader{
- peerID: peerID,
- typ: streamTypeMessage,
- tr: transport,
- picker: picker,
- status: status,
- recvc: p.recvc,
- propc: p.propc,
- }
- p.msgAppV2Reader.start()
- p.msgAppReader.start()
+ p.msgAppV2Reader = startStreamReader(peerID, streamTypeMsgAppV2, transport, picker, status, r)
+ p.msgAppReader = startStreamReader(peerID, streamTypeMessage, transport, picker, status, r)
return p
}
func (p *peer) send(m raftpb.Message) {
- p.mu.Lock()
- paused := p.paused
- p.mu.Unlock()
-
- if paused {
+ paused := atomic.LoadInt32(&p.paused)
+ if paused == 1 {
return
}
@@ -261,7 +203,7 @@ func (p *peer) activeSince() time.Time { return p.status.activeSince() }
func (p *peer) Pause() {
p.mu.Lock()
defer p.mu.Unlock()
- p.paused = true
+ atomic.StoreInt32(&p.paused, 1)
p.msgAppReader.pause()
p.msgAppV2Reader.pause()
}
@@ -270,7 +212,7 @@ func (p *peer) Pause() {
func (p *peer) Resume() {
p.mu.Lock()
defer p.mu.Unlock()
- p.paused = false
+ atomic.StoreInt32(&p.paused, 0)
p.msgAppReader.resume()
p.msgAppV2Reader.resume()
}
@@ -280,7 +222,6 @@ func (p *peer) stop() {
defer plog.Infof("stopped peer %s", p.id)
close(p.stopc)
- p.cancel()
p.msgAppV2Writer.stop()
p.writer.stop()
p.pipeline.stop()
diff --git a/transport/rafthttp/peer_status.go b/transport/rafthttp/peer_status.go
index 140778c4..e1026f64 100644
--- a/transport/rafthttp/peer_status.go
+++ b/transport/rafthttp/peer_status.go
@@ -19,7 +19,7 @@ import (
"sync"
"time"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/pkg/types"
)
type failureType struct {
diff --git a/transport/rafthttp/peer_test.go b/transport/rafthttp/peer_test.go
index 47a521ea..232c11bc 100644
--- a/transport/rafthttp/peer_test.go
+++ b/transport/rafthttp/peer_test.go
@@ -17,7 +17,7 @@ package rafthttp
import (
"testing"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
)
func TestPeerPick(t *testing.T) {
diff --git a/transport/rafthttp/pipeline.go b/transport/rafthttp/pipeline.go
index 6f421ce8..eb216de9 100644
--- a/transport/rafthttp/pipeline.go
+++ b/transport/rafthttp/pipeline.go
@@ -17,16 +17,17 @@ package rafthttp
import (
"bytes"
"errors"
- "golang.org/x/net/context"
"io/ioutil"
"sync"
"time"
- "github.com/absolute8511/ZanRedisDB/pkg/pbutil"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/stats"
+ "golang.org/x/net/context"
+
+ "github.com/youzan/ZanRedisDB/pkg/pbutil"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/stats"
)
const (
diff --git a/transport/rafthttp/pipeline_test.go b/transport/rafthttp/pipeline_test.go
index cacd4b29..1e30222c 100644
--- a/transport/rafthttp/pipeline_test.go
+++ b/transport/rafthttp/pipeline_test.go
@@ -24,10 +24,10 @@ import (
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/pkg/testutil"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/stats"
+ "github.com/youzan/ZanRedisDB/pkg/testutil"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/stats"
"github.com/coreos/etcd/version"
)
diff --git a/transport/rafthttp/remote.go b/transport/rafthttp/remote.go
index ebc1994b..61e601b4 100644
--- a/transport/rafthttp/remote.go
+++ b/transport/rafthttp/remote.go
@@ -15,8 +15,8 @@
package rafthttp
import (
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
)
type remote struct {
diff --git a/transport/rafthttp/snapshot_sender.go b/transport/rafthttp/snapshot_sender.go
index 04d8cf55..00e2457d 100644
--- a/transport/rafthttp/snapshot_sender.go
+++ b/transport/rafthttp/snapshot_sender.go
@@ -16,22 +16,26 @@ package rafthttp
import (
"bytes"
- "golang.org/x/net/context"
+ "errors"
"io"
"io/ioutil"
"net/http"
"time"
- "github.com/absolute8511/ZanRedisDB/pkg/httputil"
- pioutil "github.com/absolute8511/ZanRedisDB/pkg/ioutil"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft"
- "github.com/absolute8511/ZanRedisDB/snap"
+ "golang.org/x/net/context"
+
+ "github.com/youzan/ZanRedisDB/pkg/httputil"
+ pioutil "github.com/youzan/ZanRedisDB/pkg/ioutil"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/snap"
)
var (
// timeout for reading snapshot response body
- snapResponseReadTimeout = 5 * time.Second
+ snapResponseReadTimeout = 5 * time.Second
+ snapTransferCheckInterval = 5 * time.Second
)
type snapshotSender struct {
@@ -61,7 +65,9 @@ func newSnapshotSender(tr *Transport, picker *urlPicker, to types.ID, status *pe
}
}
-func (s *snapshotSender) stop() { close(s.stopc) }
+func (s *snapshotSender) stop() {
+ close(s.stopc)
+}
func (s *snapshotSender) send(merged snap.Message) {
m := merged.Message
@@ -97,12 +103,72 @@ func (s *snapshotSender) send(merged snap.Message) {
sentFailures.WithLabelValues(m.ToGroup.String()).Inc()
return
}
+ sentBytes.WithLabelValues(m.ToGroup.String()).Add(float64(merged.TotalSize))
+ // keep checking the transfer status until all snapshot data has been processed
+
s.status.activate()
- s.r.ReportSnapshot(m.To, m.ToGroup, raft.SnapshotFinish)
- plog.Infof("database snapshot [index: %d, to: %s] sent out successfully",
- m.Snapshot.Metadata.Index, m.ToGroup.String())
+ go func() {
+ tk := time.NewTicker(snapTransferCheckInterval)
+ defer tk.Stop()
+ done := false
+ var err error
+ for !done {
+ select {
+ case <-s.stopc:
+ s.r.ReportSnapshot(m.To, m.ToGroup, raft.SnapshotFailure)
+ return
+ case <-tk.C:
+ // check the snapshot status; treat status codes 404/400 as an old peer version and skip waiting
+ done, err = s.checkSnapshotStatus(m)
+ if err != nil {
+ plog.Infof("database snapshot [index: %d, to: %s] sent check failed: %v",
+ m.Snapshot.Metadata.Index, m.ToGroup.String(), err.Error())
+ s.r.ReportSnapshot(m.To, m.ToGroup, raft.SnapshotFailure)
+ return
+ }
+ if !done {
+ plog.Infof("database snapshot [index: %d, to: %s] sent still transferring",
+ m.Snapshot.Metadata.Index, m.ToGroup.String())
+ }
+ }
+ }
+ s.r.ReportSnapshot(m.To, m.ToGroup, raft.SnapshotFinish)
+ plog.Infof("database snapshot [index: %d, to: %s] sent out successfully",
+ m.Snapshot.Metadata.Index, m.ToGroup.String())
+ }()
+}
- sentBytes.WithLabelValues(m.ToGroup.String()).Add(float64(merged.TotalSize))
+func (s *snapshotSender) checkSnapshotStatus(m raftpb.Message) (bool, error) {
+ buf := &bytes.Buffer{}
+ enc := &messageEncoder{w: buf}
+ // encode raft message
+ if err := enc.encode(&m); err != nil {
+ return false, err
+ }
+ u := s.picker.pick()
+ req := createPostRequest(u, RaftSnapshotCheckPrefix, buf, "application/octet-stream", s.tr.URLs, s.from, s.cid)
+
+ ctx, cancel := context.WithTimeout(context.Background(), snapTransferCheckInterval)
+ req = req.WithContext(ctx)
+ defer cancel()
+ resp, err := s.tr.pipelineRt.RoundTrip(req)
+ if err != nil {
+ return false, err
+ }
+ switch resp.StatusCode {
+ case http.StatusNoContent:
+ // snapshot transfer finished
+ return true, nil
+ case http.StatusAlreadyReported:
+ // snapshot transfer still in progress, keep waiting
+ return false, nil
+ case http.StatusNotFound, http.StatusForbidden, http.StatusMethodNotAllowed, http.StatusBadRequest:
+ // old peer version without status support, no need to wait
+ return true, nil
+ default:
+ plog.Infof("not expected response while check snapshot: %v", resp.Status)
+ return false, errors.New(resp.Status)
+ }
}
// post posts the given request.
diff --git a/transport/rafthttp/snapshot_test.go b/transport/rafthttp/snapshot_test.go
index fe7abcab..2face527 100644
--- a/transport/rafthttp/snapshot_test.go
+++ b/transport/rafthttp/snapshot_test.go
@@ -25,9 +25,9 @@ import (
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/snap"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/snap"
)
type strReaderCloser struct{ *strings.Reader }
@@ -114,7 +114,7 @@ func testSnapshotSend(t *testing.T, sm *snap.Message) (bool, []os.FileInfo) {
sent := false
select {
- case <-time.After(time.Second):
+ case <-time.After(snapTransferCheckInterval * 2):
t.Fatalf("timed out sending snapshot")
case sent = <-sm.CloseNotify():
}
diff --git a/transport/rafthttp/stream.go b/transport/rafthttp/stream.go
index 741dd45d..c7ae1c06 100644
--- a/transport/rafthttp/stream.go
+++ b/transport/rafthttp/stream.go
@@ -26,20 +26,20 @@ import (
"golang.org/x/net/context"
- "github.com/absolute8511/ZanRedisDB/pkg/httputil"
- "github.com/absolute8511/ZanRedisDB/pkg/transport"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/stats"
"github.com/coreos/etcd/version"
"github.com/coreos/go-semver/semver"
+ "github.com/youzan/ZanRedisDB/pkg/httputil"
+ "github.com/youzan/ZanRedisDB/pkg/transport"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/stats"
)
const (
streamTypeMessage streamType = "message"
streamTypeMsgAppV2 streamType = "msgappv2"
- streamBufSize = 4096
+ streamBufSize = 1024 * 16
)
var (
@@ -171,18 +171,30 @@ func (cw *streamWriter) run() {
heartbeatc, msgc = nil, nil
case m := <-msgc:
- err := enc.encode(&m)
- if err == nil {
+ var err error
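+ // drain msgc and batch-encode pending messages before flushing,
+ // capping the batch at streamBufSize/2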
+ for done := false; !done; {
+ err = enc.encode(&m)
unflushed += m.Size()
-
- if len(msgc) == 0 || batched > streamBufSize/2 {
- flusher.Flush()
- sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed))
- unflushed = 0
- batched = 0
- } else {
- batched++
+ batched++
+ m.Entries = nil
+ if err != nil {
+ break
+ }
+ if batched > streamBufSize/2 {
+ done = true
+ break
}
+ select {
+ case m = <-msgc:
+ default:
+ done = true
+ }
+ }
+ if err == nil {
+ flusher.Flush()
+ sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed))
+ unflushed = 0
+ batched = 0
continue
}
@@ -277,20 +289,34 @@ type streamReader struct {
tr *Transport
picker *urlPicker
status *peerStatus
- recvc chan<- raftpb.Message
- propc chan<- raftpb.Message
errorc chan<- error
- mu sync.Mutex
- paused bool
- cancel func()
- closer io.Closer
+ mu sync.Mutex
+ paused bool
+ cancel func()
+ processCancel func()
+ closer io.Closer
+ r Raft
stopc chan struct{}
done chan struct{}
}
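+// startStreamReader creates a streamReader that delivers decoded messages
+// directly to the given Raft and starts it.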
+func startStreamReader(peerID types.ID, typ streamType, tr *Transport,
+ picker *urlPicker, status *peerStatus, r Raft) *streamReader {
+ reader := &streamReader{
+ peerID: peerID,
+ typ: typ,
+ tr: tr,
+ picker: picker,
+ status: status,
+ r: r,
+ }
+ reader.start()
+ return reader
+}
+
func (r *streamReader) start() {
r.stopc = make(chan struct{})
r.done = make(chan struct{})
@@ -350,7 +376,7 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error {
case streamTypeMsgAppV2:
dec = newMsgAppV2Decoder(rc, cr.tr.ID, cr.peerID)
case streamTypeMessage:
- dec = &messageDecoder{r: rc}
+ dec = newMessageDecoder(rc)
default:
plog.Panicf("unhandled stream type %s", t)
}
@@ -364,6 +390,8 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error {
default:
cr.closer = rc
}
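+ // processCancel aborts any in-flight Raft Process call when the reader is stopped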
+ ctx, cancel := context.WithCancel(context.Background())
+ cr.processCancel = cancel
cr.mu.Unlock()
for {
@@ -396,18 +424,13 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error {
plog.Errorf("receive message not group: %v", m.String())
}
- recvc := cr.recvc
- if m.Type == raftpb.MsgProp {
- recvc = cr.propc
- }
-
- select {
- case recvc <- m:
- default:
+ err = cr.r.Process(ctx, m)
+ if err != nil {
if cr.status.isActive() {
- plog.MergeWarningf("dropped internal raft message from %s since receiving buffer is full (overloaded network)", types.ID(m.From))
+ plog.MergeWarningf("error process raft message from %s, err %v", types.ID(m.From), err.Error())
+ } else {
+ plog.Debugf("dropped %s from %s since err %v", m.Type, types.ID(m.From), err.Error())
}
- plog.Debugf("dropped %s from %s since receiving buffer is full", m.Type, types.ID(m.From))
recvFailures.WithLabelValues(m.FromGroup.Name).Inc()
}
}
@@ -420,6 +443,9 @@ func (cr *streamReader) stop() {
cr.cancel()
}
cr.close()
+ if cr.processCancel != nil {
+ cr.processCancel()
+ }
cr.mu.Unlock()
<-cr.done
}
diff --git a/transport/rafthttp/stream_test.go b/transport/rafthttp/stream_test.go
index 1f9a5099..86917089 100644
--- a/transport/rafthttp/stream_test.go
+++ b/transport/rafthttp/stream_test.go
@@ -25,12 +25,12 @@ import (
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/pkg/testutil"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/stats"
"github.com/coreos/etcd/version"
"github.com/coreos/go-semver/semver"
+ "github.com/youzan/ZanRedisDB/pkg/testutil"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/stats"
)
// TestStreamWriterAttachOutgoingConn tests that outgoingConn can be attached
@@ -258,8 +258,6 @@ func TestStreamReaderDialDetectUnsupport(t *testing.T) {
// TestStream tests that streamReader and streamWriter can build stream to
// send messages between each other.
func TestStream(t *testing.T) {
- recvc := make(chan raftpb.Message, streamBufSize)
- propc := make(chan raftpb.Message, streamBufSize)
msgapp := raftpb.Message{
Type: raftpb.MsgApp,
From: 2,
@@ -270,6 +268,8 @@ func TestStream(t *testing.T) {
Entries: []raftpb.Entry{{Term: 1, Index: 4}},
}
+ recvc := make(chan raftpb.Message, streamBufSize)
+ readerFakeRaft := &fakeRaft{recvc: recvc}
tests := []struct {
t streamType
m raftpb.Message
@@ -278,7 +278,7 @@ func TestStream(t *testing.T) {
{
streamTypeMessage,
raftpb.Message{Type: raftpb.MsgProp, To: 2},
- propc,
+ recvc,
},
{
streamTypeMessage,
@@ -303,16 +303,7 @@ func TestStream(t *testing.T) {
picker := mustNewURLPicker(t, []string{srv.URL})
tr := &Transport{streamRt: &http.Transport{}, ClusterID: "1"}
- sr := &streamReader{
- peerID: types.ID(2),
- typ: tt.t,
- tr: tr,
- picker: picker,
- status: newPeerStatus(types.ID(2)),
- recvc: recvc,
- propc: propc,
- }
- sr.start()
+ sr := startStreamReader(types.ID(2), tt.t, tr, picker, newPeerStatus(types.ID(2)), readerFakeRaft)
// wait for stream to work
var writec chan<- raftpb.Message
diff --git a/transport/rafthttp/transport.go b/transport/rafthttp/transport.go
index 415b1cd6..718b2176 100644
--- a/transport/rafthttp/transport.go
+++ b/transport/rafthttp/transport.go
@@ -19,21 +19,27 @@ import (
"sync"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/raft"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/snap"
- "github.com/absolute8511/ZanRedisDB/stats"
- //"github.com/absolute8511/ZanRedisDB/pkg/logutil"
- "github.com/absolute8511/ZanRedisDB/pkg/transport"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/snap"
+ "github.com/youzan/ZanRedisDB/stats"
+
+ //"github.com/youzan/ZanRedisDB/pkg/logutil"
+ "github.com/youzan/ZanRedisDB/pkg/transport"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+
//"github.com/coreos/pkg/capnslog"
"github.com/xiang90/probing"
"golang.org/x/net/context"
)
-//var plog = logutil.NewMergeLogger(capnslog.NewPackageLogger("github.com/absolute8511/ZanRedisDB", "transport/rafthttp"))
-var plog = common.NewMergeLogger(common.NewLevelLogger(common.LOG_INFO, common.NewDefaultLogger("transport/rafthttp")))
+var plog = common.NewMergeLogger(common.NewLevelLogger(common.LOG_INFO, common.NewLogger()))
+
+func SetLogger(level int32, logger common.Logger) {
+ plog.Logger = logger
+ plog.SetLevel(int32(level))
+}
func SetLogLevel(level int) {
plog.SetLevel(int32(level))
@@ -159,6 +165,7 @@ func (t *Transport) Handler() http.Handler {
mux.Handle(RaftPrefix, pipelineHandler)
mux.Handle(RaftStreamPrefix+"/", streamHandler)
mux.Handle(RaftSnapshotPrefix, snapHandler)
+ mux.Handle(RaftSnapshotCheckPrefix, snapHandler)
mux.Handle(ProbingPrefix, probing.NewHandler())
return mux
}
@@ -183,9 +190,9 @@ func (t *Transport) Send(msgs []raftpb.Message) {
t.mu.RUnlock()
if pok {
- if m.Type == raftpb.MsgApp {
- t.TrStats.SendAppendReq(m.Size())
- }
+ //if m.Type == raftpb.MsgApp {
+ // t.TrStats.SendAppendReq(m.Size())
+ //}
p.send(m)
continue
}
@@ -383,12 +390,16 @@ type Pausable interface {
}
func (t *Transport) Pause() {
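+ // take the read lock so the peers map cannot change while iterating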
+ t.mu.RLock()
+ defer t.mu.RUnlock()
for _, p := range t.peers {
p.(Pausable).Pause()
}
}
func (t *Transport) Resume() {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
for _, p := range t.peers {
p.(Pausable).Resume()
}
diff --git a/transport/rafthttp/transport_bench_test.go b/transport/rafthttp/transport_bench_test.go
index b4d35f2d..66de1d36 100644
--- a/transport/rafthttp/transport_bench_test.go
+++ b/transport/rafthttp/transport_bench_test.go
@@ -20,10 +20,10 @@ import (
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/stats"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/stats"
"golang.org/x/net/context"
)
diff --git a/transport/rafthttp/transport_test.go b/transport/rafthttp/transport_test.go
index a784d8d1..c2e3af9b 100644
--- a/transport/rafthttp/transport_test.go
+++ b/transport/rafthttp/transport_test.go
@@ -16,17 +16,23 @@ package rafthttp
import (
"net/http"
+ "os"
"reflect"
"testing"
"time"
- "github.com/absolute8511/ZanRedisDB/pkg/testutil"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/stats"
"github.com/xiang90/probing"
+ "github.com/youzan/ZanRedisDB/pkg/testutil"
+ "github.com/youzan/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/stats"
)
+func TestMain(m *testing.M) {
+ ret := m.Run()
+ os.Exit(ret)
+}
+
// TestTransportSend tests that transport can send messages using correct
// underlying peer, and drop local or unknown-target messages.
func TestTransportSend(t *testing.T) {
diff --git a/transport/rafthttp/urlpick.go b/transport/rafthttp/urlpick.go
index b79fac81..502cfb4a 100644
--- a/transport/rafthttp/urlpick.go
+++ b/transport/rafthttp/urlpick.go
@@ -18,7 +18,7 @@ import (
"net/url"
"sync"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/pkg/types"
)
type urlPicker struct {
diff --git a/transport/rafthttp/urlpick_test.go b/transport/rafthttp/urlpick_test.go
index 59138a7e..354ec695 100644
--- a/transport/rafthttp/urlpick_test.go
+++ b/transport/rafthttp/urlpick_test.go
@@ -18,7 +18,7 @@ import (
"net/url"
"testing"
- "github.com/absolute8511/ZanRedisDB/pkg/testutil"
+ "github.com/youzan/ZanRedisDB/pkg/testutil"
)
// TestURLPickerPickTwice tests that pick returns a possible url,
diff --git a/transport/rafthttp/util.go b/transport/rafthttp/util.go
index 84476fcf..5dd711d4 100644
--- a/transport/rafthttp/util.go
+++ b/transport/rafthttp/util.go
@@ -23,8 +23,8 @@ import (
"strings"
"time"
- "github.com/absolute8511/ZanRedisDB/pkg/transport"
- "github.com/absolute8511/ZanRedisDB/pkg/types"
+ "github.com/youzan/ZanRedisDB/pkg/transport"
+ "github.com/youzan/ZanRedisDB/pkg/types"
"github.com/coreos/etcd/version"
"github.com/coreos/go-semver/semver"
)
diff --git a/transport/rafthttp/util_test.go b/transport/rafthttp/util_test.go
index 01373799..4e8d4712 100644
--- a/transport/rafthttp/util_test.go
+++ b/transport/rafthttp/util_test.go
@@ -22,7 +22,7 @@ import (
"reflect"
"testing"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
"github.com/coreos/etcd/version"
"github.com/coreos/go-semver/semver"
)
diff --git a/wal/decoder.go b/wal/decoder.go
index 3595fc1f..537a0107 100644
--- a/wal/decoder.go
+++ b/wal/decoder.go
@@ -21,10 +21,10 @@ import (
"io"
"sync"
- "github.com/absolute8511/ZanRedisDB/pkg/crc"
- "github.com/absolute8511/ZanRedisDB/pkg/pbutil"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/wal/walpb"
+ "github.com/youzan/ZanRedisDB/pkg/crc"
+ "github.com/youzan/ZanRedisDB/pkg/pbutil"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/wal/walpb"
)
const minSectorSize = 512
@@ -59,6 +59,11 @@ func (d *decoder) decode(rec *walpb.Record) error {
return d.decodeRecord(rec)
}
+// The raft max message size is set to 1 MB in the etcd server.
+// Assuming projects set a reasonable message size limit,
+// an entry should never exceed 100 MB.
+const maxWALEntrySizeLimit = int64(100 * 1024 * 1024)
+
func (d *decoder) decodeRecord(rec *walpb.Record) error {
if len(d.brs) == 0 {
return io.EOF
@@ -79,6 +84,9 @@ func (d *decoder) decodeRecord(rec *walpb.Record) error {
}
recBytes, padBytes := decodeFrameSize(l)
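+ // reject oversized frames before allocating the read buffer below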
+ if recBytes >= maxWALEntrySizeLimit-padBytes {
+ return ErrMaxWALEntrySizeLimitExceeded
+ }
data := make([]byte, recBytes+padBytes)
if _, err = io.ReadFull(d.brs[0], data); err != nil {
diff --git a/wal/encoder.go b/wal/encoder.go
index eac10db8..6e402f2c 100644
--- a/wal/encoder.go
+++ b/wal/encoder.go
@@ -21,9 +21,9 @@ import (
"os"
"sync"
- "github.com/absolute8511/ZanRedisDB/pkg/crc"
- "github.com/absolute8511/ZanRedisDB/pkg/ioutil"
- "github.com/absolute8511/ZanRedisDB/wal/walpb"
+ "github.com/youzan/ZanRedisDB/pkg/crc"
+ "github.com/youzan/ZanRedisDB/pkg/ioutil"
+ "github.com/youzan/ZanRedisDB/wal/walpb"
)
// walPageBytes is the alignment for flushing records to the backing Writer.
diff --git a/wal/file_pipeline.go b/wal/file_pipeline.go
index 793a59f0..a913679b 100644
--- a/wal/file_pipeline.go
+++ b/wal/file_pipeline.go
@@ -19,7 +19,7 @@ import (
"os"
"path/filepath"
- "github.com/absolute8511/ZanRedisDB/pkg/fileutil"
+ "github.com/youzan/ZanRedisDB/pkg/fileutil"
)
// filePipeline pipelines allocating disk space
diff --git a/wal/record_test.go b/wal/record_test.go
index 18a2fa09..e1e21759 100644
--- a/wal/record_test.go
+++ b/wal/record_test.go
@@ -22,7 +22,7 @@ import (
"reflect"
"testing"
- "github.com/absolute8511/ZanRedisDB/wal/walpb"
+ "github.com/youzan/ZanRedisDB/wal/walpb"
)
var (
diff --git a/wal/repair.go b/wal/repair.go
index def99074..b3b8b670 100644
--- a/wal/repair.go
+++ b/wal/repair.go
@@ -19,8 +19,8 @@ import (
"os"
"path/filepath"
- "github.com/absolute8511/ZanRedisDB/pkg/fileutil"
- "github.com/absolute8511/ZanRedisDB/wal/walpb"
+ "github.com/youzan/ZanRedisDB/pkg/fileutil"
+ "github.com/youzan/ZanRedisDB/wal/walpb"
)
// Repair tries to repair ErrUnexpectedEOF in the
@@ -53,8 +53,8 @@ func Repair(dirpath string) bool {
continue
case io.EOF:
return true
- case io.ErrUnexpectedEOF:
- plog.Infof("repairing %v", f.Name())
+ case io.ErrUnexpectedEOF, ErrMaxWALEntrySizeLimitExceeded:
+ plog.Infof("repairing %v for err: %s", f.Name(), err)
bf, bferr := os.Create(f.Name() + ".broken")
if bferr != nil {
plog.Errorf("could not repair %v, failed to create backup file", f.Name())
@@ -90,7 +90,7 @@ func Repair(dirpath string) bool {
// openLast opens the last wal file for read and write.
func openLast(dirpath string) (*fileutil.LockedFile, error) {
- names, err := readWalNames(dirpath)
+ names, err := readWALNames(dirpath)
if err != nil {
return nil, err
}
diff --git a/wal/repair_test.go b/wal/repair_test.go
index 54873fb5..f8d34ec9 100644
--- a/wal/repair_test.go
+++ b/wal/repair_test.go
@@ -21,8 +21,8 @@ import (
"os"
"testing"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/wal/walpb"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/wal/walpb"
)
type corruptFunc func(string, int64) error
@@ -57,11 +57,14 @@ func testRepair(t *testing.T, ents [][]raftpb.Entry, corrupt corruptFunc, expect
if err != nil {
t.Fatal(err)
}
-
- for _, es := range ents {
+ off, _ := w.tail().Seek(0, io.SeekCurrent)
+ t.Logf("offset:%v", off)
+ for i, es := range ents {
if err = w.Save(raftpb.HardState{}, es); err != nil {
t.Fatal(err)
}
+ off, _ := w.tail().Seek(0, io.SeekCurrent)
+ t.Logf("%v offset:%v", i, off)
}
offset, err := w.tail().Seek(0, io.SeekCurrent)
@@ -155,7 +158,9 @@ func TestRepairWriteTearLast(t *testing.T) {
}
return nil
}
- testRepair(t, makeEnts(50), corruptf, 40)
+ ents := makeEnts(50)
+ t.Log(ents[0][0].Size())
+ testRepair(t, ents, corruptf, 30)
}
// TestRepairWriteTearMiddle repairs the WAL when there is write tearing
@@ -182,3 +187,57 @@ func TestRepairWriteTearMiddle(t *testing.T) {
}
testRepair(t, ents, corruptf, 1)
}
+
+func TestRepairFailDeleteDir(t *testing.T) {
+ p, err := ioutil.TempDir(os.TempDir(), "waltest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(p)
+
+ w, err := Create(p, nil, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ oldSegmentSizeBytes := SegmentSizeBytes
+ SegmentSizeBytes = 64
+ defer func() {
+ SegmentSizeBytes = oldSegmentSizeBytes
+ }()
+ for _, es := range makeEnts(50) {
+ if err = w.Save(raftpb.HardState{}, es); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ _, serr := w.tail().Seek(0, io.SeekCurrent)
+ if serr != nil {
+ t.Fatal(serr)
+ }
+ w.Close()
+
+ f, err := openLast(p)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if terr := f.Truncate(20); terr != nil {
+ t.Fatal(terr)
+ }
+ f.Close()
+
+ w, err = Open(p, walpb.Snapshot{}, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, _, _, err = w.ReadAll()
+ if err != io.ErrUnexpectedEOF {
+ t.Fatalf("err = %v, want error %v", err, io.ErrUnexpectedEOF)
+ }
+ w.Close()
+
+ os.RemoveAll(p)
+ if Repair(p) {
+ t.Fatal("expect 'Repair' fail on unexpected directory deletion")
+ }
+}
diff --git a/wal/util.go b/wal/util.go
index c0af5a2f..64bd800a 100644
--- a/wal/util.go
+++ b/wal/util.go
@@ -19,13 +19,14 @@ import (
"fmt"
"strings"
- "github.com/absolute8511/ZanRedisDB/pkg/fileutil"
+ "github.com/youzan/ZanRedisDB/pkg/fileutil"
)
var (
badWalName = errors.New("bad wal name")
)
+// Exist returns true if there are any files in a given directory.
func Exist(dirpath string) bool {
names, err := fileutil.ReadDir(dirpath)
if err != nil {
@@ -40,7 +41,7 @@ func Exist(dirpath string) bool {
func searchIndex(names []string, index uint64) (int, bool) {
for i := len(names) - 1; i >= 0; i-- {
name := names[i]
- _, curIndex, err := parseWalName(name)
+ _, curIndex, err := parseWALName(name)
if err != nil {
plog.Panicf("parse correct name should never fail: %v", err)
}
@@ -56,7 +57,7 @@ func searchIndex(names []string, index uint64) (int, bool) {
func isValidSeq(names []string) bool {
var lastSeq uint64
for _, name := range names {
- curSeq, _, err := parseWalName(name)
+ curSeq, _, err := parseWALName(name)
if err != nil {
plog.Panicf("parse correct name should never fail: %v", err)
}
@@ -67,7 +68,7 @@ func isValidSeq(names []string) bool {
}
return true
}
-func readWalNames(dirpath string) ([]string, error) {
+func readWALNames(dirpath string) ([]string, error) {
names, err := fileutil.ReadDir(dirpath)
if err != nil {
return nil, err
@@ -82,7 +83,7 @@ func readWalNames(dirpath string) ([]string, error) {
func checkWalNames(names []string) []string {
wnames := make([]string, 0)
for _, name := range names {
- if _, _, err := parseWalName(name); err != nil {
+ if _, _, err := parseWALName(name); err != nil {
// don't complain about left over tmp files
if !strings.HasSuffix(name, ".tmp") {
plog.Warningf("ignored file %v in wal", name)
@@ -94,7 +95,7 @@ func checkWalNames(names []string) []string {
return wnames
}
-func parseWalName(str string) (seq, index uint64, err error) {
+func parseWALName(str string) (seq, index uint64, err error) {
if !strings.HasSuffix(str, ".wal") {
return 0, 0, badWalName
}
diff --git a/wal/wal.go b/wal/wal.go
index fa6a1608..e0812c4a 100644
--- a/wal/wal.go
+++ b/wal/wal.go
@@ -25,12 +25,12 @@ import (
"sync"
"time"
- "github.com/absolute8511/ZanRedisDB/common"
- "github.com/absolute8511/ZanRedisDB/pkg/fileutil"
- "github.com/absolute8511/ZanRedisDB/pkg/pbutil"
- "github.com/absolute8511/ZanRedisDB/raft"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/wal/walpb"
+ "github.com/youzan/ZanRedisDB/common"
+ "github.com/youzan/ZanRedisDB/pkg/fileutil"
+ "github.com/youzan/ZanRedisDB/pkg/pbutil"
+ "github.com/youzan/ZanRedisDB/raft"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/wal/walpb"
)
const (
@@ -43,6 +43,7 @@ const (
// warnSyncDuration is the amount of time allotted to an fsync before
// logging a warning
warnSyncDuration = time.Second
+ bufSize = 1024 * 1024
)
var (
@@ -50,16 +51,19 @@ var (
// The actual size might be larger than this. In general, the default
// value should be used, but this is defined as an exported variable
// so that tests can set a different segment size.
- SegmentSizeBytes int64 = 32 * 1000 * 1000
+ SegmentSizeBytes int64 = 64 * 1000 * 1000
plog = common.NewLevelLogger(common.LOG_INFO, common.NewDefaultLogger("wal"))
- ErrMetadataConflict = errors.New("wal: conflicting metadata found")
- ErrFileNotFound = errors.New("wal: file not found")
- ErrCRCMismatch = errors.New("wal: crc mismatch")
- ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
- ErrSnapshotNotFound = errors.New("wal: snapshot not found")
- crcTable = crc32.MakeTable(crc32.Castagnoli)
+ ErrMetadataConflict = errors.New("wal: conflicting metadata found")
+ ErrFileNotFound = errors.New("wal: file not found")
+ ErrCRCMismatch = errors.New("wal: crc mismatch")
+ ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
+ ErrSnapshotNotFound = errors.New("wal: snapshot not found")
+ ErrSliceOutOfRange = errors.New("wal: slice bounds out of range")
+ ErrMaxWALEntrySizeLimitExceeded = errors.New("wal: max entry size limit exceeded")
+ ErrDecoderNotFound = errors.New("wal: decoder not found")
+ crcTable = crc32.MakeTable(crc32.Castagnoli)
)
// WAL is a logical representation of the stable storage.
@@ -87,6 +91,7 @@ type WAL struct {
locks []*fileutil.LockedFile // the locked files the WAL holds (the name is increasing)
fp *filePipeline
optimizedFsync bool
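+ // buf is a reusable buffer for marshaling entries in saveEntry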
+ buf []byte
}
// Create creates a WAL ready for appending records. The given metadata is
@@ -123,6 +128,7 @@ func Create(dirpath string, metadata []byte, optimizedFsync bool) (*WAL, error)
dir: dirpath,
metadata: metadata,
optimizedFsync: optimizedFsync,
+ buf: make([]byte, bufSize),
}
w.encoder, err = newFileEncoder(f.File, 0)
if err != nil {
@@ -139,26 +145,56 @@ func Create(dirpath string, metadata []byte, optimizedFsync bool) (*WAL, error)
return nil, err
}
- if w, err = w.renameWal(tmpdirpath); err != nil {
+ if w, err = w.renameWAL(tmpdirpath); err != nil {
return nil, err
}
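+ // if anything below fails, move the partially created WAL directory aside via cleanupWAL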
+ var perr error
+ defer func() {
+ if perr != nil {
+ w.cleanupWAL()
+ }
+ }()
+
// directory was renamed; sync parent dir to persist rename
pdir, perr := fileutil.OpenDir(filepath.Dir(w.dir))
if perr != nil {
return nil, perr
}
+ dirCloser := func() error {
+ if perr = pdir.Close(); perr != nil {
+ return perr
+ }
+ return nil
+ }
if perr = fileutil.Fsync(pdir); perr != nil {
+ dirCloser()
return nil, perr
}
- if perr = pdir.Close(); err != nil {
+ if perr = dirCloser(); perr != nil {
return nil, perr
}
return w, nil
}
-func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) {
+func (w *WAL) cleanupWAL() {
+ var err error
+ if err = w.Close(); err != nil {
+ plog.Panicf("failed to close WAL during cleanup: %s", err)
+ }
+ brokenDirName := fmt.Sprintf("%s.broken.%v", w.dir, time.Now().Format("20060102.150405.999999"))
+ if err = os.Rename(w.dir, brokenDirName); err != nil {
+ plog.Panicf(
+ "failed to rename WAL during cleanup: %v, %v, %v",
+ err,
+ w.dir,
+ brokenDirName,
+ )
+ }
+}
+
+func (w *WAL) renameWAL(tmpdirpath string) (*WAL, error) {
if err := os.RemoveAll(w.dir); err != nil {
return nil, err
}
@@ -170,7 +206,7 @@ func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) {
// process holds the lock.
if err := os.Rename(tmpdirpath, w.dir); err != nil {
if _, ok := err.(*os.LinkError); ok {
- return w.renameWalUnlock(tmpdirpath)
+ return w.renameWALUnlock(tmpdirpath)
}
return nil, err
}
@@ -180,7 +216,7 @@ func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) {
return w, err
}
-func (w *WAL) renameWalUnlock(tmpdirpath string) (*WAL, error) {
+func (w *WAL) renameWALUnlock(tmpdirpath string) (*WAL, error) {
// rename of directory with locked files doesn't work on windows/cifs;
// close the WAL to release the locks so the directory can be renamed.
plog.Infof("releasing file lock to rename %q to %q", tmpdirpath, w.dir)
@@ -224,17 +260,57 @@ func OpenForRead(dirpath string, snap walpb.Snapshot) (*WAL, error) {
}
func openAtIndex(dirpath string, snap walpb.Snapshot, write bool, optimizedFsync bool) (*WAL, error) {
- names, err := readWalNames(dirpath)
+ names, nameIndex, err := selectWALFiles(dirpath, snap)
+ if err != nil {
+ return nil, err
+ }
+
+ rs, ls, closer, err := openWALFiles(dirpath, names, nameIndex, write)
if err != nil {
return nil, err
}
+ // create a WAL ready for reading
+ w := &WAL{
+ dir: dirpath,
+ start: snap,
+ decoder: newDecoder(rs...),
+ readClose: closer,
+ locks: ls,
+ optimizedFsync: optimizedFsync,
+ buf: make([]byte, bufSize),
+ }
+
+ if write {
+ // write reuses the file descriptors from read; don't close so
+ // WAL can append without dropping the file lock
+ w.readClose = nil
+ if _, _, err := parseWALName(filepath.Base(w.tail().Name())); err != nil {
+ closer()
+ return nil, err
+ }
+ w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
+ }
+
+ return w, nil
+}
+
+func selectWALFiles(dirpath string, snap walpb.Snapshot) ([]string, int, error) {
+ names, err := readWALNames(dirpath)
+ if err != nil {
+ return nil, -1, err
+ }
+
nameIndex, ok := searchIndex(names, snap.Index)
if !ok || !isValidSeq(names[nameIndex:]) {
- return nil, ErrFileNotFound
+ err = ErrFileNotFound
+ return nil, -1, err
}
- // open the wal files
+ return names, nameIndex, nil
+}
+
+func openWALFiles(dirpath string, names []string, nameIndex int, write bool) ([]io.Reader, []*fileutil.LockedFile, func() error, error) {
rcs := make([]io.ReadCloser, 0)
rs := make([]io.Reader, 0)
ls := make([]*fileutil.LockedFile, 0)
@@ -244,7 +320,7 @@ func openAtIndex(dirpath string, snap walpb.Snapshot, write bool, optimizedFsync
l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode)
if err != nil {
closeAll(rcs...)
- return nil, err
+ return nil, nil, nil, err
}
ls = append(ls, l)
rcs = append(rcs, l)
@@ -252,7 +328,7 @@ func openAtIndex(dirpath string, snap walpb.Snapshot, write bool, optimizedFsync
rf, err := os.OpenFile(p, os.O_RDONLY, fileutil.PrivateFileMode)
if err != nil {
closeAll(rcs...)
- return nil, err
+ return nil, nil, nil, err
}
ls = append(ls, nil)
rcs = append(rcs, rf)
@@ -262,28 +338,7 @@ func openAtIndex(dirpath string, snap walpb.Snapshot, write bool, optimizedFsync
closer := func() error { return closeAll(rcs...) }
- // create a WAL ready for reading
- w := &WAL{
- dir: dirpath,
- start: snap,
- decoder: newDecoder(rs...),
- readClose: closer,
- locks: ls,
- optimizedFsync: optimizedFsync,
- }
-
- if write {
- // write reuses the file descriptors from read; don't close so
- // WAL can append without dropping the file lock
- w.readClose = nil
- if _, _, err := parseWalName(filepath.Base(w.tail().Name())); err != nil {
- closer()
- return nil, err
- }
- w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
- }
-
- return w, nil
+ return rs, ls, closer, nil
}
func (w *WAL) ChangeFsyncFlag(optimizeFsync bool) {
@@ -307,6 +362,9 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.
defer w.mu.Unlock()
rec := &walpb.Record{}
+ if w.decoder == nil {
+ return nil, state, nil, ErrDecoderNotFound
+ }
decoder := w.decoder
var match bool
@@ -315,7 +373,11 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.
case entryType:
e := mustUnmarshalEntry(rec.Data)
if e.Index > w.start.Index {
- ents = append(ents[:e.Index-w.start.Index-1], e)
+ up := e.Index - w.start.Index - 1
+ if up > uint64(len(ents)) {
+ return nil, state, nil, fmt.Errorf("index out of range, corrupt data: %v-%v", up, len(ents))
+ }
+ ents = append(ents[:up], e)
}
w.enti = e.Index
case stateType:
@@ -406,6 +468,151 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.
return metadata, state, ents, err
}
+// ValidSnapshotEntries returns all the valid snapshot entries in the wal logs in the given directory.
+// Snapshot entries are valid if their index is less than or equal to the most recent committed hardstate.
+func ValidSnapshotEntries(walDir string) ([]walpb.Snapshot, error) {
+ var snaps []walpb.Snapshot
+ var state raftpb.HardState
+ var err error
+
+ rec := &walpb.Record{}
+ names, err := readWALNames(walDir)
+ if err != nil {
+ return nil, err
+ }
+
+ // open wal files in read mode, so that there is no conflict
+ // when the same WAL is opened elsewhere in write mode
+ rs, _, closer, err := openWALFiles(walDir, names, 0, false)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if closer != nil {
+ closer()
+ }
+ }()
+
+ // create a new decoder from the readers on the WAL files
+ decoder := newDecoder(rs...)
+
+ for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
+ switch rec.Type {
+ case snapshotType:
+ var loadedSnap walpb.Snapshot
+ pbutil.MustUnmarshal(&loadedSnap, rec.Data)
+ snaps = append(snaps, loadedSnap)
+ case stateType:
+ state = mustUnmarshalState(rec.Data)
+ case crcType:
+ crc := decoder.crc.Sum32()
+ // current crc of decoder must match the crc of the record.
+ // no need to match a 0 crc, since the decoder is a new one in this case.
+ if crc != 0 && rec.Validate(crc) != nil {
+ return nil, ErrCRCMismatch
+ }
+ decoder.updateCRC(rec.Crc)
+ }
+ }
+ // We do not have to read out all the WAL entries
+ // as the decoder is opened in read mode.
+ if err != io.EOF && err != io.ErrUnexpectedEOF && err != ErrMaxWALEntrySizeLimitExceeded {
+ return nil, err
+ }
+
+ // filter out any snaps that are newer than the committed hardstate
+ n := 0
+ for _, s := range snaps {
+ if s.Index <= state.Commit {
+ snaps[n] = s
+ n++
+ }
+ }
+ snaps = snaps[:n:n]
+
+ return snaps, nil
+}
+
+// Verify reads through the given WAL and verifies that it is not corrupted.
+// It creates a new decoder to read through the records of the given WAL.
+// It does not conflict with any open WAL, but it is recommended not to
+// call this function after opening the WAL for writing.
+// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
+// If the loaded snap doesn't match with the expected one, it will
+// return error ErrSnapshotMismatch.
+func Verify(walDir string, snap walpb.Snapshot) error {
+ var metadata []byte
+ var err error
+ var match bool
+
+ rec := &walpb.Record{}
+
+ names, nameIndex, err := selectWALFiles(walDir, snap)
+ if err != nil {
+ return err
+ }
+
+ // open wal files in read mode, so that there is no conflict
+ // when the same WAL is opened elsewhere in write mode
+ rs, _, closer, err := openWALFiles(walDir, names, nameIndex, false)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if closer != nil {
+ closer()
+ }
+ }()
+
+ // create a new decoder from the readers on the WAL files
+ decoder := newDecoder(rs...)
+
+ for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
+ switch rec.Type {
+ case metadataType:
+ if metadata != nil && !bytes.Equal(metadata, rec.Data) {
+ return ErrMetadataConflict
+ }
+ metadata = rec.Data
+ case crcType:
+ crc := decoder.crc.Sum32()
+ // Current crc of decoder must match the crc of the record.
+ // We need not match 0 crc, since the decoder is a new one at this point.
+ if crc != 0 && rec.Validate(crc) != nil {
+ return ErrCRCMismatch
+ }
+ decoder.updateCRC(rec.Crc)
+ case snapshotType:
+ var loadedSnap walpb.Snapshot
+ pbutil.MustUnmarshal(&loadedSnap, rec.Data)
+ if loadedSnap.Index == snap.Index {
+ if loadedSnap.Term != snap.Term {
+ return ErrSnapshotMismatch
+ }
+ match = true
+ }
+ // We ignore all entry and state type records as these
+ // are not necessary for validating the WAL contents
+ case entryType:
+ case stateType:
+ default:
+ return fmt.Errorf("unexpected block type %d", rec.Type)
+ }
+ }
+
+ // We do not have to read out all the WAL entries
+ // as the decoder is opened in read mode.
+ if err != io.EOF && err != io.ErrUnexpectedEOF {
+ return err
+ }
+
+ if !match {
+ return ErrSnapshotNotFound
+ }
+
+ return nil
+}
+
// cut closes the current file being written and creates a new one ready for appending.
// cut first creates a temp wal file and writes the necessary headers into it.
// Then cut atomically renames the temp wal file to a wal file.
@@ -418,7 +625,7 @@ func (w *WAL) cut() error {
if err := w.tail().Truncate(off); err != nil {
return err
}
- if err := w.sync(true); err != nil {
+ if err := w.sync(!w.optimizedFsync); err != nil {
return err
}
@@ -447,7 +654,7 @@ func (w *WAL) cut() error {
return err
}
// atomically move temp wal file to wal file
- if err = w.sync(true); err != nil {
+ if err = w.sync(!w.optimizedFsync); err != nil {
return err
}
@@ -505,6 +712,10 @@ func (w *WAL) sync(fsync bool) error {
return err
}
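+// Sync forces a sync of the WAL to stable storage.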
+func (w *WAL) Sync() error {
+ return w.sync(true)
+}
+
// ReleaseLockTo releases the locks, which has smaller index than the given index
// except the largest one among them.
// For example, if WAL is holding lock 1,2,3,4,5,6, ReleaseLockTo(4) will release
@@ -521,7 +732,7 @@ func (w *WAL) ReleaseLockTo(index uint64) error {
found := false
for i, l := range w.locks {
- _, lockIndex, err := parseWalName(filepath.Base(l.Name()))
+ _, lockIndex, err := parseWALName(filepath.Base(l.Name()))
if err != nil {
return err
}
@@ -553,6 +764,7 @@ func (w *WAL) ReleaseLockTo(index uint64) error {
return nil
}
+// Close closes the current WAL file and directory.
func (w *WAL) Close() error {
w.mu.Lock()
defer w.mu.Unlock()
@@ -580,9 +792,19 @@ func (w *WAL) Close() error {
}
func (w *WAL) saveEntry(e *raftpb.Entry) error {
- // TODO: add MustMarshalTo to reduce one allocation.
- b := pbutil.MustMarshal(e)
- rec := &walpb.Record{Type: entryType, Data: b}
+ needSize := e.Size()
+ var data []byte
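+ // marshal into the reusable buffer when the entry fits, otherwise fall back to a fresh allocation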
+ if needSize > len(w.buf) {
+ data = pbutil.MustMarshal(e)
+ } else {
+ n, err := e.MarshalTo(w.buf[:needSize])
+ if err != nil {
+ return err
+ }
+ data = w.buf[:n]
+ }
+
+ rec := &walpb.Record{Type: entryType, Data: data}
if err := w.encoder.encode(rec); err != nil {
return err
}
@@ -653,7 +875,7 @@ func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
if w.enti < e.Index {
w.enti = e.Index
}
- return w.sync(true)
+ return w.sync(!w.optimizedFsync)
}
func (w *WAL) saveCrc(prevCrc uint32) error {
@@ -672,7 +894,7 @@ func (w *WAL) seq() uint64 {
if t == nil {
return 0
}
- seq, _, err := parseWalName(filepath.Base(t.Name()))
+ seq, _, err := parseWALName(filepath.Base(t.Name()))
if err != nil {
plog.Fatalf("bad wal name %s (%v)", t.Name(), err)
}
@@ -680,10 +902,11 @@ func (w *WAL) seq() uint64 {
}
func closeAll(rcs ...io.ReadCloser) error {
+ var anyerr error
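+ // close all readers and return the last error encountered instead of aborting on the first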
for _, f := range rcs {
if err := f.Close(); err != nil {
- return err
+ anyerr = err
}
}
- return nil
+ return anyerr
}
diff --git a/wal/wal_bench_test.go b/wal/wal_bench_test.go
index 8da82155..f2ec342c 100644
--- a/wal/wal_bench_test.go
+++ b/wal/wal_bench_test.go
@@ -19,7 +19,7 @@ import (
"os"
"testing"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
)
func BenchmarkWrite100EntryWithoutBatch(b *testing.B) { benchmarkWriteEntry(b, 100, 0) }
diff --git a/wal/wal_test.go b/wal/wal_test.go
index d1c22aaf..cb817856 100644
--- a/wal/wal_test.go
+++ b/wal/wal_test.go
@@ -16,17 +16,21 @@ package wal
import (
"bytes"
+ "fmt"
"io"
"io/ioutil"
+ "math"
"os"
+ "path"
"path/filepath"
"reflect"
+ "regexp"
"testing"
- "github.com/absolute8511/ZanRedisDB/pkg/fileutil"
- "github.com/absolute8511/ZanRedisDB/pkg/pbutil"
- "github.com/absolute8511/ZanRedisDB/raft/raftpb"
- "github.com/absolute8511/ZanRedisDB/wal/walpb"
+ "github.com/youzan/ZanRedisDB/pkg/fileutil"
+ "github.com/youzan/ZanRedisDB/pkg/pbutil"
+ "github.com/youzan/ZanRedisDB/raft/raftpb"
+ "github.com/youzan/ZanRedisDB/wal/walpb"
)
func TestNew(t *testing.T) {
@@ -83,6 +87,69 @@ func TestNew(t *testing.T) {
}
}
+func TestCreateFailFromPollutedDir(t *testing.T) {
+ p, err := ioutil.TempDir(os.TempDir(), "waltest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(p)
+ ioutil.WriteFile(filepath.Join(p, "test.wal"), []byte("data"), os.ModeTemporary)
+
+ _, err = Create(p, []byte("data"), false)
+ if err != os.ErrExist {
+ t.Fatalf("expected %v, got %v", os.ErrExist, err)
+ }
+}
+
+func TestWalCleanup(t *testing.T) {
+ testRoot, err := ioutil.TempDir(os.TempDir(), "waltestroot")
+ if err != nil {
+ t.Fatal(err)
+ }
+ p, err := ioutil.TempDir(testRoot, "waltest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(testRoot)
+
+ w, err := Create(p, []byte(""), false)
+ if err != nil {
+ t.Fatalf("err = %v, want nil", err)
+ }
+ w.cleanupWAL()
+ fnames, err := fileutil.ReadDir(testRoot)
+ if err != nil {
+ t.Fatalf("err = %v, want nil", err)
+ }
+ if len(fnames) != 1 {
+ t.Fatalf("expected 1 file under %v, got %v", testRoot, len(fnames))
+ }
+ pattern := fmt.Sprintf(`%s.broken\.[\d]{8}\.[\d]{6}\.[\d]{1,6}?`, filepath.Base(p))
+ match, _ := regexp.MatchString(pattern, fnames[0])
+ if !match {
+ t.Errorf("match = false, expected true for %v with pattern %v", fnames[0], pattern)
+ }
+}
+
+func TestCreateFailFromNoSpaceLeft(t *testing.T) {
+ p, err := ioutil.TempDir(os.TempDir(), "waltest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(p)
+
+ oldSegmentSizeBytes := SegmentSizeBytes
+ defer func() {
+ SegmentSizeBytes = oldSegmentSizeBytes
+ }()
+ SegmentSizeBytes = math.MaxInt64
+
+ _, err = Create(p, []byte("data"), false)
+ if err == nil { // no space left on device
+ t.Fatalf("expected error 'no space left on device', got nil")
+ }
+}
+
func TestNewForInitedDir(t *testing.T) {
p, err := ioutil.TempDir(os.TempDir(), "waltest")
if err != nil {
@@ -150,6 +217,57 @@ func TestOpenAtIndex(t *testing.T) {
}
}
+// TestVerify tests that Verify throws a non-nil error when the WAL is corrupted.
+// The test creates a WAL directory and cuts out multiple WAL files. Then
+// it corrupts one of the files by completely truncating it.
+func TestVerify(t *testing.T) {
+ walDir, err := ioutil.TempDir(os.TempDir(), "waltest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(walDir)
+
+ // create WAL
+ w, err := Create(walDir, nil, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer w.Close()
+
+ // make 5 separate files
+ for i := 0; i < 5; i++ {
+ es := []raftpb.Entry{{Index: uint64(i), Data: []byte("waldata" + fmt.Sprintf("%v", i+1))}}
+ if err = w.Save(raftpb.HardState{}, es); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.cut(); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // to verify the WAL is not corrupted at this point
+ err = Verify(walDir, walpb.Snapshot{})
+ if err != nil {
+ t.Errorf("expected a nil error, got %v", err)
+ }
+
+ walFiles, err := ioutil.ReadDir(walDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // corrupt the WAL by truncating one of the WAL files completely
+ err = os.Truncate(path.Join(walDir, walFiles[2].Name()), 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = Verify(walDir, walpb.Snapshot{})
+ if err == nil {
+ t.Error("expected a non-nil error, got nil")
+ }
+}
+
// TODO: split it into smaller tests for better readability
func TestCut(t *testing.T) {
p, err := ioutil.TempDir(os.TempDir(), "waltest")
@@ -378,7 +496,7 @@ func TestScanWalName(t *testing.T) {
{"0000000000000000-0000000000000000.snap", 0, 0, false},
}
for i, tt := range tests {
- s, index, err := parseWalName(tt.str)
+ s, index, err := parseWALName(tt.str)
if g := err == nil; g != tt.wok {
t.Errorf("#%d: ok = %v, want %v", i, g, tt.wok)
}
@@ -524,6 +642,35 @@ func TestOpenForRead(t *testing.T) {
}
}
+func TestOpenWithMaxIndex(t *testing.T) {
+ p, err := ioutil.TempDir(os.TempDir(), "waltest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(p)
+ // create WAL
+ w, err := Create(p, nil, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer w.Close()
+
+ es := []raftpb.Entry{{Index: uint64(math.MaxInt64)}}
+ if err = w.Save(raftpb.HardState{}, es); err != nil {
+ t.Fatal(err)
+ }
+ w.Close()
+
+ w, err = Open(p, walpb.Snapshot{}, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, _, _, err = w.ReadAll()
+ if err == nil {
+ t.Fatalf("err = %v, want out of range error", err)
+ }
+}
+
func TestSaveEmpty(t *testing.T) {
var buf bytes.Buffer
var est raftpb.HardState
@@ -582,7 +729,7 @@ func TestReleaseLockTo(t *testing.T) {
}
for i, l := range w.locks {
var lockIndex uint64
- _, lockIndex, err = parseWalName(filepath.Base(l.Name()))
+ _, lockIndex, err = parseWALName(filepath.Base(l.Name()))
if err != nil {
t.Fatal(err)
}
@@ -600,7 +747,7 @@ func TestReleaseLockTo(t *testing.T) {
if len(w.locks) != 1 {
t.Errorf("len(w.locks) = %d, want %d", len(w.locks), 1)
}
- _, lockIndex, err := parseWalName(filepath.Base(w.locks[0].Name()))
+ _, lockIndex, err := parseWALName(filepath.Base(w.locks[0].Name()))
if err != nil {
t.Fatal(err)
}
@@ -802,3 +949,184 @@ func TestOpenOnTornWrite(t *testing.T) {
t.Fatalf("expected len(ents) = %d, got %d", wEntries, len(ents))
}
}
+
+func TestRenameFail(t *testing.T) {
+ p, err := ioutil.TempDir(os.TempDir(), "waltest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(p)
+
+ oldSegmentSizeBytes := SegmentSizeBytes
+ defer func() {
+ SegmentSizeBytes = oldSegmentSizeBytes
+ }()
+ SegmentSizeBytes = math.MaxInt64
+
+ tp, terr := ioutil.TempDir(os.TempDir(), "waltest")
+ if terr != nil {
+ t.Fatal(terr)
+ }
+ os.RemoveAll(tp)
+
+ w := &WAL{
+ dir: p,
+ }
+ w2, werr := w.renameWAL(tp)
+ if w2 != nil || werr == nil { // os.Rename should fail from 'no such file or directory'
+ t.Fatalf("expected error, got %v", werr)
+ }
+}
+
+// TestReadAllFail ensures ReadAll returns an error if used without opening the WAL
+func TestReadAllFail(t *testing.T) {
+ dir, err := ioutil.TempDir(os.TempDir(), "waltest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ // create initial WAL
+ f, err := Create(dir, []byte("metadata"), true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+ // try to read without opening the WAL
+ _, _, _, err = f.ReadAll()
+ if err == nil || err != ErrDecoderNotFound {
+ t.Fatalf("err = %v, want ErrDecoderNotFound", err)
+ }
+}
+
+// TestValidSnapshotEntries ensures ValidSnapshotEntries returns all valid wal snapshot entries, accounting
+// for hardstate
+func TestValidSnapshotEntries(t *testing.T) {
+ p, err := ioutil.TempDir(os.TempDir(), "waltest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(p)
+ snap0 := walpb.Snapshot{Index: 0, Term: 0}
+ snap1 := walpb.Snapshot{Index: 1, Term: 1}
+ state1 := raftpb.HardState{Commit: 1, Term: 1}
+ snap2 := walpb.Snapshot{Index: 2, Term: 1}
+ snap3 := walpb.Snapshot{Index: 3, Term: 2}
+ state2 := raftpb.HardState{Commit: 3, Term: 2}
+ snap4 := walpb.Snapshot{Index: 4, Term: 2} // will be orphaned since the last committed entry will be snap3
+ func() {
+ w, err := Create(p, nil, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer w.Close()
+
+ // snap0 is implicitly created at index 0, term 0
+ if err = w.SaveSnapshot(snap1); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.Save(state1, nil); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.SaveSnapshot(snap2); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.SaveSnapshot(snap3); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.Save(state2, nil); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.SaveSnapshot(snap4); err != nil {
+ t.Fatal(err)
+ }
+ }()
+ walSnaps, err := ValidSnapshotEntries(p)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected := []walpb.Snapshot{snap0, snap1, snap2, snap3}
+ if !reflect.DeepEqual(walSnaps, expected) {
+ t.Errorf("expected walSnaps %+v, got %+v", expected, walSnaps)
+ }
+}
+
+// TestValidSnapshotEntriesAfterPurgeWal ensures that, when there are multiple wal files and the first
+// wal file has been purged, ValidSnapshotEntries still works correctly.
+func TestValidSnapshotEntriesAfterPurgeWal(t *testing.T) {
+ oldSegmentSizeBytes := SegmentSizeBytes
+ SegmentSizeBytes = 64
+ defer func() {
+ SegmentSizeBytes = oldSegmentSizeBytes
+ }()
+ p, err := ioutil.TempDir(os.TempDir(), "waltest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(p)
+ snap0 := walpb.Snapshot{Index: 0, Term: 0}
+ snap1 := walpb.Snapshot{Index: 1, Term: 1}
+ state1 := raftpb.HardState{Commit: 1, Term: 1}
+ snap2 := walpb.Snapshot{Index: 2, Term: 1}
+ snap3 := walpb.Snapshot{Index: 3, Term: 2}
+ state2 := raftpb.HardState{Commit: 3, Term: 2}
+ func() {
+ w, err := Create(p, nil, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer w.Close()
+
+ // snap0 is implicitly created at index 0, term 0
+ if err = w.SaveSnapshot(snap1); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.Save(state1, nil); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.SaveSnapshot(snap2); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.SaveSnapshot(snap3); err != nil {
+ t.Fatal(err)
+ }
+ for i := 0; i < 128; i++ {
+ if err = w.Save(state2, nil); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ }()
+ files, _, err := selectWALFiles(p, snap0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ os.Remove(p + "/" + files[0])
+ _, err = ValidSnapshotEntries(p)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func BenchmarkSaveEntries(b *testing.B) {
+ p, _ := ioutil.TempDir(os.TempDir(), "walbench")
+ defer os.RemoveAll(p)
+
+ w, _ := Create(p, []byte("metadata"), true)
+ defer w.Close()
+
+ state := raftpb.HardState{Term: 1}
+ w.Save(state, nil)
+ bigData := make([]byte, 100)
+ strdata := "Hello World!!"
+ copy(bigData, strdata)
+ // build a batch of ~100 small entries to save on each iteration
+ ents := make([]raftpb.Entry, 0, 100)
+ for i := 1; i < 100; i++ {
+ ents = append(ents, raftpb.Entry{Index: uint64(i), Term: 1, Data: bigData})
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ w.Save(state, ents)
+ }
+}
diff --git a/wal/walpb/record.pb.go b/wal/walpb/record.pb.go
index cda2172f..3f43091e 100644
--- a/wal/walpb/record.pb.go
+++ b/wal/walpb/record.pb.go
@@ -1,24 +1,15 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: record.proto
-/*
- Package walpb is a generated protocol buffer package.
-
- It is generated from these files:
- record.proto
-
- It has these top-level messages:
- Record
- Snapshot
-*/
package walpb
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-
-import io "io"
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -29,35 +20,109 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type Record struct {
- Type int64 `protobuf:"varint,1,opt,name=type" json:"type"`
- Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"`
- Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Type int64 `protobuf:"varint,1,opt,name=type" json:"type"`
+ Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"`
+ Data []byte `protobuf:"bytes,3,opt,name=data" json:"data"`
+}
+
+func (m *Record) Reset() { *m = Record{} }
+func (m *Record) String() string { return proto.CompactTextString(m) }
+func (*Record) ProtoMessage() {}
+func (*Record) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bf94fd919e302a1d, []int{0}
+}
+func (m *Record) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Record) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Record.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Record) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Record.Merge(m, src)
+}
+func (m *Record) XXX_Size() int {
+ return m.Size()
+}
+func (m *Record) XXX_DiscardUnknown() {
+ xxx_messageInfo_Record.DiscardUnknown(m)
}
-func (m *Record) Reset() { *m = Record{} }
-func (m *Record) String() string { return proto.CompactTextString(m) }
-func (*Record) ProtoMessage() {}
-func (*Record) Descriptor() ([]byte, []int) { return fileDescriptorRecord, []int{0} }
+var xxx_messageInfo_Record proto.InternalMessageInfo
type Snapshot struct {
- Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"`
- Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"`
- XXX_unrecognized []byte `json:"-"`
+ Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"`
+ Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bf94fd919e302a1d, []int{1}
+}
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Snapshot) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Snapshot.Merge(m, src)
+}
+func (m *Snapshot) XXX_Size() int {
+ return m.Size()
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+ xxx_messageInfo_Snapshot.DiscardUnknown(m)
}
-func (m *Snapshot) Reset() { *m = Snapshot{} }
-func (m *Snapshot) String() string { return proto.CompactTextString(m) }
-func (*Snapshot) ProtoMessage() {}
-func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRecord, []int{1} }
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
func init() {
proto.RegisterType((*Record)(nil), "walpb.Record")
proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot")
}
+
+func init() { proto.RegisterFile("record.proto", fileDescriptor_bf94fd919e302a1d) }
+
+var fileDescriptor_bf94fd919e302a1d = []byte{
+ // 193 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce,
+ 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92,
+ 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0xa5, 0x10, 0x2e, 0xb6,
+ 0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d,
+ 0x66, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, 0xc0, 0x22, 0x42, 0x62, 0x5c, 0xcc, 0xc9, 0x45,
+ 0xc9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xbc, 0x50, 0x09, 0x90, 0x00, 0x48, 0x47, 0x4a, 0x62, 0x49,
+ 0xa2, 0x04, 0xb3, 0x02, 0xa3, 0x06, 0x0f, 0x4c, 0x07, 0x48, 0x44, 0xc9, 0x81, 0x8b, 0x23, 0x38,
+ 0x2f, 0xb1, 0xa0, 0x38, 0x23, 0xbf, 0x44, 0x48, 0x8a, 0x8b, 0x35, 0x33, 0x2f, 0x25, 0xb5, 0x02,
+ 0x6c, 0x30, 0x0b, 0x54, 0x19, 0x44, 0x08, 0x6c, 0x67, 0x6a, 0x51, 0x2e, 0xd8, 0x68, 0x16, 0xb8,
+ 0x9d, 0xa9, 0x45, 0xb9, 0x4e, 0x32, 0x27, 0x1e, 0xca, 0x31, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1,
+ 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70,
+ 0xe3, 0xb1, 0x1c, 0x03, 0x20, 0x00, 0x00, 0xff, 0xff, 0x48, 0x2c, 0x9d, 0x03, 0xe1, 0x00, 0x00,
+ 0x00,
+}
+
func (m *Record) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -85,9 +150,6 @@ func (m *Record) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintRecord(dAtA, i, uint64(len(m.Data)))
i += copy(dAtA[i:], m.Data)
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
return i, nil
}
@@ -112,9 +174,6 @@ func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x10
i++
i = encodeVarintRecord(dAtA, i, uint64(m.Term))
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
return i, nil
}
@@ -128,6 +187,9 @@ func encodeVarintRecord(dAtA []byte, offset int, v uint64) int {
return offset + 1
}
func (m *Record) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
n += 1 + sovRecord(uint64(m.Type))
@@ -136,20 +198,17 @@ func (m *Record) Size() (n int) {
l = len(m.Data)
n += 1 + l + sovRecord(uint64(l))
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
return n
}
func (m *Snapshot) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
n += 1 + sovRecord(uint64(m.Index))
n += 1 + sovRecord(uint64(m.Term))
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
return n
}
@@ -181,7 +240,7 @@ func (m *Record) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -209,7 +268,7 @@ func (m *Record) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Type |= (int64(b) & 0x7F) << shift
+ m.Type |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -228,7 +287,7 @@ func (m *Record) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Crc |= (uint32(b) & 0x7F) << shift
+ m.Crc |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -247,7 +306,7 @@ func (m *Record) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -256,6 +315,9 @@ func (m *Record) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRecord
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRecord
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -273,10 +335,12 @@ func (m *Record) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRecord
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRecord
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -301,7 +365,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -329,7 +393,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Index |= (uint64(b) & 0x7F) << shift
+ m.Index |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -348,7 +412,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Term |= (uint64(b) & 0x7F) << shift
+ m.Term |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -362,10 +426,12 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRecord
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRecord
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -429,10 +495,13 @@ func skipRecord(dAtA []byte) (n int, err error) {
break
}
}
- iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthRecord
}
+ iNdEx += length
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthRecord
+ }
return iNdEx, nil
case 3:
for {
@@ -461,6 +530,9 @@ func skipRecord(dAtA []byte) (n int, err error) {
return 0, err
}
iNdEx = start + next
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthRecord
+ }
}
return iNdEx, nil
case 4:
@@ -479,21 +551,3 @@ var (
ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowRecord = fmt.Errorf("proto: integer overflow")
)
-
-func init() { proto.RegisterFile("record.proto", fileDescriptorRecord) }
-
-var fileDescriptorRecord = []byte{
- // 186 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce,
- 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92,
- 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0x25, 0x3f, 0x2e, 0xb6,
- 0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d,
- 0x66, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, 0xc0, 0x22, 0x42, 0x62, 0x5c, 0xcc, 0xc9, 0x45,
- 0xc9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xbc, 0x50, 0x09, 0x90, 0x80, 0x90, 0x10, 0x17, 0x4b, 0x4a,
- 0x62, 0x49, 0xa2, 0x04, 0xb3, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x98, 0xad, 0xe4, 0xc0, 0xc5, 0x11,
- 0x9c, 0x97, 0x58, 0x50, 0x9c, 0x91, 0x5f, 0x22, 0x24, 0xc5, 0xc5, 0x9a, 0x99, 0x97, 0x92, 0x5a,
- 0x01, 0x36, 0x92, 0x05, 0xaa, 0x13, 0x22, 0x04, 0xb6, 0x2d, 0xb5, 0x28, 0x17, 0x6c, 0x28, 0x0b,
- 0xdc, 0xb6, 0xd4, 0xa2, 0x5c, 0x27, 0x91, 0x13, 0x0f, 0xe5, 0x18, 0x4e, 0x3c, 0x92, 0x63, 0xbc,
- 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f, 0xe5, 0x18, 0x00, 0x01, 0x00, 0x00,
- 0xff, 0xff, 0x7f, 0x5e, 0x5c, 0x46, 0xd3, 0x00, 0x00, 0x00,
-}