fix lock usage

Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
bigsheeper 2020-11-24 16:12:39 +08:00 committed by yefu.chen
parent 8ec7e4576e
commit 41fb2d2991
14 changed files with 255 additions and 42 deletions

.github/workflows/code-checker.yaml

@@ -0,0 +1,53 @@
name: Code Checker
# TODO: do not trigger action for some document file update
# This workflow is triggered on pushes or pull request to the repository.
on:
push:
# file paths to consider in the event. Optional; defaults to all.
paths:
- 'scripts/**'
- 'internal/**'
- 'cmd/**'
- 'build/**'
- '.github/workflows/code-checker.yaml'
- '.env'
- docker-compose.yml
- Makefile
- '!**.md'
pull_request:
# file paths to consider in the event. Optional; defaults to all.
paths:
- 'scripts/**'
- 'internal/**'
- 'cmd/**'
- 'build/**'
- '.github/workflows/code-checker.yaml'
- '.env'
- docker-compose.yml
- Makefile
- '!**.md'
jobs:
ubuntu:
name: AMD64 Ubuntu ${{ matrix.ubuntu }}
runs-on: ubuntu-latest
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
ubuntu: [18.04]
env:
UBUNTU: ${{ matrix.ubuntu }}
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Dockerfile Lint
uses: reviewdog/action-hadolint@v1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
reporter: github-pr-check # Default is github-pr-check
hadolint_ignore: DL3008
- name: Code Check
run: |
./build/builder.sh /bin/bash -c "make check-proto-product && make verifiers"

@@ -42,12 +42,6 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Check Dockerfile
uses: reviewdog/action-hadolint@v1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
reporter: github-pr-check # Default is github-pr-check
hadolint_ignore: DL3008
- name: Cache Docker Volumes
uses: actions/cache@v1
with:
@@ -62,4 +56,4 @@ jobs:
env:
CHECK_BUILDER: "1"
run: |
-./build/builder.sh
+./build/builder.sh /bin/bash -c "make check-proto-product && make unittest"

go.mod

@@ -12,7 +12,6 @@ require (
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/golang/protobuf v1.3.2
github.com/google/btree v1.0.0
-github.com/json-iterator/go v1.1.10
github.com/klauspost/compress v1.10.11 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/minio/minio-go/v7 v7.0.5
@@ -26,7 +25,7 @@ require (
github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463 // indirect
github.com/pivotal-golang/bytefmt v0.0.0-20200131002437-cf55d5288a48
github.com/prometheus/client_golang v1.5.1 // indirect
-github.com/prometheus/common v0.10.0
+github.com/prometheus/common v0.10.0 // indirect
github.com/prometheus/procfs v0.1.3 // indirect
github.com/sirupsen/logrus v1.6.0 // indirect
github.com/spaolacci/murmur3 v1.1.0

@@ -2,6 +2,7 @@ package proxy
import (
"log"
"net"
"os"
"strconv"
"strings"
@@ -38,6 +39,25 @@ func (pt *ParamTable) Init() {
pt.Save("_proxyID", proxyIDStr)
}
func (pt *ParamTable) NetWorkAddress() string {
addr, err := pt.Load("proxy.network.address")
if err != nil {
panic(err)
}
if ip := net.ParseIP(addr); ip == nil {
panic("invalid ip proxy.network.address")
}
port, err := pt.Load("proxy.network.port")
if err != nil {
panic(err)
}
_, err = strconv.Atoi(port)
if err != nil {
panic(err)
}
return addr + ":" + port
}
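
The new NetWorkAddress accessor validates the configured host and port up front, panicking at startup on bad config instead of failing later inside net.Listen. A minimal, self-contained sketch of the same checks, assuming a hypothetical validateHostPort helper in place of ParamTable.Load; note that net.ParseIP rejects hostnames such as "localhost", so the config value must be an IP literal:

package main

import (
	"fmt"
	"net"
	"strconv"
)

// validateHostPort mirrors the checks NetWorkAddress performs: the host
// must be an IP literal and the port must be numeric. Hypothetical helper,
// introduced here only for illustration.
func validateHostPort(addr, port string) (string, error) {
	if ip := net.ParseIP(addr); ip == nil {
		return "", fmt.Errorf("invalid ip: %q", addr)
	}
	if _, err := strconv.Atoi(port); err != nil {
		return "", fmt.Errorf("invalid port: %q", port)
	}
	return addr + ":" + port, nil
}

func main() {
	fmt.Println(validateHostPort("0.0.0.0", "5053")) // 0.0.0.0:5053 <nil>
	fmt.Println(validateHostPort("localhost", "80")) // error: not an IP literal
}
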
func (pt *ParamTable) MasterAddress() string {
ret, err := pt.Load("_MasterAddress")
if err != nil {

@@ -135,8 +135,7 @@ func (p *Proxy) AddCloseCallback(callbacks ...func()) {
func (p *Proxy) grpcLoop() {
defer p.proxyLoopWg.Done()
// TODO: use address in config instead
-lis, err := net.Listen("tcp", ":5053")
+lis, err := net.Listen("tcp", Params.NetWorkAddress())
if err != nil {
log.Fatalf("Proxy grpc server fatal error=%v", err)
}

@@ -6,6 +6,7 @@ import (
"log"
"os"
"strconv"
"strings"
"sync"
"testing"
@@ -24,7 +25,6 @@ import (
var ctx context.Context
var cancel func()
-var proxyAddress = "127.0.0.1:5053"
var proxyConn *grpc.ClientConn
var proxyClient servicepb.MilvusServiceClient
@@ -81,8 +81,13 @@ func setup() {
startMaster(ctx)
startProxy(ctx)
proxyAddr := Params.NetWorkAddress()
addr := strings.Split(proxyAddr, ":")
if addr[0] == "0.0.0.0" {
proxyAddr = "127.0.0.1:" + addr[1]
}
-conn, err := grpc.DialContext(ctx, proxyAddress, grpc.WithInsecure(), grpc.WithBlock())
+conn, err := grpc.DialContext(ctx, proxyAddr, grpc.WithInsecure(), grpc.WithBlock())
if err != nil {
log.Fatalf("Connect to proxy failed, error= %v", err)
}
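
Dialing 0.0.0.0 is not a reliable client target, so the test rewrites a wildcard bind address into a loopback address before connecting. A hedged sketch of that rewrite as a standalone helper (toDialable is a hypothetical name; the test inlines the same logic with strings.Split):

package main

import (
	"fmt"
	"strings"
)

// toDialable rewrites a wildcard listen address into one a client
// can actually connect to on the local machine.
func toDialable(listenAddr string) string {
	parts := strings.Split(listenAddr, ":")
	if len(parts) == 2 && parts[0] == "0.0.0.0" {
		return "127.0.0.1:" + parts[1]
	}
	return listenAddr
}

func main() {
	fmt.Println(toDialable("0.0.0.0:5053"))  // 127.0.0.1:5053
	fmt.Println(toDialable("10.0.0.8:5053")) // unchanged
}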

@@ -58,6 +58,8 @@ type collectionReplica interface {
removeSegment(segmentID UniqueID) error
getSegmentByID(segmentID UniqueID) (*Segment, error)
hasSegment(segmentID UniqueID) bool
freeAll()
}
type collectionReplicaImpl struct {
@@ -311,6 +313,9 @@ func (colReplica *collectionReplicaImpl) getSegmentNum() int {
}
func (colReplica *collectionReplicaImpl) getSegmentStatistics() *internalpb.QueryNodeSegStats {
colReplica.mu.RLock()
defer colReplica.mu.RUnlock()
var statisticData = make([]*internalpb.SegmentStats, 0)
for segmentID, segment := range colReplica.segments {
@@ -405,3 +410,16 @@ func (colReplica *collectionReplicaImpl) hasSegment(segmentID UniqueID) bool {
return ok
}
//-----------------------------------------------------------------------------------------------------
func (colReplica *collectionReplicaImpl) freeAll() {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
for _, seg := range colReplica.segments {
deleteSegment(seg)
}
for _, col := range colReplica.collections {
deleteCollection(col)
}
}
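
Together with the RLock/RUnlock added to getSegmentStatistics above, this hunk is the lock fix the commit title refers to: read-only paths take the shared read lock, while freeAll mutates the replica and therefore must hold the exclusive write lock. A minimal sketch of the pattern (the replica type below is a hypothetical stand-in for collectionReplicaImpl, not the real type):

package main

import (
	"fmt"
	"sync"
)

// replica is a stripped-down stand-in for collectionReplicaImpl.
type replica struct {
	mu       sync.RWMutex
	segments map[int64]string
}

// stats only reads shared state, so a read lock suffices and
// concurrent readers do not block one another.
func (r *replica) stats() int {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return len(r.segments)
}

// freeAll mutates shared state, so it holds the exclusive write
// lock for the whole teardown.
func (r *replica) freeAll() {
	r.mu.Lock()
	defer r.mu.Unlock()
	for id := range r.segments {
		delete(r.segments, id)
	}
}

func main() {
	r := &replica{segments: map[int64]string{1: "seg1"}}
	fmt.Println(r.stats()) // 1
	r.freeAll()
	fmt.Println(r.stats()) // 0
}

With sync.RWMutex, any number of RLock holders may proceed concurrently, but Lock waits until every reader has released, so freeAll cannot tear segments down while a statistics pass is still reading them.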

@@ -13,7 +13,7 @@ import (
)
//----------------------------------------------------------------------------------------------------- collection
-func TestColSegContainer_getCollectionNum(t *testing.T) {
+func TestCollectionReplica_getCollectionNum(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -62,9 +62,11 @@ func TestColSegContainer_getCollectionNum(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, (*node.replica).getCollectionNum(), 1)
(*node.replica).freeAll()
}
-func TestColSegContainer_addCollection(t *testing.T) {
+func TestCollectionReplica_addCollection(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -117,9 +119,11 @@ func TestColSegContainer_addCollection(t *testing.T) {
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, (*node.replica).getCollectionNum(), 1)
(*node.replica).freeAll()
}
-func TestColSegContainer_removeCollection(t *testing.T) {
+func TestCollectionReplica_removeCollection(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -178,9 +182,11 @@ func TestColSegContainer_removeCollection(t *testing.T) {
err = (*node.replica).removeCollection(collectionID)
assert.NoError(t, err)
assert.Equal(t, (*node.replica).getCollectionNum(), 0)
(*node.replica).freeAll()
}
-func TestColSegContainer_getCollectionByID(t *testing.T) {
+func TestCollectionReplica_getCollectionByID(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -240,9 +246,11 @@ func TestColSegContainer_getCollectionByID(t *testing.T) {
assert.NotNil(t, targetCollection)
assert.Equal(t, targetCollection.meta.Schema.Name, "collection0")
assert.Equal(t, targetCollection.meta.ID, UniqueID(0))
(*node.replica).freeAll()
}
-func TestColSegContainer_getCollectionByName(t *testing.T) {
+func TestCollectionReplica_getCollectionByName(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -302,9 +310,11 @@ func TestColSegContainer_getCollectionByName(t *testing.T) {
assert.NotNil(t, targetCollection)
assert.Equal(t, targetCollection.meta.Schema.Name, "collection0")
assert.Equal(t, targetCollection.meta.ID, UniqueID(0))
(*node.replica).freeAll()
}
-func TestColSegContainer_hasCollection(t *testing.T) {
+func TestCollectionReplica_hasCollection(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -356,10 +366,12 @@ func TestColSegContainer_hasCollection(t *testing.T) {
assert.Equal(t, hasCollection, true)
hasCollection = (*node.replica).hasCollection(UniqueID(1))
assert.Equal(t, hasCollection, false)
(*node.replica).freeAll()
}
//----------------------------------------------------------------------------------------------------- partition
-func TestColSegContainer_getPartitionNum(t *testing.T) {
+func TestCollectionReplica_getPartitionNum(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -426,9 +438,11 @@ func TestColSegContainer_getPartitionNum(t *testing.T) {
partitionNum, err := (*node.replica).getPartitionNum(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, partitionNum, 1)
(*node.replica).freeAll()
}
-func TestColSegContainer_addPartition(t *testing.T) {
+func TestCollectionReplica_addPartition(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -491,9 +505,11 @@ func TestColSegContainer_addPartition(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, partition.partitionTag, "default")
}
(*node.replica).freeAll()
}
-func TestColSegContainer_removePartition(t *testing.T) {
+func TestCollectionReplica_removePartition(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -559,9 +575,11 @@ func TestColSegContainer_removePartition(t *testing.T) {
err = (*node.replica).removePartition(collectionID, partitionTag)
assert.NoError(t, err)
}
(*node.replica).freeAll()
}
-func TestColSegContainer_addPartitionsByCollectionMeta(t *testing.T) {
+func TestCollectionReplica_addPartitionsByCollectionMeta(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -630,9 +648,11 @@ func TestColSegContainer_addPartitionsByCollectionMeta(t *testing.T) {
assert.Equal(t, hasPartition, true)
hasPartition = (*node.replica).hasPartition(UniqueID(0), "p2")
assert.Equal(t, hasPartition, true)
(*node.replica).freeAll()
}
-func TestColSegContainer_removePartitionsByCollectionMeta(t *testing.T) {
+func TestCollectionReplica_removePartitionsByCollectionMeta(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -701,9 +721,11 @@ func TestColSegContainer_removePartitionsByCollectionMeta(t *testing.T) {
assert.Equal(t, hasPartition, false)
hasPartition = (*node.replica).hasPartition(UniqueID(0), "p2")
assert.Equal(t, hasPartition, false)
(*node.replica).freeAll()
}
-func TestColSegContainer_getPartitionByTag(t *testing.T) {
+func TestCollectionReplica_getPartitionByTag(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -767,9 +789,11 @@ func TestColSegContainer_getPartitionByTag(t *testing.T) {
assert.Equal(t, partition.partitionTag, "default")
assert.NotNil(t, partition)
}
(*node.replica).freeAll()
}
-func TestColSegContainer_hasPartition(t *testing.T) {
+func TestCollectionReplica_hasPartition(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -831,10 +855,12 @@ func TestColSegContainer_hasPartition(t *testing.T) {
assert.Equal(t, hasPartition, true)
hasPartition = (*node.replica).hasPartition(UniqueID(0), "default1")
assert.Equal(t, hasPartition, false)
(*node.replica).freeAll()
}
//----------------------------------------------------------------------------------------------------- segment
-func TestColSegContainer_addSegment(t *testing.T) {
+func TestCollectionReplica_addSegment(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -901,9 +927,11 @@ func TestColSegContainer_addSegment(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, targetSeg.segmentID, UniqueID(i))
}
(*node.replica).freeAll()
}
-func TestColSegContainer_removeSegment(t *testing.T) {
+func TestCollectionReplica_removeSegment(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -972,9 +1000,11 @@ func TestColSegContainer_removeSegment(t *testing.T) {
err = (*node.replica).removeSegment(UniqueID(i))
assert.NoError(t, err)
}
(*node.replica).freeAll()
}
-func TestColSegContainer_getSegmentByID(t *testing.T) {
+func TestCollectionReplica_getSegmentByID(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -1041,9 +1071,11 @@ func TestColSegContainer_getSegmentByID(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, targetSeg.segmentID, UniqueID(i))
}
(*node.replica).freeAll()
}
-func TestColSegContainer_hasSegment(t *testing.T) {
+func TestCollectionReplica_hasSegment(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
@@ -1114,4 +1146,81 @@ func TestColSegContainer_hasSegment(t *testing.T) {
hasSeg = (*node.replica).hasSegment(UniqueID(i + 100))
assert.Equal(t, hasSeg, false)
}
(*node.replica).freeAll()
}
func TestCollectionReplica_freeAll(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)
collectionName := "collection0"
collectionID := UniqueID(0)
fieldVec := schemapb.FieldSchema{
Name: "vec",
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}
fieldInt := schemapb.FieldSchema{
Name: "age",
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}
schema := schemapb.CollectionSchema{
Name: "collection0",
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}
collectionMeta := etcdpb.CollectionMeta{
ID: collectionID,
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
var err = (*node.replica).addCollection(&collectionMeta, collectionMetaBlob)
assert.NoError(t, err)
collection, err := (*node.replica).getCollectionByName(collectionName)
assert.NoError(t, err)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, (*node.replica).getCollectionNum(), 1)
err = (*node.replica).addPartition(collectionID, collectionMeta.PartitionTags[0])
assert.NoError(t, err)
const segmentNum = 3
for i := 0; i < segmentNum; i++ {
err := (*node.replica).addSegment(UniqueID(i), collectionMeta.PartitionTags[0], collectionID)
assert.NoError(t, err)
targetSeg, err := (*node.replica).getSegmentByID(UniqueID(i))
assert.NoError(t, err)
assert.Equal(t, targetSeg.segmentID, UniqueID(i))
hasSeg := (*node.replica).hasSegment(UniqueID(i))
assert.Equal(t, hasSeg, true)
hasSeg = (*node.replica).hasSegment(UniqueID(i + 100))
assert.Equal(t, hasSeg, false)
}
(*node.replica).freeAll()
}

@@ -30,7 +30,9 @@ func (dsService *dataSyncService) start() {
}
func (dsService *dataSyncService) close() {
-dsService.fg.Close()
+if dsService.fg != nil {
+dsService.fg.Close()
+}
}
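
close now tolerates a service whose flow graph was never built (for example, when close runs before initNodes), since calling Close through a nil fg could otherwise panic. A tiny sketch of the same nil-guard pattern, with hypothetical stand-in types:

package main

import "fmt"

// flowGraph is a hypothetical stand-in for TimeTickedFlowGraph.
type flowGraph struct{}

func (f *flowGraph) Close() { fmt.Println("flow graph closed") }

type service struct{ fg *flowGraph }

// close is safe whether or not the service was ever started:
// a nil fg means there is simply nothing to tear down.
func (s *service) close() {
	if s.fg != nil {
		s.fg.Close()
	}
}

func main() {
	(&service{}).close()                 // no-op, no panic
	(&service{fg: &flowGraph{}}).close() // closes the graph
}
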
func (dsService *dataSyncService) initNodes() {

@@ -69,5 +69,18 @@ func (node *QueryNode) Start() {
}
func (node *QueryNode) Close() {
-// TODO: close services
<-node.ctx.Done()
// free collectionReplica
(*node.replica).freeAll()
// close services
if node.dataSyncService != nil {
(*node.dataSyncService).close()
}
if node.searchService != nil {
(*node.searchService).close()
}
if node.statsService != nil {
(*node.statsService).close()
}
}
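
Close now has a defined teardown order: block until the context is cancelled, free the replica's resources, then shut down each service behind a nil check so a node that never started a given service does not panic. A hedged sketch of that ordering (node and svc below are hypothetical stand-ins, not the real QueryNode types):

package main

import (
	"context"
	"fmt"
)

type svc struct{ name string }

func (s *svc) close() { fmt.Println("closed", s.name) }

type node struct {
	ctx             context.Context
	dataSync, stats *svc // either may be nil if never started
}

// Close waits for cancellation, frees shared state first, then
// shuts down each service only if it was actually created.
func (n *node) Close(freeAll func()) {
	<-n.ctx.Done()
	freeAll()
	if n.dataSync != nil {
		n.dataSync.close()
	}
	if n.stats != nil {
		n.stats.close()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	n := &node{ctx: ctx, stats: &svc{name: "stats"}}
	cancel()
	n.Close(func() { fmt.Println("replica freed") })
}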

@@ -23,7 +23,6 @@ import (
func TestSearch_Search(t *testing.T) {
Params.Init()
ctx, cancel := context.WithCancel(context.Background())
-defer cancel()
// init query node
pulsarURL, _ := Params.pulsarAddress()
@@ -240,6 +239,6 @@ func TestSearch_Search(t *testing.T) {
time.Sleep(2 * time.Second)
-node.searchService.close()
cancel()
+node.Close()
}

@@ -59,6 +59,10 @@ func (sService *statsService) start() {
}
}
func (sService *statsService) close() {
(*sService.statsStream).Close()
}
func (sService *statsService) sendSegmentStatistic() {
statisticData := (*sService.replica).getSegmentStatistics()

@@ -77,8 +77,6 @@ func (fg *TimeTickedFlowGraph) Close() {
}
(*inStream.inStream).Close()
}
-// close input channels
-v.Close()
}
}
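
The deleted v.Close() appears to have been a second close of channels already shut down via the input streams above, and closing a Go channel twice panics at runtime. A small sketch of why the duplicate close is fatal and how a sync.Once guard makes close idempotent (guardedChan is a hypothetical type, not part of the flow graph):

package main

import (
	"fmt"
	"sync"
)

// guardedChan wraps a channel so Close is idempotent; a bare
// second close(ch) would panic with "close of closed channel".
type guardedChan struct {
	ch   chan struct{}
	once sync.Once
}

func (g *guardedChan) Close() {
	g.once.Do(func() { close(g.ch) })
}

func main() {
	g := &guardedChan{ch: make(chan struct{})}
	g.Close()
	g.Close() // safe: the second call is a no-op
	fmt.Println("no panic")
}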

@@ -154,12 +154,12 @@ if [[ ${RUN_CPPLINT} == "ON" ]]; then
echo "clang-format check passed!"
# clang-tidy check
-make check-clang-tidy || true
-if [ $? -ne 0 ]; then
-echo "ERROR! clang-tidy check failed"
-exit 1
-fi
-echo "clang-tidy check passed!"
+# make check-clang-tidy || true
+# if [ $? -ne 0 ]; then
+# echo "ERROR! clang-tidy check failed"
+# exit 1
+# fi
+# echo "clang-tidy check passed!"
else
# compile and build
make -j ${jobs} install || exit 1