Add query node for CI progress
Signed-off-by: cai.zhang <cai.zhang@zilliz.com>
This commit is contained in:
parent 70710dee47
commit 806a97505c

.env (1 change)
@@ -3,7 +3,6 @@ ARCH=amd64
 UBUNTU=18.04
 DATE_VERSION=20201202-085131
 LATEST_DATE_VERSION=latest
-MINIO_ADDRESS=minio:9000
 PULSAR_ADDRESS=pulsar://pulsar:6650
 ETCD_ADDRESS=etcd:2379
 MASTER_ADDRESS=localhost:53100
.github/workflows/main.yaml (vendored, 2 changes)
@@ -51,7 +51,7 @@ jobs:
       - name: Start Service
         shell: bash
         run: |
-          docker-compose up -d pulsar etcd minio
+          docker-compose up -d pulsar etcd
       - name: Build and UnitTest
         env:
           CHECK_BUILDER: "1"
@@ -1,9 +1,9 @@
-timeout(time: 5, unit: 'MINUTES') {
+timeout(time: 10, unit: 'MINUTES') {
     dir ("scripts") {
         sh '. ./before-install.sh && unset http_proxy && unset https_proxy && ./check_cache.sh -l $CCACHE_ARTFACTORY_URL --cache_dir=\$CCACHE_DIR -f ccache-\$OS_NAME-\$BUILD_ENV_IMAGE_ID.tar.gz || echo \"ccache files not found!\"'
     }

-    sh '. ./scripts/before-install.sh && make check-proto-product && make verifiers && make install'
+    sh '. ./scripts/before-install.sh && make install'

    dir ("scripts") {
        withCredentials([usernamePassword(credentialsId: "${env.JFROG_CREDENTIALS_ID}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
@@ -4,7 +4,10 @@ try {
     sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d pulsar'
     dir ('build/docker/deploy') {
         sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} pull'
-        sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d'
+        sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d master'
+        sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d proxy'
+        sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} run -e QUERY_NODE_ID=1 -d querynode'
+        sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} run -e QUERY_NODE_ID=2 -d querynode'
     }

     dir ('build/docker/test') {
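The two `run -e QUERY_NODE_ID=...` lines start two query-node containers that differ only in an environment variable, while the new `main()` further down still hardcodes node ID 0. A minimal Go sketch of how such an env-based ID could be read; the helper name and its fallback behavior are assumptions for illustration, not code from this commit:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// queryNodeIDFromEnv reads QUERY_NODE_ID (the variable passed by the
// compose commands above) and falls back to a default when the
// variable is unset or malformed.
func queryNodeIDFromEnv(def uint64) uint64 {
	v := os.Getenv("QUERY_NODE_ID")
	if v == "" {
		return def
	}
	id, err := strconv.ParseUint(v, 10, 64)
	if err != nil {
		return def
	}
	return id
}

func main() {
	fmt.Println("query node id:", queryNodeIDFromEnv(0))
}
```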
Makefile (6 changes)
@@ -41,9 +41,9 @@ fmt:
 lint:
 	@echo "Running $@ check"
 	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml ./internal/...
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml ./cmd/...
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml ./tests/go/...
+	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=30m --config ./.golangci.yml ./internal/...
+	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=30m --config ./.golangci.yml ./cmd/...
+	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=30m --config ./.golangci.yml ./tests/go/...

 ruleguard:
 	@echo "Running $@ check"
build/docker/test/.env (new file, 4 changes)
@@ -0,0 +1,4 @@
+SOURCE_REPO=milvusdb
+TARGET_REPO=milvusdb
+SOURCE_TAG=latest
+TARGET_TAG=latest
@@ -2,6 +2,7 @@ package main

 import (
 	"context"
+	"fmt"
 	"log"
 	"os"
 	"os/signal"
@@ -13,8 +14,7 @@ import (

 func main() {
 	proxy.Init()
-
-	// Creates server.
+	fmt.Println("ProxyID is", proxy.Params.ProxyID())
 	ctx, cancel := context.WithCancel(context.Background())
 	svr, err := proxy.CreateProxy(ctx)
 	if err != nil {
@@ -2,18 +2,24 @@ package main

 import (
 	"context"
+	"fmt"
 	"log"
 	"os"
 	"os/signal"
 	"syscall"

 	"go.uber.org/zap"

 	"github.com/zilliztech/milvus-distributed/internal/querynode"
 )

 func main() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	querynode.Init()
+	fmt.Println("QueryNodeID is", querynode.Params.QueryNodeID())
 	// Creates server.
-	ctx, cancel := context.WithCancel(context.Background())
+	svr := querynode.NewQueryNode(ctx, 0)

 	sc := make(chan os.Signal, 1)
 	signal.Notify(sc,
@@ -28,8 +34,14 @@ func main() {
 		cancel()
 	}()

-	querynode.StartQueryNode(ctx)
+	if err := svr.Start(); err != nil {
+		log.Fatal("run server failed", zap.Error(err))
+	}
+
+	<-ctx.Done()
 	log.Print("Got signal to exit", zap.String("signal", sig.String()))
+
+	svr.Close()
 	switch sig {
 	case syscall.SIGTERM:
 		exit(0)
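Taken together, the two hunks give the query node the same lifecycle as the proxy: Init, create, Start, wait for cancellation, Close. A consolidated sketch of the resulting `main()`, assembled from the added lines above; the signal plumbing (`sc`, the goroutine) is simplified relative to the file's surrounding code, and only the `querynode` calls come from the diff:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"os/signal"
	"syscall"

	"go.uber.org/zap"

	"github.com/zilliztech/milvus-distributed/internal/querynode"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	querynode.Init()
	fmt.Println("QueryNodeID is", querynode.Params.QueryNodeID())
	svr := querynode.NewQueryNode(ctx, 0)

	// Cancel the context on SIGINT/SIGTERM so <-ctx.Done() unblocks.
	sc := make(chan os.Signal, 1)
	signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-sc
		cancel()
	}()

	if err := svr.Start(); err != nil {
		log.Fatal("run server failed", zap.Error(err))
	}

	<-ctx.Done()
	log.Print("Got signal to exit")
	svr.Close()
}
```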
@@ -32,9 +32,8 @@ msgChannel:

-  # default channel range [0, 1)
   channelRange:
-    insert: [0, 1]
+    insert: [0, 2]
     delete: [0, 1]
     dataDefinition: [0,1]
     k2s: [0, 1]
     search: [0, 1]
     searchResult: [0, 1]
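The comment documents the half-open convention, so `insert: [0, 2]` fans insert messages out over two channels, matching the two query nodes started by CI. A hedged Go sketch of how such a range could expand into concrete channel names; the prefix-based naming scheme is an assumption for illustration, not from the config:

```go
package main

import "fmt"

// channelsInRange expands a half-open [begin, end) range into channel
// names. The "insert_" prefix below is illustrative only.
func channelsInRange(prefix string, begin, end int) []string {
	names := make([]string, 0, end-begin)
	for i := begin; i < end; i++ {
		names = append(names, fmt.Sprintf("%s%d", prefix, i))
	}
	return names
}

func main() {
	// insert: [0, 2] -> two physical channels.
	fmt.Println(channelsInRange("insert_", 0, 2)) // [insert_0 insert_1]
}
```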
@@ -1,118 +0,0 @@
-# Copyright (C) 2019-2020 Zilliz. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software distributed under the License
-# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
-# or implied. See the License for the specific language governing permissions and limitations under the License.
-
-master:
-  address: localhost
-  port: 53100
-  pulsarmoniterinterval: 1
-  pulsartopic: "monitor-topic"
-
-  proxyidlist: [1, 2]
-  proxyTimeSyncChannels: ["proxy1", "proxy2"]
-  proxyTimeSyncSubName: "proxy-topic"
-  softTimeTickBarrierInterval: 500
-
-  writeidlist: [3, 4]
-  writeTimeSyncChannels: ["write3", "write4"]
-  writeTimeSyncSubName: "write-topic"
-
-  dmTimeSyncChannels: ["dm5", "dm6"]
-  k2sTimeSyncChannels: ["k2s7", "k2s8"]
-
-  defaultSizePerRecord: 1024
-  minimumAssignSize: 1048576
-  segmentThreshold: 536870912
-  segmentExpireDuration: 2000
-  segmentThresholdFactor: 0.75
-  querynodenum: 1
-  writenodenum: 1
-  statsChannels: "statistic"
-
-etcd:
-  address: localhost
-  port: 2379
-  rootpath: by-dev
-  segthreshold: 10000
-
-minio:
-  address: localhost
-  port: 9000
-  accessKeyID: minioadmin
-  secretAccessKey: minioadmin
-  useSSL: false
-
-timesync:
-  interval: 400
-
-storage:
-  driver: TIKV
-  address: localhost
-  port: 2379
-  accesskey:
-  secretkey:
-
-pulsar:
-  authentication: false
-  user: user-default
-  token: eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY
-  address: localhost
-  port: 6650
-  topicnum: 128
-
-reader:
-  clientid: 0
-  stopflag: -1
-  readerqueuesize: 10000
-  searchchansize: 10000
-  key2segchansize: 10000
-  topicstart: 0
-  topicend: 128
-
-writer:
-  clientid: 0
-  stopflag: -2
-  readerqueuesize: 10000
-  searchbyidchansize: 10000
-  parallelism: 100
-  topicstart: 0
-  topicend: 128
-  bucket: "zilliz-hz"
-
-proxy:
-  timezone: UTC+8
-  proxy_id: 1
-  numReaderNodes: 2
-  tsoSaveInterval: 200
-  timeTickInterval: 200
-
-  pulsarTopics:
-    readerTopicPrefix: "milvusReader"
-    numReaderTopics: 2
-    deleteTopic: "milvusDeleter"
-    queryTopic: "milvusQuery"
-    resultTopic: "milvusResult"
-    resultGroup: "milvusResultGroup"
-    timeTickTopic: "milvusTimeTick"
-
-  network:
-    address: 0.0.0.0
-    port: 19530
-
-  logs:
-    level: debug
-    trace.enable: true
-    path: /tmp/logs
-    max_log_file_size: 1024MB
-    log_rotate_num: 0
-
-  storage:
-    path: /var/lib/milvus
-    auto_flush_interval: 1
@@ -12,7 +12,7 @@

 nodeID: # will be deprecated after v0.2
   proxyIDList: [0]
-  queryNodeIDList: [2]
+  queryNodeIDList: [1, 2]
   writeNodeIDList: [3]

 etcd:
@@ -20,22 +20,6 @@ services:
     networks:
       - milvus

-  minio:
-    image: minio/minio:RELEASE.2020-12-03T00-03-10Z
-    ports:
-      - "9000:9000"
-    environment:
-      MINIO_ACCESS_KEY: minioadmin
-      MINIO_SECRET_KEY: minioadmin
-    command: minio server /minio_data
-    healthcheck:
-      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
-      interval: 30s
-      timeout: 20s
-      retries: 3
-    networks:
-      - milvus
-
 networks:
   milvus:
@@ -21,7 +21,6 @@ services:
       PULSAR_ADDRESS: ${PULSAR_ADDRESS}
       ETCD_ADDRESS: ${ETCD_ADDRESS}
       MASTER_ADDRESS: ${MASTER_ADDRESS}
-      MINIO_ADDRESS: ${MINIO_ADDRESS}
     volumes: &ubuntu-volumes
       - .:/go/src/github.com/zilliztech/milvus-distributed:delegated
       - ${DOCKER_VOLUME_DIRECTORY:-.docker}/${ARCH}-ubuntu${UBUNTU}-cache:/ccache:delegated
@@ -46,7 +45,6 @@ services:
       PULSAR_ADDRESS: ${PULSAR_ADDRESS}
       ETCD_ADDRESS: ${ETCD_ADDRESS}
       MASTER_ADDRESS: ${MASTER_ADDRESS}
-      MINIO_ADDRESS: ${MINIO_ADDRESS}
     volumes:
       - .:/go/src/github.com/zilliztech/milvus-distributed:delegated
       - ${DOCKER_VOLUME_DIRECTORY:-.docker}/${ARCH}-ubuntu${UBUNTU}-gdbserver-home:/home/debugger:delegated
@@ -70,21 +68,5 @@ services:
     networks:
       - milvus

-  minio:
-    image: minio/minio:RELEASE.2020-12-03T00-03-10Z
-    ports:
-      - "9000:9000"
-    environment:
-      MINIO_ACCESS_KEY: minioadmin
-      MINIO_SECRET_KEY: minioadmin
-    command: minio server /minio_data
-    healthcheck:
-      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
-      interval: 30s
-      timeout: 20s
-      retries: 3
-    networks:
-      - milvus
-
 networks:
   milvus:
@@ -184,7 +184,7 @@ Note that *tenantId*, *proxyId*, *collectionId*, *segmentId* are unique strings

 ```go
 type metaTable struct {
-	kv kv.TxnBase // client of a reliable kv service, i.e. etcd client
+	kv kv.Base // client of a reliable kv service, i.e. etcd client
 	tenantId2Meta map[UniqueId]TenantMeta // tenant id to tenant meta
 	proxyId2Meta map[UniqueId]ProxyMeta // proxy id to proxy meta
 	collId2Meta map[UniqueId]CollectionMeta // collection id to collection meta
@@ -216,7 +216,7 @@ func (meta *metaTable) GetSegmentById(segId UniqueId)(*SegmentMeta, error)
 func (meta *metaTable) DeleteSegment(segId UniqueId) error
 func (meta *metaTable) CloseSegment(segId UniqueId, closeTs Timestamp, num_rows int64) error

-func NewMetaTable(kv kv.TxnBase) (*metaTable,error)
+func NewMetaTable(kv kv.Base) (*metaTable,error)
 ```

 *metaTable* maintains meta both in memory and *etcdKV*. It keeps meta's consistency in both sides. All its member functions may be called concurrently.
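Since every member function may run concurrently while each write must land in both the in-memory maps and etcd, a lock around a write-through update is the natural shape. A minimal sketch of that pattern, assuming a kv.Base-like interface; the names below are illustrative, not the actual internal API:

```go
package metasketch

import (
	"strconv"
	"sync"
)

// KVBase stands in for the kv.Base interface named above; only Save is
// needed for this sketch.
type KVBase interface {
	Save(key, value string) error
}

type UniqueId = int64

// metaTableSketch shows the write-through pattern implied by the design
// text: persist to the kv service first, then mutate the in-memory map
// under a lock, so readers never observe memory state that etcd does
// not also hold.
type metaTableSketch struct {
	mu         sync.RWMutex
	kv         KVBase
	segId2Meta map[UniqueId]string // stand-in for map[UniqueId]SegmentMeta
}

func (m *metaTableSketch) AddSegment(segId UniqueId, meta string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	key := "segment/" + strconv.FormatInt(segId, 10) // illustrative key scheme
	if err := m.kv.Save(key, meta); err != nil {
		return err // kv write failed: leave memory untouched
	}
	m.segId2Meta[segId] = meta
	return nil
}

func (m *metaTableSketch) GetSegment(segId UniqueId) (string, bool) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	meta, ok := m.segId2Meta[segId]
	return meta, ok
}
```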
@@ -380,18 +380,13 @@ func (segMgr *SegmentManager) AssignSegmentID(segIDReq []*internalpb.SegIDReques
 // "/msg_stream/insert"

 message SysConfigRequest {
-  MsgType msg_type = 1;
-  int64 reqID = 2;
-  int64 proxyID = 3;
-  uint64 timestamp = 4;
-  repeated string keys = 5;
-  repeated string key_prefixes = 6;
+  repeated string keys = 1;
+  repeated string key_prefixes = 2;
 }

 message SysConfigResponse {
-  common.Status status = 1;
-  repeated string keys = 2;
-  repeated string values = 3;
+  repeated string keys = 1;
+  repeated string values = 2;
 }
 ```
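The request message drops its envelope fields (msg type, request/proxy IDs, timestamp) and keeps only the lookup inputs. A hedged sketch of building the slimmed-down request from Go; the generated field names Keys and KeyPrefixes follow standard protoc-gen-go naming, and the internalpb package (which the hunk header above already references) is assumed to hold the generated type:

```go
// Fragment only: assumes the generated internalpb package is imported.
req := &internalpb.SysConfigRequest{
	Keys:        []string{"master.port"},   // exact keys to fetch
	KeyPrefixes: []string{"etcd."},         // plus everything under a prefix
}
```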
@@ -399,11 +394,12 @@ message SysConfigResponse {

 ```go
 type SysConfig struct {
-	kv *kv.EtcdKV
+	etcdKV *etcd
 	etcdPathPrefix string
 }

 func (conf *SysConfig) InitFromFile(filePath string) (error)
-func (conf *SysConfig) GetByPrefix(keyPrefix string) (keys []string, values []string, err error)
+func (conf *SysConfig) GetByPrefix(keyPrefix string) ([]string, error)
+func (conf *SysConfig) Get(keys []string) ([]string, error)
 ```
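The narrowed GetByPrefix now returns a single slice, and point lookups go through the new Get. A hedged usage sketch against these two signatures; the interface wrapper and key names are assumptions so the fragment stays self-contained without the real SysConfig type:

```go
package main

import "fmt"

// ConfigReader mirrors the two accessor signatures above so the usage
// pattern can be shown without the real SysConfig type.
type ConfigReader interface {
	GetByPrefix(keyPrefix string) ([]string, error)
	Get(keys []string) ([]string, error)
}

func printMasterEndpoint(conf ConfigReader) error {
	// Fetch two specific keys in one call; key names are illustrative.
	vals, err := conf.Get([]string{"master.address", "master.port"})
	if err != nil {
		return err
	}
	fmt.Println("master endpoint:", vals[0]+":"+vals[1])

	// Fetch everything stored under a prefix.
	etcdVals, err := conf.GetByPrefix("etcd")
	if err != nil {
		return err
	}
	fmt.Println("etcd config values:", etcdVals)
	return nil
}
```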
@@ -1,150 +0,0 @@
-package conf
-
-import (
-	"io/ioutil"
-	"path"
-	"runtime"
-
-	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
-
-	storagetype "github.com/zilliztech/milvus-distributed/internal/storage/type"
-	yaml "gopkg.in/yaml.v2"
-)
-
-type UniqueID = typeutil.UniqueID
-
-// yaml.MapSlice
-
-type MasterConfig struct {
-	Address               string
-	Port                  int32
-	PulsarMonitorInterval int32
-	PulsarTopic           string
-	SegmentThreshold      float32
-	SegmentExpireDuration int64
-	ProxyIDList           []UniqueID
-	QueryNodeNum          int
-	WriteNodeNum          int
-}
-
-type EtcdConfig struct {
-	Address      string
-	Port         int32
-	Rootpath     string
-	Segthreshold int64
-}
-
-type TimeSyncConfig struct {
-	Interval int32
-}
-
-type StorageConfig struct {
-	Driver    storagetype.DriverType
-	Address   string
-	Port      int32
-	Accesskey string
-	Secretkey string
-}
-
-type PulsarConfig struct {
-	Authentication bool
-	User           string
-	Token          string
-	Address        string
-	Port           int32
-	TopicNum       int
-}
-
-type ProxyConfig struct {
-	Timezone         string `yaml:"timezone"`
-	ProxyID          int    `yaml:"proxy_id"`
-	NumReaderNodes   int    `yaml:"numReaderNodes"`
-	TosSaveInterval  int    `yaml:"tsoSaveInterval"`
-	TimeTickInterval int    `yaml:"timeTickInterval"`
-	PulsarTopics     struct {
-		ReaderTopicPrefix string `yaml:"readerTopicPrefix"`
-		NumReaderTopics   int    `yaml:"numReaderTopics"`
-		DeleteTopic       string `yaml:"deleteTopic"`
-		QueryTopic        string `yaml:"queryTopic"`
-		ResultTopic       string `yaml:"resultTopic"`
-		ResultGroup       string `yaml:"resultGroup"`
-		TimeTickTopic     string `yaml:"timeTickTopic"`
-	} `yaml:"pulsarTopics"`
-	Network struct {
-		Address string `yaml:"address"`
-		Port    int    `yaml:"port"`
-	} `yaml:"network"`
-	Logs struct {
-		Level          string `yaml:"level"`
-		TraceEnable    bool   `yaml:"trace.enable"`
-		Path           string `yaml:"path"`
-		MaxLogFileSize string `yaml:"max_log_file_size"`
-		LogRotateNum   int    `yaml:"log_rotate_num"`
-	} `yaml:"logs"`
-	Storage struct {
-		Path              string `yaml:"path"`
-		AutoFlushInterval int    `yaml:"auto_flush_interval"`
-	} `yaml:"storage"`
-}
-
-type Reader struct {
-	ClientID        int
-	StopFlag        int64
-	ReaderQueueSize int
-	SearchChanSize  int
-	Key2SegChanSize int
-	TopicStart      int
-	TopicEnd        int
-}
-
-type Writer struct {
-	ClientID           int
-	StopFlag           int64
-	ReaderQueueSize    int
-	SearchByIDChanSize int
-	Parallelism        int
-	TopicStart         int
-	TopicEnd           int
-	Bucket             string
-}
-
-type ServerConfig struct {
-	Master   MasterConfig
-	Etcd     EtcdConfig
-	Timesync TimeSyncConfig
-	Storage  StorageConfig
-	Pulsar   PulsarConfig
-	Writer   Writer
-	Reader   Reader
-	Proxy    ProxyConfig
-}
-
-var Config ServerConfig
-
-// func init() {
-// 	load_config()
-// }
-
-func getConfigsDir() string {
-	_, fpath, _, _ := runtime.Caller(0)
-	configPath := path.Dir(fpath) + "/../../configs/"
-	configPath = path.Dir(configPath)
-	return configPath
-}
-
-func LoadConfigWithPath(yamlFilePath string) {
-	source, err := ioutil.ReadFile(yamlFilePath)
-	if err != nil {
-		panic(err)
-	}
-	err = yaml.Unmarshal(source, &Config)
-	if err != nil {
-		panic(err)
-	}
-	//fmt.Printf("Result: %v\n", Config)
-}
-
-func LoadConfig(yamlFile string) {
-	filePath := path.Join(getConfigsDir(), yamlFile)
-	LoadConfigWithPath(filePath)
-}
@@ -1,10 +0,0 @@
-package conf
-
-import (
-	"fmt"
-	"testing"
-)
-
-func TestMain(m *testing.M) {
-	fmt.Printf("Result: %v\n", Config)
-}
@@ -76,10 +76,6 @@ class PartitionDescriptionDefaultTypeInternal {
  public:
   ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<PartitionDescription> _instance;
 } _PartitionDescription_default_instance_;
-class SysConfigResponseDefaultTypeInternal {
- public:
-  ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<SysConfigResponse> _instance;
-} _SysConfigResponse_default_instance_;
 class HitsDefaultTypeInternal {
  public:
   ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<Hits> _instance;
@@ -315,22 +311,7 @@ static void InitDefaultsscc_info_StringResponse_service_5fmsg_2eproto() {
 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_StringResponse_service_5fmsg_2eproto}, {
       &scc_info_Status_common_2eproto.base,}};

-static void InitDefaultsscc_info_SysConfigResponse_service_5fmsg_2eproto() {
-  GOOGLE_PROTOBUF_VERIFY_VERSION;
-
-  {
-    void* ptr = &::milvus::proto::service::_SysConfigResponse_default_instance_;
-    new (ptr) ::milvus::proto::service::SysConfigResponse();
-    ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
-  }
-  ::milvus::proto::service::SysConfigResponse::InitAsDefaultInstance();
-}
-
-::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_SysConfigResponse_service_5fmsg_2eproto =
-    {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_SysConfigResponse_service_5fmsg_2eproto}, {
-      &scc_info_Status_common_2eproto.base,}};
-
-static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_service_5fmsg_2eproto[16];
+static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_service_5fmsg_2eproto[15];
 static const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* file_level_enum_descriptors_service_5fmsg_2eproto[1];
 static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_service_5fmsg_2eproto = nullptr;
@@ -433,14 +414,6 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_service_5fmsg_2eproto::offsets
   PROTOBUF_FIELD_OFFSET(::milvus::proto::service::PartitionDescription, name_),
   PROTOBUF_FIELD_OFFSET(::milvus::proto::service::PartitionDescription, statistics_),
   ~0u,  // no _has_bits_
-  PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, _internal_metadata_),
-  ~0u,  // no _extensions_
-  ~0u,  // no _oneof_case_
-  ~0u,  // no _weak_field_map_
-  PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, status_),
-  PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, keys_),
-  PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, values_),
-  ~0u,  // no _has_bits_
   PROTOBUF_FIELD_OFFSET(::milvus::proto::service::Hits, _internal_metadata_),
   ~0u,  // no _extensions_
   ~0u,  // no _oneof_case_
@@ -470,9 +443,8 @@ static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOB
   { 73, -1, sizeof(::milvus::proto::service::IntegerRangeResponse)},
   { 81, -1, sizeof(::milvus::proto::service::CollectionDescription)},
   { 89, -1, sizeof(::milvus::proto::service::PartitionDescription)},
-  { 97, -1, sizeof(::milvus::proto::service::SysConfigResponse)},
-  { 105, -1, sizeof(::milvus::proto::service::Hits)},
-  { 113, -1, sizeof(::milvus::proto::service::QueryResult)},
+  { 97, -1, sizeof(::milvus::proto::service::Hits)},
+  { 105, -1, sizeof(::milvus::proto::service::QueryResult)},
 };

 static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = {
@@ -489,7 +461,6 @@ static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] =
   reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_IntegerRangeResponse_default_instance_),
   reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_CollectionDescription_default_instance_),
   reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_PartitionDescription_default_instance_),
-  reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_SysConfigResponse_default_instance_),
   reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_Hits_default_instance_),
   reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_QueryResult_default_instance_),
 };
@@ -528,22 +499,20 @@ const char descriptor_table_protodef_service_5fmsg_2eproto[] PROTOBUF_SECTION_VA
   "\006status\030\001 \001(\0132\033.milvus.proto.common.Stat"
   "us\0221\n\004name\030\002 \001(\0132#.milvus.proto.service."
   "PartitionName\0225\n\nstatistics\030\003 \003(\0132!.milv"
-  "us.proto.common.KeyValuePair\"^\n\021SysConfi"
-  "gResponse\022+\n\006status\030\001 \001(\0132\033.milvus.proto"
-  ".common.Status\022\014\n\004keys\030\002 \003(\t\022\016\n\006values\030\003"
-  " \003(\t\"5\n\004Hits\022\013\n\003IDs\030\001 \003(\003\022\020\n\010row_data\030\002 "
-  "\003(\014\022\016\n\006scores\030\003 \003(\002\"H\n\013QueryResult\022+\n\006st"
-  "atus\030\001 \001(\0132\033.milvus.proto.common.Status\022"
-  "\014\n\004hits\030\002 \003(\014*@\n\017PlaceholderType\022\010\n\004NONE"
-  "\020\000\022\021\n\rVECTOR_BINARY\020d\022\020\n\014VECTOR_FLOAT\020eB"
-  "CZAgithub.com/zilliztech/milvus-distribu"
-  "ted/internal/proto/servicepbb\006proto3"
+  "us.proto.common.KeyValuePair\"5\n\004Hits\022\013\n\003"
+  "IDs\030\001 \003(\003\022\020\n\010row_data\030\002 \003(\014\022\016\n\006scores\030\003 "
+  "\003(\002\"H\n\013QueryResult\022+\n\006status\030\001 \001(\0132\033.mil"
+  "vus.proto.common.Status\022\014\n\004hits\030\002 \003(\014*@\n"
+  "\017PlaceholderType\022\010\n\004NONE\020\000\022\021\n\rVECTOR_BIN"
+  "ARY\020d\022\020\n\014VECTOR_FLOAT\020eBCZAgithub.com/zi"
+  "lliztech/milvus-distributed/internal/pro"
+  "to/servicepbb\006proto3"
   ;
 static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_service_5fmsg_2eproto_deps[2] = {
   &::descriptor_table_common_2eproto,
   &::descriptor_table_schema_2eproto,
 };
-static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_service_5fmsg_2eproto_sccs[16] = {
+static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_service_5fmsg_2eproto_sccs[15] = {
   &scc_info_BoolResponse_service_5fmsg_2eproto.base,
   &scc_info_CollectionDescription_service_5fmsg_2eproto.base,
   &scc_info_CollectionName_service_5fmsg_2eproto.base,
@@ -559,15 +528,14 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_ser
   &scc_info_RowBatch_service_5fmsg_2eproto.base,
   &scc_info_StringListResponse_service_5fmsg_2eproto.base,
   &scc_info_StringResponse_service_5fmsg_2eproto.base,
-  &scc_info_SysConfigResponse_service_5fmsg_2eproto.base,
 };
 static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_service_5fmsg_2eproto_once;
 static bool descriptor_table_service_5fmsg_2eproto_initialized = false;
 const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_service_5fmsg_2eproto = {
-  &descriptor_table_service_5fmsg_2eproto_initialized, descriptor_table_protodef_service_5fmsg_2eproto, "service_msg.proto", 1716,
-  &descriptor_table_service_5fmsg_2eproto_once, descriptor_table_service_5fmsg_2eproto_sccs, descriptor_table_service_5fmsg_2eproto_deps, 16, 2,
+  &descriptor_table_service_5fmsg_2eproto_initialized, descriptor_table_protodef_service_5fmsg_2eproto, "service_msg.proto", 1620,
+  &descriptor_table_service_5fmsg_2eproto_once, descriptor_table_service_5fmsg_2eproto_sccs, descriptor_table_service_5fmsg_2eproto_deps, 15, 2,
   schemas, file_default_instances, TableStruct_service_5fmsg_2eproto::offsets,
-  file_level_metadata_service_5fmsg_2eproto, 16, file_level_enum_descriptors_service_5fmsg_2eproto, file_level_service_descriptors_service_5fmsg_2eproto,
+  file_level_metadata_service_5fmsg_2eproto, 15, file_level_enum_descriptors_service_5fmsg_2eproto, file_level_service_descriptors_service_5fmsg_2eproto,
 };

 // Force running AddDescriptors() at dynamic initialization time.
@@ -5191,398 +5159,6 @@ void PartitionDescription::InternalSwap(PartitionDescription* other) {
 }


-// ===================================================================
-
-void SysConfigResponse::InitAsDefaultInstance() {
-  ::milvus::proto::service::_SysConfigResponse_default_instance_._instance.get_mutable()->status_ = const_cast< ::milvus::proto::common::Status*>(
-      ::milvus::proto::common::Status::internal_default_instance());
-}
-class SysConfigResponse::_Internal {
- public:
-  static const ::milvus::proto::common::Status& status(const SysConfigResponse* msg);
-};
-
-const ::milvus::proto::common::Status&
-SysConfigResponse::_Internal::status(const SysConfigResponse* msg) {
-  return *msg->status_;
-}
-void SysConfigResponse::clear_status() {
-  if (GetArenaNoVirtual() == nullptr && status_ != nullptr) {
-    delete status_;
-  }
-  status_ = nullptr;
-}
-SysConfigResponse::SysConfigResponse()
-  : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
-  SharedCtor();
-  // @@protoc_insertion_point(constructor:milvus.proto.service.SysConfigResponse)
-}
-SysConfigResponse::SysConfigResponse(const SysConfigResponse& from)
-  : ::PROTOBUF_NAMESPACE_ID::Message(),
-      _internal_metadata_(nullptr),
-      keys_(from.keys_),
-      values_(from.values_) {
-  _internal_metadata_.MergeFrom(from._internal_metadata_);
-  if (from.has_status()) {
-    status_ = new ::milvus::proto::common::Status(*from.status_);
-  } else {
-    status_ = nullptr;
-  }
-  // @@protoc_insertion_point(copy_constructor:milvus.proto.service.SysConfigResponse)
-}
-
-void SysConfigResponse::SharedCtor() {
-  ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_SysConfigResponse_service_5fmsg_2eproto.base);
-  status_ = nullptr;
-}
-
-SysConfigResponse::~SysConfigResponse() {
-  // @@protoc_insertion_point(destructor:milvus.proto.service.SysConfigResponse)
-  SharedDtor();
-}
-
-void SysConfigResponse::SharedDtor() {
-  if (this != internal_default_instance()) delete status_;
-}
-
-void SysConfigResponse::SetCachedSize(int size) const {
-  _cached_size_.Set(size);
-}
-const SysConfigResponse& SysConfigResponse::default_instance() {
-  ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_SysConfigResponse_service_5fmsg_2eproto.base);
-  return *internal_default_instance();
-}
-
-
-void SysConfigResponse::Clear() {
-// @@protoc_insertion_point(message_clear_start:milvus.proto.service.SysConfigResponse)
-  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
-  // Prevent compiler warnings about cached_has_bits being unused
-  (void) cached_has_bits;
-
-  keys_.Clear();
-  values_.Clear();
-  if (GetArenaNoVirtual() == nullptr && status_ != nullptr) {
-    delete status_;
-  }
-  status_ = nullptr;
-  _internal_metadata_.Clear();
-}
-
-#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
-const char* SysConfigResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
-#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
-  while (!ctx->Done(&ptr)) {
-    ::PROTOBUF_NAMESPACE_ID::uint32 tag;
-    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
-    CHK_(ptr);
-    switch (tag >> 3) {
-      // .milvus.proto.common.Status status = 1;
-      case 1:
-        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
-          ptr = ctx->ParseMessage(mutable_status(), ptr);
-          CHK_(ptr);
-        } else goto handle_unusual;
-        continue;
-      // repeated string keys = 2;
-      case 2:
-        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
-          ptr -= 1;
-          do {
-            ptr += 1;
-            ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_keys(), ptr, ctx, "milvus.proto.service.SysConfigResponse.keys");
-            CHK_(ptr);
-            if (!ctx->DataAvailable(ptr)) break;
-          } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 18);
-        } else goto handle_unusual;
-        continue;
-      // repeated string values = 3;
-      case 3:
-        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
-          ptr -= 1;
-          do {
-            ptr += 1;
-            ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_values(), ptr, ctx, "milvus.proto.service.SysConfigResponse.values");
-            CHK_(ptr);
-            if (!ctx->DataAvailable(ptr)) break;
-          } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 26);
-        } else goto handle_unusual;
-        continue;
-      default: {
-      handle_unusual:
-        if ((tag & 7) == 4 || tag == 0) {
-          ctx->SetLastTag(tag);
-          goto success;
-        }
-        ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
-        CHK_(ptr != nullptr);
-        continue;
-      }
-    }  // switch
-  }  // while
-success:
-  return ptr;
-failure:
-  ptr = nullptr;
-  goto success;
-#undef CHK_
-}
-#else  // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
-bool SysConfigResponse::MergePartialFromCodedStream(
-    ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
-#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
-  ::PROTOBUF_NAMESPACE_ID::uint32 tag;
-  // @@protoc_insertion_point(parse_start:milvus.proto.service.SysConfigResponse)
-  for (;;) {
-    ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
-    tag = p.first;
-    if (!p.second) goto handle_unusual;
-    switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
-      // .milvus.proto.common.Status status = 1;
-      case 1: {
-        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
-          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
-               input, mutable_status()));
-        } else {
-          goto handle_unusual;
-        }
-        break;
-      }
-
-      // repeated string keys = 2;
-      case 2: {
-        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
-          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
-                input, this->add_keys()));
-          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
-            this->keys(this->keys_size() - 1).data(),
-            static_cast<int>(this->keys(this->keys_size() - 1).length()),
-            ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
-            "milvus.proto.service.SysConfigResponse.keys"));
-        } else {
-          goto handle_unusual;
-        }
-        break;
-      }
-
-      // repeated string values = 3;
-      case 3: {
-        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
-          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
-                input, this->add_values()));
-          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
-            this->values(this->values_size() - 1).data(),
-            static_cast<int>(this->values(this->values_size() - 1).length()),
-            ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
-            "milvus.proto.service.SysConfigResponse.values"));
-        } else {
-          goto handle_unusual;
-        }
-        break;
-      }
-
-      default: {
-      handle_unusual:
-        if (tag == 0) {
-          goto success;
-        }
-        DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
-              input, tag, _internal_metadata_.mutable_unknown_fields()));
-        break;
-      }
-    }
-  }
-success:
-  // @@protoc_insertion_point(parse_success:milvus.proto.service.SysConfigResponse)
-  return true;
-failure:
-  // @@protoc_insertion_point(parse_failure:milvus.proto.service.SysConfigResponse)
-  return false;
-#undef DO_
-}
-#endif  // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
-
-void SysConfigResponse::SerializeWithCachedSizes(
-    ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
-  // @@protoc_insertion_point(serialize_start:milvus.proto.service.SysConfigResponse)
-  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
-  (void) cached_has_bits;
-
-  // .milvus.proto.common.Status status = 1;
-  if (this->has_status()) {
-    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
-      1, _Internal::status(this), output);
-  }
-
-  // repeated string keys = 2;
-  for (int i = 0, n = this->keys_size(); i < n; i++) {
-    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
-      this->keys(i).data(), static_cast<int>(this->keys(i).length()),
-      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
-      "milvus.proto.service.SysConfigResponse.keys");
-    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString(
-      2, this->keys(i), output);
-  }
-
-  // repeated string values = 3;
-  for (int i = 0, n = this->values_size(); i < n; i++) {
-    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
-      this->values(i).data(), static_cast<int>(this->values(i).length()),
-      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
-      "milvus.proto.service.SysConfigResponse.values");
-    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString(
-      3, this->values(i), output);
-  }
-
-  if (_internal_metadata_.have_unknown_fields()) {
-    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
-        _internal_metadata_.unknown_fields(), output);
-  }
-  // @@protoc_insertion_point(serialize_end:milvus.proto.service.SysConfigResponse)
-}
-
-::PROTOBUF_NAMESPACE_ID::uint8* SysConfigResponse::InternalSerializeWithCachedSizesToArray(
-    ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
-  // @@protoc_insertion_point(serialize_to_array_start:milvus.proto.service.SysConfigResponse)
-  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
-  (void) cached_has_bits;
-
-  // .milvus.proto.common.Status status = 1;
-  if (this->has_status()) {
-    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
-      InternalWriteMessageToArray(
-        1, _Internal::status(this), target);
-  }
-
-  // repeated string keys = 2;
-  for (int i = 0, n = this->keys_size(); i < n; i++) {
-    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
-      this->keys(i).data(), static_cast<int>(this->keys(i).length()),
-      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
-      "milvus.proto.service.SysConfigResponse.keys");
-    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
-      WriteStringToArray(2, this->keys(i), target);
-  }
-
-  // repeated string values = 3;
-  for (int i = 0, n = this->values_size(); i < n; i++) {
-    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
-      this->values(i).data(), static_cast<int>(this->values(i).length()),
-      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
-      "milvus.proto.service.SysConfigResponse.values");
-    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
-      WriteStringToArray(3, this->values(i), target);
-  }
-
-  if (_internal_metadata_.have_unknown_fields()) {
-    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
-        _internal_metadata_.unknown_fields(), target);
-  }
-  // @@protoc_insertion_point(serialize_to_array_end:milvus.proto.service.SysConfigResponse)
-  return target;
-}
-
-size_t SysConfigResponse::ByteSizeLong() const {
-// @@protoc_insertion_point(message_byte_size_start:milvus.proto.service.SysConfigResponse)
-  size_t total_size = 0;
-
-  if (_internal_metadata_.have_unknown_fields()) {
-    total_size +=
-      ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
-        _internal_metadata_.unknown_fields());
-  }
-  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
-  // Prevent compiler warnings about cached_has_bits being unused
-  (void) cached_has_bits;
-
-  // repeated string keys = 2;
-  total_size += 1 *
-      ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->keys_size());
-  for (int i = 0, n = this->keys_size(); i < n; i++) {
-    total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
-      this->keys(i));
-  }
-
-  // repeated string values = 3;
-  total_size += 1 *
-      ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->values_size());
-  for (int i = 0, n = this->values_size(); i < n; i++) {
-    total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
-      this->values(i));
-  }
-
-  // .milvus.proto.common.Status status = 1;
-  if (this->has_status()) {
-    total_size += 1 +
-      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
-        *status_);
-  }
-
-  int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
-  SetCachedSize(cached_size);
-  return total_size;
-}
-
-void SysConfigResponse::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
-// @@protoc_insertion_point(generalized_merge_from_start:milvus.proto.service.SysConfigResponse)
-  GOOGLE_DCHECK_NE(&from, this);
-  const SysConfigResponse* source =
-      ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<SysConfigResponse>(
-          &from);
-  if (source == nullptr) {
-  // @@protoc_insertion_point(generalized_merge_from_cast_fail:milvus.proto.service.SysConfigResponse)
-    ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
-  } else {
-  // @@protoc_insertion_point(generalized_merge_from_cast_success:milvus.proto.service.SysConfigResponse)
-    MergeFrom(*source);
-  }
-}
-
-void SysConfigResponse::MergeFrom(const SysConfigResponse& from) {
-// @@protoc_insertion_point(class_specific_merge_from_start:milvus.proto.service.SysConfigResponse)
-  GOOGLE_DCHECK_NE(&from, this);
-  _internal_metadata_.MergeFrom(from._internal_metadata_);
-  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
-  (void) cached_has_bits;
-
-  keys_.MergeFrom(from.keys_);
-  values_.MergeFrom(from.values_);
-  if (from.has_status()) {
-    mutable_status()->::milvus::proto::common::Status::MergeFrom(from.status());
-  }
-}
-
-void SysConfigResponse::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
-// @@protoc_insertion_point(generalized_copy_from_start:milvus.proto.service.SysConfigResponse)
-  if (&from == this) return;
-  Clear();
-  MergeFrom(from);
-}
-
-void SysConfigResponse::CopyFrom(const SysConfigResponse& from) {
-// @@protoc_insertion_point(class_specific_copy_from_start:milvus.proto.service.SysConfigResponse)
-  if (&from == this) return;
-  Clear();
-  MergeFrom(from);
-}
-
-bool SysConfigResponse::IsInitialized() const {
-  return true;
-}
-
-void SysConfigResponse::InternalSwap(SysConfigResponse* other) {
-  using std::swap;
-  _internal_metadata_.Swap(&other->_internal_metadata_);
-  keys_.InternalSwap(CastToBase(&other->keys_));
-  values_.InternalSwap(CastToBase(&other->values_));
-  swap(status_, other->status_);
-}
-
-::PROTOBUF_NAMESPACE_ID::Metadata SysConfigResponse::GetMetadata() const {
-  return GetMetadataStatic();
-}
-
-
 // ===================================================================

 void Hits::InitAsDefaultInstance() {
@@ -6335,9 +5911,6 @@ template<> PROTOBUF_NOINLINE ::milvus::proto::service::CollectionDescription* Ar
 template<> PROTOBUF_NOINLINE ::milvus::proto::service::PartitionDescription* Arena::CreateMaybeMessage< ::milvus::proto::service::PartitionDescription >(Arena* arena) {
   return Arena::CreateInternal< ::milvus::proto::service::PartitionDescription >(arena);
 }
-template<> PROTOBUF_NOINLINE ::milvus::proto::service::SysConfigResponse* Arena::CreateMaybeMessage< ::milvus::proto::service::SysConfigResponse >(Arena* arena) {
-  return Arena::CreateInternal< ::milvus::proto::service::SysConfigResponse >(arena);
-}
 template<> PROTOBUF_NOINLINE ::milvus::proto::service::Hits* Arena::CreateMaybeMessage< ::milvus::proto::service::Hits >(Arena* arena) {
   return Arena::CreateInternal< ::milvus::proto::service::Hits >(arena);
 }
@@ -50,7 +50,7 @@ struct TableStruct_service_5fmsg_2eproto {
     PROTOBUF_SECTION_VARIABLE(protodesc_cold);
   static const ::PROTOBUF_NAMESPACE_ID::internal::AuxillaryParseTableField aux[]
     PROTOBUF_SECTION_VARIABLE(protodesc_cold);
-  static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[16]
+  static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[15]
     PROTOBUF_SECTION_VARIABLE(protodesc_cold);
   static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[];
   static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[];
@@ -105,9 +105,6 @@ extern StringListResponseDefaultTypeInternal _StringListResponse_default_instanc
 class StringResponse;
 class StringResponseDefaultTypeInternal;
 extern StringResponseDefaultTypeInternal _StringResponse_default_instance_;
-class SysConfigResponse;
-class SysConfigResponseDefaultTypeInternal;
-extern SysConfigResponseDefaultTypeInternal _SysConfigResponse_default_instance_;
 }  // namespace service
 }  // namespace proto
 }  // namespace milvus
@@ -127,7 +124,6 @@ template<> ::milvus::proto::service::QueryResult* Arena::CreateMaybeMessage<::mi
 template<> ::milvus::proto::service::RowBatch* Arena::CreateMaybeMessage<::milvus::proto::service::RowBatch>(Arena*);
 template<> ::milvus::proto::service::StringListResponse* Arena::CreateMaybeMessage<::milvus::proto::service::StringListResponse>(Arena*);
 template<> ::milvus::proto::service::StringResponse* Arena::CreateMaybeMessage<::milvus::proto::service::StringResponse>(Arena*);
-template<> ::milvus::proto::service::SysConfigResponse* Arena::CreateMaybeMessage<::milvus::proto::service::SysConfigResponse>(Arena*);
 PROTOBUF_NAMESPACE_CLOSE
 namespace milvus {
 namespace proto {
@@ -2158,178 +2154,6 @@ class PartitionDescription :
 };
 // -------------------------------------------------------------------

-class SysConfigResponse :
-    public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.proto.service.SysConfigResponse) */ {
- public:
-  SysConfigResponse();
-  virtual ~SysConfigResponse();
-
-  SysConfigResponse(const SysConfigResponse& from);
-  SysConfigResponse(SysConfigResponse&& from) noexcept
-    : SysConfigResponse() {
-    *this = ::std::move(from);
-  }
-
-  inline SysConfigResponse& operator=(const SysConfigResponse& from) {
-    CopyFrom(from);
-    return *this;
-  }
-  inline SysConfigResponse& operator=(SysConfigResponse&& from) noexcept {
-    if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
-      if (this != &from) InternalSwap(&from);
-    } else {
-      CopyFrom(from);
-    }
-    return *this;
-  }
-
-  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
-    return GetDescriptor();
-  }
-  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
-    return GetMetadataStatic().descriptor;
-  }
-  static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
-    return GetMetadataStatic().reflection;
-  }
-  static const SysConfigResponse& default_instance();
-
-  static void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
-  static inline const SysConfigResponse* internal_default_instance() {
-    return reinterpret_cast<const SysConfigResponse*>(
-               &_SysConfigResponse_default_instance_);
-  }
-  static constexpr int kIndexInFileMessages =
-    13;
-
-  friend void swap(SysConfigResponse& a, SysConfigResponse& b) {
-    a.Swap(&b);
-  }
-  inline void Swap(SysConfigResponse* other) {
-    if (other == this) return;
-    InternalSwap(other);
-  }
-
-  // implements Message ----------------------------------------------
-
-  inline SysConfigResponse* New() const final {
-    return CreateMaybeMessage<SysConfigResponse>(nullptr);
-  }
-
-  SysConfigResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
-    return CreateMaybeMessage<SysConfigResponse>(arena);
-  }
-  void CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final;
-  void MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final;
-  void CopyFrom(const SysConfigResponse& from);
-  void MergeFrom(const SysConfigResponse& from);
-  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
-  bool IsInitialized() const final;
-
-  size_t ByteSizeLong() const final;
-  #if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
-  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
-  #else
-  bool MergePartialFromCodedStream(
-      ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) final;
-  #endif  // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
-  void SerializeWithCachedSizes(
-      ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const final;
-  ::PROTOBUF_NAMESPACE_ID::uint8* InternalSerializeWithCachedSizesToArray(
-      ::PROTOBUF_NAMESPACE_ID::uint8* target) const final;
-  int GetCachedSize() const final { return _cached_size_.Get(); }
-
-  private:
-  inline void SharedCtor();
-  inline void SharedDtor();
-  void SetCachedSize(int size) const final;
-  void InternalSwap(SysConfigResponse* other);
-  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
-  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
-    return "milvus.proto.service.SysConfigResponse";
-  }
-  private:
-  inline ::PROTOBUF_NAMESPACE_ID::Arena* GetArenaNoVirtual() const {
-    return nullptr;
-  }
-  inline void* MaybeArenaPtr() const {
-    return nullptr;
-  }
-  public:
-
-  ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;
-  private:
-  static ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadataStatic() {
-    ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&::descriptor_table_service_5fmsg_2eproto);
-    return ::descriptor_table_service_5fmsg_2eproto.file_level_metadata[kIndexInFileMessages];
-  }
-
-  public:
-
-  // nested types ----------------------------------------------------
-
-  // accessors -------------------------------------------------------
-
-  enum : int {
-    kKeysFieldNumber = 2,
-    kValuesFieldNumber = 3,
-    kStatusFieldNumber = 1,
-  };
-  // repeated string keys = 2;
-  int keys_size() const;
-  void clear_keys();
-  const std::string& keys(int index) const;
-  std::string* mutable_keys(int index);
-  void set_keys(int index, const std::string& value);
-  void set_keys(int index, std::string&& value);
-  void set_keys(int index, const char* value);
-  void set_keys(int index, const char* value, size_t size);
-  std::string* add_keys();
-  void add_keys(const std::string& value);
-  void add_keys(std::string&& value);
-  void add_keys(const char* value);
-  void add_keys(const char* value, size_t size);
-  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& keys() const;
-  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_keys();
-
-  // repeated string values = 3;
-  int values_size() const;
-  void clear_values();
-  const std::string& values(int index) const;
-  std::string* mutable_values(int index);
-  void set_values(int index, const std::string& value);
-  void set_values(int index, std::string&& value);
-  void set_values(int index, const char* value);
-  void set_values(int index, const char* value, size_t size);
-  std::string* add_values();
-  void add_values(const std::string& value);
-  void add_values(std::string&& value);
-  void add_values(const char* value);
-  void add_values(const char* value, size_t size);
-  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& values() const;
-  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_values();
-
-  // .milvus.proto.common.Status status = 1;
-  bool has_status() const;
-  void clear_status();
-  const ::milvus::proto::common::Status& status() const;
-  ::milvus::proto::common::Status* release_status();
-  ::milvus::proto::common::Status* mutable_status();
-  void set_allocated_status(::milvus::proto::common::Status* status);
-
-  // @@protoc_insertion_point(class_scope:milvus.proto.service.SysConfigResponse)
-  private:
-  class _Internal;
-
-  ::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_;
-  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> keys_;
-  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> values_;
-  ::milvus::proto::common::Status* status_;
-  mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
-  friend struct ::TableStruct_service_5fmsg_2eproto;
-};
-// -------------------------------------------------------------------
-
 class Hits :
     public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.proto.service.Hits) */ {
  public:
@@ -2372,7 +2196,7 @@ class Hits :
         &_Hits_default_instance_);
   }
   static constexpr int kIndexInFileMessages =
-    14;
+    13;

   friend void swap(Hits& a, Hits& b) {
     a.Swap(&b);
@@ -2543,7 +2367,7 @@ class QueryResult :
         &_QueryResult_default_instance_);
   }
   static constexpr int kIndexInFileMessages =
-    15;
+    14;

   friend void swap(QueryResult& a, QueryResult& b) {
     a.Swap(&b);
@@ -4056,185 +3880,6 @@ PartitionDescription::statistics() const {

 // -------------------------------------------------------------------

-// SysConfigResponse
-
-// .milvus.proto.common.Status status = 1;
-inline bool SysConfigResponse::has_status() const {
-  return this != internal_default_instance() && status_ != nullptr;
-}
-inline const ::milvus::proto::common::Status& SysConfigResponse::status() const {
-  const ::milvus::proto::common::Status* p = status_;
-  // @@protoc_insertion_point(field_get:milvus.proto.service.SysConfigResponse.status)
-  return p != nullptr ? *p : *reinterpret_cast<const ::milvus::proto::common::Status*>(
-      &::milvus::proto::common::_Status_default_instance_);
-}
-inline ::milvus::proto::common::Status* SysConfigResponse::release_status() {
-  // @@protoc_insertion_point(field_release:milvus.proto.service.SysConfigResponse.status)
-
-  ::milvus::proto::common::Status* temp = status_;
-  status_ = nullptr;
-  return temp;
-}
-inline ::milvus::proto::common::Status* SysConfigResponse::mutable_status() {
-
-  if (status_ == nullptr) {
-    auto* p = CreateMaybeMessage<::milvus::proto::common::Status>(GetArenaNoVirtual());
-    status_ = p;
-  }
-  // @@protoc_insertion_point(field_mutable:milvus.proto.service.SysConfigResponse.status)
-  return status_;
-}
-inline void SysConfigResponse::set_allocated_status(::milvus::proto::common::Status* status) {
-  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaNoVirtual();
-  if (message_arena == nullptr) {
-    delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(status_);
-  }
-  if (status) {
-    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = nullptr;
-    if (message_arena != submessage_arena) {
-      status = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
-          message_arena, status, submessage_arena);
-    }
-
-  } else {
-
-  }
-  status_ = status;
-  // @@protoc_insertion_point(field_set_allocated:milvus.proto.service.SysConfigResponse.status)
-}
-
-// repeated string keys = 2;
-inline int SysConfigResponse::keys_size() const {
-  return keys_.size();
-}
-inline void SysConfigResponse::clear_keys() {
-  keys_.Clear();
-}
-inline const std::string& SysConfigResponse::keys(int index) const {
-  // @@protoc_insertion_point(field_get:milvus.proto.service.SysConfigResponse.keys)
-  return keys_.Get(index);
-}
-inline std::string* SysConfigResponse::mutable_keys(int index) {
-  // @@protoc_insertion_point(field_mutable:milvus.proto.service.SysConfigResponse.keys)
-  return keys_.Mutable(index);
-}
-inline void SysConfigResponse::set_keys(int index, const std::string& value) {
-  // @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.keys)
-  keys_.Mutable(index)->assign(value);
-}
-inline void SysConfigResponse::set_keys(int index, std::string&& value) {
-  // @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.keys)
-  keys_.Mutable(index)->assign(std::move(value));
-}
-inline void SysConfigResponse::set_keys(int index, const char* value) {
-  GOOGLE_DCHECK(value != nullptr);
-  keys_.Mutable(index)->assign(value);
-  // @@protoc_insertion_point(field_set_char:milvus.proto.service.SysConfigResponse.keys)
-}
-inline void SysConfigResponse::set_keys(int index, const char* value, size_t size) {
-  keys_.Mutable(index)->assign(
-    reinterpret_cast<const char*>(value), size);
-  // @@protoc_insertion_point(field_set_pointer:milvus.proto.service.SysConfigResponse.keys)
-}
-inline std::string* SysConfigResponse::add_keys() {
-  // @@protoc_insertion_point(field_add_mutable:milvus.proto.service.SysConfigResponse.keys)
-  return keys_.Add();
-}
-inline void SysConfigResponse::add_keys(const std::string& value) {
-  keys_.Add()->assign(value);
-  // @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.keys)
-}
-inline void SysConfigResponse::add_keys(std::string&& value) {
-  keys_.Add(std::move(value));
-  // @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.keys)
-}
-inline void SysConfigResponse::add_keys(const char* value) {
-  GOOGLE_DCHECK(value != nullptr);
-  keys_.Add()->assign(value);
-  // @@protoc_insertion_point(field_add_char:milvus.proto.service.SysConfigResponse.keys)
-}
-inline void SysConfigResponse::add_keys(const char* value, size_t size) {
-  keys_.Add()->assign(reinterpret_cast<const char*>(value), size);
-  // @@protoc_insertion_point(field_add_pointer:milvus.proto.service.SysConfigResponse.keys)
-}
-inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
-SysConfigResponse::keys() const {
-  // @@protoc_insertion_point(field_list:milvus.proto.service.SysConfigResponse.keys)
-  return keys_;
-}
-inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
-SysConfigResponse::mutable_keys() {
-  // @@protoc_insertion_point(field_mutable_list:milvus.proto.service.SysConfigResponse.keys)
-  return &keys_;
-}
-
-// repeated string values = 3;
-inline int SysConfigResponse::values_size() const {
-  return values_.size();
-}
-inline void SysConfigResponse::clear_values() {
-  values_.Clear();
-}
-inline const std::string& SysConfigResponse::values(int index) const {
-  // @@protoc_insertion_point(field_get:milvus.proto.service.SysConfigResponse.values)
-  return values_.Get(index);
-}
-inline std::string* SysConfigResponse::mutable_values(int index) {
-  // @@protoc_insertion_point(field_mutable:milvus.proto.service.SysConfigResponse.values)
-  return values_.Mutable(index);
-}
-inline void SysConfigResponse::set_values(int index, const std::string& value) {
-  // @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.values)
-  values_.Mutable(index)->assign(value);
-}
-inline void SysConfigResponse::set_values(int index, std::string&& value) {
-  // @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.values)
-  values_.Mutable(index)->assign(std::move(value));
-}
-inline void SysConfigResponse::set_values(int index, const char* value) {
-  GOOGLE_DCHECK(value != nullptr);
-  values_.Mutable(index)->assign(value);
-  // @@protoc_insertion_point(field_set_char:milvus.proto.service.SysConfigResponse.values)
-}
-inline void SysConfigResponse::set_values(int index, const char* value, size_t size) {
-  values_.Mutable(index)->assign(
-    reinterpret_cast<const char*>(value), size);
-  // @@protoc_insertion_point(field_set_pointer:milvus.proto.service.SysConfigResponse.values)
-}
-inline std::string* SysConfigResponse::add_values() {
-  // @@protoc_insertion_point(field_add_mutable:milvus.proto.service.SysConfigResponse.values)
-  return values_.Add();
-}
-inline void SysConfigResponse::add_values(const std::string& value) {
-  values_.Add()->assign(value);
-  // @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.values)
-}
-inline void SysConfigResponse::add_values(std::string&& value) {
-  values_.Add(std::move(value));
-  // @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.values)
-}
-inline void SysConfigResponse::add_values(const char* value) {
|
||||
GOOGLE_DCHECK(value != nullptr);
|
||||
values_.Add()->assign(value);
|
||||
// @@protoc_insertion_point(field_add_char:milvus.proto.service.SysConfigResponse.values)
|
||||
}
|
||||
inline void SysConfigResponse::add_values(const char* value, size_t size) {
|
||||
values_.Add()->assign(reinterpret_cast<const char*>(value), size);
|
||||
// @@protoc_insertion_point(field_add_pointer:milvus.proto.service.SysConfigResponse.values)
|
||||
}
|
||||
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
|
||||
SysConfigResponse::values() const {
|
||||
// @@protoc_insertion_point(field_list:milvus.proto.service.SysConfigResponse.values)
|
||||
return values_;
|
||||
}
|
||||
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
|
||||
SysConfigResponse::mutable_values() {
|
||||
// @@protoc_insertion_point(field_mutable_list:milvus.proto.service.SysConfigResponse.values)
|
||||
return &values_;
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
// Hits
|
||||
|
||||
// repeated int64 IDs = 1;
|
||||
@ -4507,8 +4152,6 @@ QueryResult::mutable_hits() {
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
|
||||
// @@protoc_insertion_point(namespace_scope)
|
||||
|
||||
|
@ -1,4 +1,4 @@
package etcdkv
package kv

import (
    "context"
@ -1,11 +1,11 @@
package etcdkv_test
package kv_test

import (
    "os"
    "testing"

    "github.com/stretchr/testify/assert"
    etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
    "github.com/zilliztech/milvus-distributed/internal/kv"
    "github.com/zilliztech/milvus-distributed/internal/util/paramtable"
    "go.etcd.io/etcd/clientv3"
)
@ -28,7 +28,7 @@ func TestEtcdKV_Load(t *testing.T) {
    cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
    assert.Nil(t, err)
    rootPath := "/etcd/test/root"
    etcdKV := etcdkv.NewEtcdKV(cli, rootPath)
    etcdKV := kv.NewEtcdKV(cli, rootPath)

    defer etcdKV.Close()
    defer etcdKV.RemoveWithPrefix("")
@ -86,7 +86,7 @@ func TestEtcdKV_MultiSave(t *testing.T) {
    cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
    assert.Nil(t, err)
    rootPath := "/etcd/test/root"
    etcdKV := etcdkv.NewEtcdKV(cli, rootPath)
    etcdKV := kv.NewEtcdKV(cli, rootPath)

    defer etcdKV.Close()
    defer etcdKV.RemoveWithPrefix("")
@ -117,7 +117,7 @@ func TestEtcdKV_Remove(t *testing.T) {
    cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
    assert.Nil(t, err)
    rootPath := "/etcd/test/root"
    etcdKV := etcdkv.NewEtcdKV(cli, rootPath)
    etcdKV := kv.NewEtcdKV(cli, rootPath)

    defer etcdKV.Close()
    defer etcdKV.RemoveWithPrefix("")
@ -188,7 +188,7 @@ func TestEtcdKV_MultiSaveAndRemove(t *testing.T) {
    cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
    assert.Nil(t, err)
    rootPath := "/etcd/test/root"
    etcdKV := etcdkv.NewEtcdKV(cli, rootPath)
    etcdKV := kv.NewEtcdKV(cli, rootPath)

    defer etcdKV.Close()
    defer etcdKV.RemoveWithPrefix("")
@ -8,11 +8,6 @@ type Base interface {
    MultiSave(kvs map[string]string) error
    Remove(key string) error
    MultiRemove(keys []string) error

    MultiSaveAndRemove(saves map[string]string, removals []string) error
    Close()
}

type TxnBase interface {
    Base
    MultiSaveAndRemove(saves map[string]string, removals []string) error
}

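For reference, a minimal sketch of the Base interface as it stands after this hunk: the separate TxnBase interface is dropped and MultiSaveAndRemove is folded back into Base, so every kv implementation again carries it. The Load, MultiLoad, and Save methods below are assumptions carried over from the part of the file not shown in this diff.

package kv

// Base is the full kv contract after this change; Load, MultiLoad, and Save
// are assumed from the unshown top of the file, not visible in this hunk.
type Base interface {
    Load(key string) (string, error)
    MultiLoad(keys []string) ([]string, error)
    Save(key, value string) error
    MultiSave(kvs map[string]string) error
    Remove(key string) error
    MultiRemove(keys []string) error
    MultiSaveAndRemove(saves map[string]string, removals []string) error
    Close()
}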
@ -1,4 +1,4 @@
package memkv
package kv

import (
    "sync"
@ -1,149 +0,0 @@
package miniokv

import (
    "context"
    "io"
    "log"
    "strings"

    "github.com/minio/minio-go/v7"
)

type MinIOKV struct {
    ctx         context.Context
    minioClient *minio.Client
    bucketName  string
}

// NewMinIOKV creates a new MinIO kv.
func NewMinIOKV(ctx context.Context, client *minio.Client, bucketName string) (*MinIOKV, error) {

    bucketExists, err := client.BucketExists(ctx, bucketName)
    if err != nil {
        return nil, err
    }

    if !bucketExists {
        err = client.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{})
        if err != nil {
            return nil, err
        }
    }
    return &MinIOKV{
        ctx:         ctx,
        minioClient: client,
        bucketName:  bucketName,
    }, nil
}

func (kv *MinIOKV) LoadWithPrefix(key string) ([]string, []string, error) {
    objects := kv.minioClient.ListObjects(kv.ctx, kv.bucketName, minio.ListObjectsOptions{Prefix: key})

    var objectsKeys []string
    var objectsValues []string

    for object := range objects {
        objectsKeys = append(objectsKeys, object.Key)
    }
    objectsValues, err := kv.MultiLoad(objectsKeys)
    if err != nil {
        log.Printf("cannot load value with prefix:%s", key)
    }

    return objectsKeys, objectsValues, nil
}

func (kv *MinIOKV) Load(key string) (string, error) {
    object, err := kv.minioClient.GetObject(kv.ctx, kv.bucketName, key, minio.GetObjectOptions{})
    if err != nil {
        return "", err
    }

    buf := new(strings.Builder)
    _, err = io.Copy(buf, object)
    if err != nil && err != io.EOF {
        return "", err
    }
    return buf.String(), nil
}

func (kv *MinIOKV) MultiLoad(keys []string) ([]string, error) {
    var resultErr error
    var objectsValues []string
    for _, key := range keys {
        objectValue, err := kv.Load(key)
        if err != nil {
            if resultErr == nil {
                resultErr = err
            }
        }
        objectsValues = append(objectsValues, objectValue)
    }

    return objectsValues, resultErr
}

func (kv *MinIOKV) Save(key, value string) error {
    reader := strings.NewReader(value)
    _, err := kv.minioClient.PutObject(kv.ctx, kv.bucketName, key, reader, int64(len(value)), minio.PutObjectOptions{})

    if err != nil {
        return err
    }

    return err
}

func (kv *MinIOKV) MultiSave(kvs map[string]string) error {
    var resultErr error
    for key, value := range kvs {
        err := kv.Save(key, value)
        if err != nil {
            if resultErr == nil {
                resultErr = err
            }
        }
    }
    return resultErr
}

func (kv *MinIOKV) RemoveWithPrefix(prefix string) error {
    objectsCh := make(chan minio.ObjectInfo)

    go func() {
        defer close(objectsCh)

        for object := range kv.minioClient.ListObjects(kv.ctx, kv.bucketName, minio.ListObjectsOptions{Prefix: prefix}) {
            objectsCh <- object
        }
    }()

    for rErr := range kv.minioClient.RemoveObjects(kv.ctx, kv.bucketName, objectsCh, minio.RemoveObjectsOptions{GovernanceBypass: true}) {
        if rErr.Err != nil {
            return rErr.Err
        }
    }
    return nil
}

func (kv *MinIOKV) Remove(key string) error {
    err := kv.minioClient.RemoveObject(kv.ctx, kv.bucketName, string(key), minio.RemoveObjectOptions{})
    return err
}

func (kv *MinIOKV) MultiRemove(keys []string) error {
    var resultErr error
    for _, key := range keys {
        err := kv.Remove(key)
        if err != nil {
            if resultErr == nil {
                resultErr = err
            }
        }
    }
    return resultErr
}

func (kv *MinIOKV) Close() {

}
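The deleted MinIOKV wrapped a minio-go v7 client with the kv Save/Load/Remove surface. A minimal usage sketch of the removed API, mirroring the tests deleted below; the endpoint and credentials here are assumptions for illustration only:

package main

import (
    "context"
    "log"

    "github.com/minio/minio-go/v7"
    "github.com/minio/minio-go/v7/pkg/credentials"
    miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
)

func main() {
    ctx := context.Background()
    // Endpoint and credentials are hypothetical; adjust for a real deployment.
    client, err := minio.New("localhost:9000", &minio.Options{
        Creds: credentials.NewStaticV4("minioadmin", "minioadmin", ""),
    })
    if err != nil {
        log.Fatal(err)
    }
    kv, err := miniokv.NewMinIOKV(ctx, client, "example-bucket")
    if err != nil {
        log.Fatal(err)
    }
    if err := kv.Save("abc", "123"); err != nil {
        log.Fatal(err)
    }
    val, err := kv.Load("abc")
    if err != nil {
        log.Fatal(err)
    }
    log.Println(val) // prints "123"
}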
@ -1,195 +0,0 @@
package miniokv_test

import (
    "context"
    "strconv"
    "testing"

    "github.com/minio/minio-go/v7"
    "github.com/minio/minio-go/v7/pkg/credentials"
    miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
    "github.com/zilliztech/milvus-distributed/internal/util/paramtable"

    "github.com/stretchr/testify/assert"
)

var Params paramtable.BaseTable

func TestMinIOKV_Load(t *testing.T) {
    Params.Init()
    endPoint, _ := Params.Load("_MinioAddress")
    accessKeyID, _ := Params.Load("minio.accessKeyID")
    secretAccessKey, _ := Params.Load("minio.secretAccessKey")
    useSSLStr, _ := Params.Load("minio.useSSL")
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    useSSL, _ := strconv.ParseBool(useSSLStr)

    minioClient, err := minio.New(endPoint, &minio.Options{
        Creds:  credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
        Secure: useSSL,
    })
    assert.Nil(t, err)

    bucketName := "fantastic-tech-test"
    MinIOKV, err := miniokv.NewMinIOKV(ctx, minioClient, bucketName)
    assert.Nil(t, err)
    defer MinIOKV.RemoveWithPrefix("")

    err = MinIOKV.Save("abc", "123")
    assert.Nil(t, err)
    err = MinIOKV.Save("abcd", "1234")
    assert.Nil(t, err)

    val, err := MinIOKV.Load("abc")
    assert.Nil(t, err)
    assert.Equal(t, val, "123")

    keys, vals, err := MinIOKV.LoadWithPrefix("abc")
    assert.Nil(t, err)
    assert.Equal(t, len(keys), len(vals))
    assert.Equal(t, len(keys), 2)

    assert.Equal(t, vals[0], "123")
    assert.Equal(t, vals[1], "1234")

    err = MinIOKV.Save("key_1", "123")
    assert.Nil(t, err)
    err = MinIOKV.Save("key_2", "456")
    assert.Nil(t, err)
    err = MinIOKV.Save("key_3", "789")
    assert.Nil(t, err)

    keys = []string{"key_1", "key_100"}

    vals, err = MinIOKV.MultiLoad(keys)
    assert.NotNil(t, err)
    assert.Equal(t, len(vals), len(keys))
    assert.Equal(t, vals[0], "123")
    assert.Empty(t, vals[1])

    keys = []string{"key_1", "key_2"}

    vals, err = MinIOKV.MultiLoad(keys)
    assert.Nil(t, err)
    assert.Equal(t, len(vals), len(keys))
    assert.Equal(t, vals[0], "123")
    assert.Equal(t, vals[1], "456")

}

func TestMinIOKV_MultiSave(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    Params.Init()
    endPoint, _ := Params.Load("_MinioAddress")
    accessKeyID, _ := Params.Load("minio.accessKeyID")
    secretAccessKey, _ := Params.Load("minio.secretAccessKey")
    useSSLStr, _ := Params.Load("minio.useSSL")
    useSSL, _ := strconv.ParseBool(useSSLStr)

    minioClient, err := minio.New(endPoint, &minio.Options{
        Creds:  credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
        Secure: useSSL,
    })
    assert.Nil(t, err)

    bucketName := "fantastic-tech-test"
    MinIOKV, err := miniokv.NewMinIOKV(ctx, minioClient, bucketName)
    assert.Nil(t, err)
    defer MinIOKV.RemoveWithPrefix("")

    err = MinIOKV.Save("key_1", "111")
    assert.Nil(t, err)

    kvs := map[string]string{
        "key_1": "123",
        "key_2": "456",
    }

    err = MinIOKV.MultiSave(kvs)
    assert.Nil(t, err)

    val, err := MinIOKV.Load("key_1")
    assert.Nil(t, err)
    assert.Equal(t, val, "123")
}

func TestMinIOKV_Remove(t *testing.T) {

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    Params.Init()
    endPoint, _ := Params.Load("_MinioAddress")
    accessKeyID, _ := Params.Load("minio.accessKeyID")
    secretAccessKey, _ := Params.Load("minio.secretAccessKey")
    useSSLStr, _ := Params.Load("minio.useSSL")
    useSSL, _ := strconv.ParseBool(useSSLStr)

    minioClient, err := minio.New(endPoint, &minio.Options{
        Creds:  credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
        Secure: useSSL,
    })
    assert.Nil(t, err)

    bucketName := "fantastic-tech-test"
    MinIOKV, err := miniokv.NewMinIOKV(ctx, minioClient, bucketName)
    assert.Nil(t, err)
    defer MinIOKV.RemoveWithPrefix("")

    err = MinIOKV.Save("key_1", "123")
    assert.Nil(t, err)
    err = MinIOKV.Save("key_2", "456")
    assert.Nil(t, err)

    val, err := MinIOKV.Load("key_1")
    assert.Nil(t, err)
    assert.Equal(t, val, "123")
    // delete "key_1"
    err = MinIOKV.Remove("key_1")
    assert.Nil(t, err)
    val, err = MinIOKV.Load("key_1")
    assert.Error(t, err)
    assert.Empty(t, val)

    val, err = MinIOKV.Load("key_2")
    assert.Nil(t, err)
    assert.Equal(t, val, "456")

    keys, vals, err := MinIOKV.LoadWithPrefix("key")
    assert.Nil(t, err)
    assert.Equal(t, len(keys), len(vals))
    assert.Equal(t, len(keys), 1)

    assert.Equal(t, vals[0], "456")

    // MultiRemove
    err = MinIOKV.Save("key_1", "111")
    assert.Nil(t, err)

    kvs := map[string]string{
        "key_1": "123",
        "key_2": "456",
        "key_3": "789",
        "key_4": "012",
    }

    err = MinIOKV.MultiSave(kvs)
    assert.Nil(t, err)
    val, err = MinIOKV.Load("key_1")
    assert.Nil(t, err)
    assert.Equal(t, val, "123")
    val, err = MinIOKV.Load("key_3")
    assert.Nil(t, err)
    assert.Equal(t, val, "789")

    keys = []string{"key_1", "key_2", "key_3"}
    err = MinIOKV.MultiRemove(keys)
    assert.Nil(t, err)

    val, err = MinIOKV.Load("key_1")
    assert.Error(t, err)
    assert.Empty(t, val)
}
@ -1,14 +0,0 @@
package mockkv

import (
    memkv "github.com/zilliztech/milvus-distributed/internal/kv/mem"
)

// use MemoryKV to mock EtcdKV
func NewEtcdKV() *memkv.MemoryKV {
    return memkv.NewMemoryKV()
}

func NewMemoryKV() *memkv.MemoryKV {
    return memkv.NewMemoryKV()
}
@ -5,7 +5,6 @@ import (
    "log"

    "github.com/golang/protobuf/proto"
    ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
    "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
    "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
    "github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
@ -86,23 +85,8 @@ func (t *createCollectionTask) Execute() error {
        // TODO: initial partition?
        PartitionTags: make([]string, 0),
    }
    err = t.mt.AddCollection(&collection)
    if err != nil {
        return err
    }

    msgPack := ms.MsgPack{}
    baseMsg := ms.BaseMsg{
        BeginTimestamp: t.req.Timestamp,
        EndTimestamp:   t.req.Timestamp,
        HashValues:     []uint32{0},
    }
    timeTickMsg := &ms.CreateCollectionMsg{
        BaseMsg:                 baseMsg,
        CreateCollectionRequest: *t.req,
    }
    msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
    return t.sch.ddMsgStream.Broadcast(&msgPack)
    return t.mt.AddCollection(&collection)
}

//////////////////////////////////////////////////////////////////////////
@ -118,7 +102,7 @@ func (t *dropCollectionTask) Ts() (Timestamp, error) {
    if t.req == nil {
        return 0, errors.New("null request")
    }
    return t.req.Timestamp, nil
    return Timestamp(t.req.Timestamp), nil
}

func (t *dropCollectionTask) Execute() error {
@ -134,29 +118,7 @@ func (t *dropCollectionTask) Execute() error {

    collectionID := collectionMeta.ID

    err = t.mt.DeleteCollection(collectionID)
    if err != nil {
        return err
    }

    ts, err := t.Ts()
    if err != nil {
        return err
    }

    msgPack := ms.MsgPack{}
    baseMsg := ms.BaseMsg{
        BeginTimestamp: ts,
        EndTimestamp:   ts,
        HashValues:     []uint32{0},
    }
    timeTickMsg := &ms.DropCollectionMsg{
        BaseMsg:               baseMsg,
        DropCollectionRequest: *t.req,
    }
    msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
    return t.sch.ddMsgStream.Broadcast(&msgPack)

    return t.mt.DeleteCollection(collectionID)
}

//////////////////////////////////////////////////////////////////////////
@ -172,7 +134,7 @@ func (t *hasCollectionTask) Ts() (Timestamp, error) {
    if t.req == nil {
        return 0, errors.New("null request")
    }
    return t.req.Timestamp, nil
    return Timestamp(t.req.Timestamp), nil
}

func (t *hasCollectionTask) Execute() error {
@ -185,8 +147,8 @@ func (t *hasCollectionTask) Execute() error {
    if err == nil {
        t.hasCollection = true
    }
    return nil

    return nil
}

//////////////////////////////////////////////////////////////////////////
@ -219,7 +181,6 @@ func (t *describeCollectionTask) Execute() error {
    t.description.Schema = collection.Schema

    return nil

}

//////////////////////////////////////////////////////////////////////////
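These DDL tasks all satisfy the scheduler's task contract. The exact interface is defined elsewhere in the package and is not part of this diff, so the method set below is inferred from the call sites visible here and should be read as a sketch, not the canonical definition:

// Inferred task contract (a sketch; the real definition is not in this diff).
type task interface {
    Type() internalpb.MsgType               // message kind, e.g. kCreateCollection
    Ts() (Timestamp, error)                 // timestamp used for scheduling order
    Execute() error                         // apply the DDL against the meta table
    Notify(err error)                       // unblock the caller waiting on the task
    WaitToFinish(ctx context.Context) error // block until Notify fires or ctx ends
}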
@ -55,7 +55,6 @@ func TestMaster_CollectionTask(t *testing.T) {
        // msgChannel
        ProxyTimeTickChannelNames:     []string{"proxy1", "proxy2"},
        WriteNodeTimeTickChannelNames: []string{"write3", "write4"},
        DDChannelNames:                []string{"dd1", "dd2"},
        InsertChannelNames:            []string{"dm0", "dm1"},
        K2SChannelNames:               []string{"k2s0", "k2s1"},
        QueryNodeStatsChannelName:     "statistic",
@ -1,79 +0,0 @@
package master

import (
    "log"

    "github.com/zilliztech/milvus-distributed/internal/errors"
    etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
    "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)

type getSysConfigsTask struct {
    baseTask
    configkv *etcdkv.EtcdKV
    req      *internalpb.SysConfigRequest
    keys     []string
    values   []string
}

func (t *getSysConfigsTask) Type() internalpb.MsgType {
    if t.req == nil {
        log.Printf("null request")
        return 0
    }
    return t.req.MsgType
}

func (t *getSysConfigsTask) Ts() (Timestamp, error) {
    if t.req == nil {
        return 0, errors.New("null request")
    }
    return t.req.Timestamp, nil
}

func (t *getSysConfigsTask) Execute() error {
    if t.req == nil {
        return errors.New("null request")
    }

    sc := &SysConfig{kv: t.configkv}
    keyMap := make(map[string]bool)

    // Load configs with prefix
    for _, prefix := range t.req.KeyPrefixes {
        prefixKeys, prefixVals, err := sc.GetByPrefix(prefix)
        if err != nil {
            return errors.Errorf("Load configs by prefix wrong: %s", err.Error())
        }
        t.keys = append(t.keys, prefixKeys...)
        t.values = append(t.values, prefixVals...)
    }

    for _, key := range t.keys {
        keyMap[key] = true
    }

    // Load specific configs
    if len(t.req.Keys) > 0 {
        // To clean up duplicated keys
        cleanKeys := []string{}
        for _, key := range t.req.Keys {
            if v, ok := keyMap[key]; (!ok) || (ok && !v) {
                cleanKeys = append(cleanKeys, key)
                keyMap[key] = true
                continue
            }
            log.Println("[GetSysConfigs] Warning: duplicate key:", key)
        }

        v, err := sc.Get(cleanKeys)
        if err != nil {
            return errors.Errorf("Load configs wrong: %s", err.Error())
        }

        t.keys = append(t.keys, cleanKeys...)
        t.values = append(t.values, v...)
    }

    return nil
}
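The duplicate-key cleanup in the removed Execute is easy to miss in the diff noise; the same logic, factored into a standalone sketch with hypothetical names:

// dedupKeys drops explicit keys already returned by a prefix query, mirroring
// the keyMap bookkeeping in the removed getSysConfigsTask.Execute.
func dedupKeys(prefixKeys, reqKeys []string) []string {
    seen := make(map[string]bool, len(prefixKeys))
    for _, k := range prefixKeys {
        seen[k] = true
    }
    clean := make([]string, 0, len(reqKeys))
    for _, k := range reqKeys {
        if seen[k] {
            continue // duplicate of a prefix hit or an earlier explicit key
        }
        seen[k] = true
        clean = append(clean, k)
    }
    return clean
}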
@ -1,150 +0,0 @@
package master

import (
    "context"
    "strings"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
    "github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
    "github.com/zilliztech/milvus-distributed/internal/util/typeutil"
    "go.etcd.io/etcd/clientv3"
    "google.golang.org/grpc"
)

func TestMaster_ConfigTask(t *testing.T) {
    Init()

    ctx, cancel := context.WithCancel(context.TODO())
    defer cancel()

    etcdCli, err := clientv3.New(clientv3.Config{Endpoints: []string{Params.EtcdAddress}})
    require.Nil(t, err)
    _, err = etcdCli.Delete(ctx, "/test/root", clientv3.WithPrefix())
    require.Nil(t, err)

    Params = ParamTable{
        Address: Params.Address,
        Port:    Params.Port,

        EtcdAddress:   Params.EtcdAddress,
        MetaRootPath:  "/test/root",
        PulsarAddress: Params.PulsarAddress,

        ProxyIDList:     []typeutil.UniqueID{1, 2},
        WriteNodeIDList: []typeutil.UniqueID{3, 4},

        TopicNum:                    5,
        QueryNodeNum:                3,
        SoftTimeTickBarrierInterval: 300,

        // segment
        SegmentSize:           536870912 / 1024 / 1024,
        SegmentSizeFactor:     0.75,
        DefaultRecordSize:     1024,
        MinSegIDAssignCnt:     1048576 / 1024,
        MaxSegIDAssignCnt:     Params.MaxSegIDAssignCnt,
        SegIDAssignExpiration: 2000,

        // msgChannel
        ProxyTimeTickChannelNames:     []string{"proxy1", "proxy2"},
        WriteNodeTimeTickChannelNames: []string{"write3", "write4"},
        InsertChannelNames:            []string{"dm0", "dm1"},
        K2SChannelNames:               []string{"k2s0", "k2s1"},
        QueryNodeStatsChannelName:     "statistic",
        MsgChannelSubName:             Params.MsgChannelSubName,
    }

    svr, err := CreateServer(ctx)
    require.Nil(t, err)
    err = svr.Run(10002)
    defer svr.Close()
    require.Nil(t, err)

    conn, err := grpc.DialContext(ctx, "127.0.0.1:10002", grpc.WithInsecure(), grpc.WithBlock())
    require.Nil(t, err)
    defer conn.Close()

    cli := masterpb.NewMasterClient(conn)
    testKeys := []string{
        "/etcd/address",
        "/master/port",
        "/master/proxyidlist",
        "/master/segmentthresholdfactor",
        "/pulsar/token",
        "/reader/stopflag",
        "/proxy/timezone",
        "/proxy/network/address",
        "/proxy/storage/path",
        "/storage/accesskey",
    }

    testVals := []string{
        "localhost",
        "53100",
        "[1 2]",
        "0.75",
        "eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY",
        "-1",
        "UTC+8",
        "0.0.0.0",
        "/var/lib/milvus",
        "",
    }

    sc := SysConfig{kv: svr.kvBase}
    sc.InitFromFile(".")

    configRequest := &internalpb.SysConfigRequest{
        MsgType:     internalpb.MsgType_kGetSysConfigs,
        ReqID:       1,
        Timestamp:   11,
        ProxyID:     1,
        Keys:        testKeys,
        KeyPrefixes: []string{},
    }

    response, err := cli.GetSysConfigs(ctx, configRequest)
    assert.Nil(t, err)
    assert.ElementsMatch(t, testKeys, response.Keys)
    assert.ElementsMatch(t, testVals, response.Values)
    assert.Equal(t, len(response.GetKeys()), len(response.GetValues()))

    configRequest = &internalpb.SysConfigRequest{
        MsgType:     internalpb.MsgType_kGetSysConfigs,
        ReqID:       1,
        Timestamp:   11,
        ProxyID:     1,
        Keys:        []string{},
        KeyPrefixes: []string{"/master"},
    }

    response, err = cli.GetSysConfigs(ctx, configRequest)
    assert.Nil(t, err)
    for i := range response.GetKeys() {
        assert.True(t, strings.HasPrefix(response.GetKeys()[i], "/master"))
    }
    assert.Equal(t, len(response.GetKeys()), len(response.GetValues()))

    t.Run("Test duplicate keys and key prefix", func(t *testing.T) {
        configRequest.Keys = []string{}
        configRequest.KeyPrefixes = []string{"/master"}

        resp, err := cli.GetSysConfigs(ctx, configRequest)
        require.Nil(t, err)
        assert.Equal(t, len(resp.GetKeys()), len(resp.GetValues()))
        assert.NotEqual(t, 0, len(resp.GetKeys()))

        configRequest.Keys = []string{"/master/port"}
        configRequest.KeyPrefixes = []string{"/master"}

        respDup, err := cli.GetSysConfigs(ctx, configRequest)
        require.Nil(t, err)
        assert.Equal(t, len(respDup.GetKeys()), len(respDup.GetValues()))
        assert.NotEqual(t, 0, len(respDup.GetKeys()))
        assert.Equal(t, len(respDup.GetKeys()), len(resp.GetKeys()))
    })

}
@ -1,111 +0,0 @@
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.

master: # 21
  address: localhost
  port: 53100
  pulsarmoniterinterval: 1
  pulsartopic: "monitor-topic"

  proxyidlist: [1, 2]
  proxyTimeSyncChannels: ["proxy1", "proxy2"]
  proxyTimeSyncSubName: "proxy-topic"
  softTimeTickBarrierInterval: 500

  writeidlist: [3, 4]
  writeTimeSyncChannels: ["write3", "write4"]
  writeTimeSyncSubName: "write-topic"

  dmTimeSyncChannels: ["dm5", "dm6"]
  k2sTimeSyncChannels: ["k2s7", "k2s8"]

  defaultSizePerRecord: 1024
  minimumAssignSize: 1048576
  segmentThreshold: 536870912
  segmentExpireDuration: 2000
  segmentThresholdFactor: 0.75
  querynodenum: 1
  writenodenum: 1
  statsChannels: "statistic"

etcd: # 4
  address: localhost
  port: 2379
  rootpath: by-dev
  segthreshold: 10000

timesync: # 1
  interval: 400

storage: # 5
  driver: TIKV
  address: localhost
  port: 2379
  accesskey:
  secretkey:

pulsar: # 6
  authentication: false
  user: user-default
  token: eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY
  address: localhost
  port: 6650
  topicnum: 128

reader: # 7
  clientid: 0
  stopflag: -1
  readerqueuesize: 10000
  searchchansize: 10000
  key2segchansize: 10000
  topicstart: 0
  topicend: 128

writer: # 8
  clientid: 0
  stopflag: -2
  readerqueuesize: 10000
  searchbyidchansize: 10000
  parallelism: 100
  topicstart: 0
  topicend: 128
  bucket: "zilliz-hz"

proxy: # 21
  timezone: UTC+8
  proxy_id: 1
  numReaderNodes: 2
  tsoSaveInterval: 200
  timeTickInterval: 200

  pulsarTopics:
    readerTopicPrefix: "milvusReader"
    numReaderTopics: 2
    deleteTopic: "milvusDeleter"
    queryTopic: "milvusQuery"
    resultTopic: "milvusResult"
    resultGroup: "milvusResultGroup"
    timeTickTopic: "milvusTimeTick"

  network:
    address: 0.0.0.0
    port: 19530

  logs:
    level: debug
    trace.enable: true
    path: /tmp/logs
    max_log_file_size: 1024MB
    log_rotate_num: 0

  storage:
    path: /var/lib/milvus
    auto_flush_interval: 1
@ -36,7 +36,7 @@ type GlobalTSOAllocator struct {
}

// NewGlobalTSOAllocator creates a new global TSO allocator.
func NewGlobalTSOAllocator(key string, kvBase kv.TxnBase) *GlobalTSOAllocator {
func NewGlobalTSOAllocator(key string, kvBase kv.Base) *GlobalTSOAllocator {
    var saveInterval = 3 * time.Second
    return &GlobalTSOAllocator{
        tso: &timestampOracle{
@ -359,43 +359,6 @@ func (s *Master) ShowPartitions(ctx context.Context, in *internalpb.ShowPartitio
    return t.(*showPartitionTask).stringListResponse, nil
}

func (s *Master) GetSysConfigs(ctx context.Context, in *internalpb.SysConfigRequest) (*servicepb.SysConfigResponse, error) {
    var t task = &getSysConfigsTask{
        req:      in,
        configkv: s.kvBase,
        baseTask: baseTask{
            sch: s.scheduler,
            mt:  s.metaTable,
            cv:  make(chan error),
        },
        keys:   []string{},
        values: []string{},
    }

    response := &servicepb.SysConfigResponse{
        Status: &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
        },
    }

    var err = s.scheduler.Enqueue(t)
    if err != nil {
        response.Status.Reason = "Enqueue failed: " + err.Error()
        return response, nil
    }

    err = t.WaitToFinish(ctx)
    if err != nil {
        response.Status.Reason = "Get System Config failed: " + err.Error()
        return response, nil
    }

    response.Keys = t.(*getSysConfigsTask).keys
    response.Values = t.(*getSysConfigsTask).values
    response.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
    return response, nil
}

//----------------------------------------Internal GRPC Service--------------------------------

func (s *Master) AllocTimestamp(ctx context.Context, request *internalpb.TsoRequest) (*internalpb.TsoResponse, error) {
@ -9,7 +9,7 @@ type GlobalIDAllocator struct {
    allocator Allocator
}

func NewGlobalIDAllocator(key string, base kv.TxnBase) *GlobalIDAllocator {
func NewGlobalIDAllocator(key string, base kv.Base) *GlobalIDAllocator {
    return &GlobalIDAllocator{
        allocator: NewGlobalTSOAllocator(key, base),
    }
@ -10,7 +10,8 @@ import (
    "sync/atomic"
    "time"

    etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
    "github.com/zilliztech/milvus-distributed/internal/errors"
    "github.com/zilliztech/milvus-distributed/internal/kv"
    ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
    "github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
    "github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
@ -42,7 +43,7 @@ type Master struct {
    grpcServer *grpc.Server
    grpcErr    chan error

    kvBase               *etcdkv.EtcdKV
    kvBase               *kv.EtcdKV
    scheduler            *ddRequestScheduler
    metaTable            *metaTable
    timesSyncMsgProducer *timeSyncMsgProducer
@ -63,12 +64,12 @@ type Master struct {
    tsoAllocator *GlobalTSOAllocator
}

func newKVBase(kvRoot string, etcdAddr []string) *etcdkv.EtcdKV {
func newKVBase(kvRoot string, etcdAddr []string) *kv.EtcdKV {
    cli, _ := clientv3.New(clientv3.Config{
        Endpoints:   etcdAddr,
        DialTimeout: 5 * time.Second,
    })
    kvBase := etcdkv.NewEtcdKV(cli, kvRoot)
    kvBase := kv.NewEtcdKV(cli, kvRoot)
    return kvBase
}

@ -89,8 +90,8 @@ func CreateServer(ctx context.Context) (*Master, error) {
    if err != nil {
        return nil, err
    }
    etcdKV := etcdkv.NewEtcdKV(etcdClient, metaRootPath)
    metakv, err := NewMetaTable(etcdKV)
    etcdkv := kv.NewEtcdKV(etcdClient, metaRootPath)
    metakv, err := NewMetaTable(etcdkv)
    if err != nil {
        return nil, err
    }
@ -122,11 +123,6 @@ func CreateServer(ctx context.Context) (*Master, error) {
    }
    tsMsgProducer.SetWriteNodeTtBarrier(writeTimeTickBarrier)

    pulsarDDStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
    pulsarDDStream.SetPulsarClient(pulsarAddr)
    pulsarDDStream.CreatePulsarProducers(Params.DDChannelNames)
    tsMsgProducer.SetDDSyncStream(pulsarDDStream)

    pulsarDMStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
    pulsarDMStream.SetPulsarClient(pulsarAddr)
    pulsarDMStream.CreatePulsarProducers(Params.InsertChannelNames)
@ -165,10 +161,7 @@ func CreateServer(ctx context.Context) (*Master, error) {
        return nil, err
    }

    m.scheduler = NewDDRequestScheduler(ctx)
    m.scheduler.SetDDMsgStream(pulsarDDStream)
    m.scheduler.SetIDAllocator(func() (UniqueID, error) { return m.idAllocator.AllocOne() })

    m.scheduler = NewDDRequestScheduler(func() (UniqueID, error) { return m.idAllocator.AllocOne() })
    m.segmentMgr = NewSegmentManager(metakv,
        func() (UniqueID, error) { return m.idAllocator.AllocOne() },
        func() (Timestamp, error) { return m.tsoAllocator.AllocOne() },
@ -255,11 +248,6 @@ func (s *Master) startServerLoop(ctx context.Context, grpcPort int64) error {
        return err
    }

    s.serverLoopWg.Add(1)
    if err := s.scheduler.Start(); err != nil {
        return err
    }

    s.serverLoopWg.Add(1)
    go s.grpcLoop(grpcPort)

@ -267,6 +255,9 @@ func (s *Master) startServerLoop(ctx context.Context, grpcPort int64) error {
        return err
    }

    s.serverLoopWg.Add(1)
    go s.tasksExecutionLoop()

    s.serverLoopWg.Add(1)
    go s.segmentStatisticsLoop()

@ -279,8 +270,6 @@ func (s *Master) startServerLoop(ctx context.Context, grpcPort int64) error {
func (s *Master) stopServerLoop() {
    s.timesSyncMsgProducer.Close()
    s.serverLoopWg.Done()
    s.scheduler.Close()
    s.serverLoopWg.Done()

    if s.grpcServer != nil {
        s.grpcServer.GracefulStop()
@ -348,6 +337,33 @@ func (s *Master) tsLoop() {
    }
}

func (s *Master) tasksExecutionLoop() {
    defer s.serverLoopWg.Done()
    ctx, cancel := context.WithCancel(s.serverLoopCtx)
    defer cancel()

    for {
        select {
        case task := <-s.scheduler.reqQueue:
            timeStamp, err := (task).Ts()
            if err != nil {
                log.Println(err)
            } else {
                if timeStamp < s.scheduler.scheduleTimeStamp {
                    task.Notify(errors.Errorf("input timestamp = %d, scheduler timestamp = %d", timeStamp, s.scheduler.scheduleTimeStamp))
                } else {
                    s.scheduler.scheduleTimeStamp = timeStamp
                    err = task.Execute()
                    task.Notify(err)
                }
            }
        case <-ctx.Done():
            log.Print("server is closed, exit task execution loop")
            return
        }
    }
}

func (s *Master) segmentStatisticsLoop() {
    defer s.serverLoopWg.Done()
    defer s.segmentStatusMsg.Close()
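The new tasksExecutionLoop replaces the scheduler's own loop (removed further down); its core is a timestamp gate that rejects out-of-order DDL tasks so they apply in timestamp order. A standalone sketch of that check, with hypothetical names and assuming the package's Timestamp alias:

// gate admits a task only if its timestamp is not older than the last
// scheduled one, returning the new high-water mark or an error.
func gate(taskTs, scheduledTs Timestamp) (Timestamp, error) {
    if taskTs < scheduledTs {
        return scheduledTs, fmt.Errorf("input timestamp = %d, scheduler timestamp = %d", taskTs, scheduledTs)
    }
    return taskTs, nil
}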
@ -1,246 +0,0 @@
package master

import (
    "context"
    "log"
    "math/rand"
    "strconv"
    "testing"

    "github.com/golang/protobuf/proto"
    "github.com/stretchr/testify/assert"
    ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
    "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
    "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
    internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
    "github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
    "github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
    "github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
    "go.uber.org/zap"
    "google.golang.org/grpc"
)

func receiveTimeTickMsg(stream *ms.MsgStream) bool {
    for {
        result := (*stream).Consume()
        if len(result.Msgs) > 0 {
            return true
        }
    }
}

func getTimeTickMsgPack(ttmsgs [][2]uint64) *ms.MsgPack {
    msgPack := ms.MsgPack{}
    for _, vi := range ttmsgs {
        msgPack.Msgs = append(msgPack.Msgs, getTtMsg(internalPb.MsgType_kTimeTick, UniqueID(vi[0]), Timestamp(vi[1])))
    }
    return &msgPack
}

func TestMaster(t *testing.T) {
    Init()
    pulsarAddr := Params.PulsarAddress

    // Creates server.
    ctx, cancel := context.WithCancel(context.Background())

    svr, err := CreateServer(ctx)
    if err != nil {
        log.Print("create server failed", zap.Error(err))
    }

    if err := svr.Run(int64(Params.Port)); err != nil {
        log.Fatal("run server failed", zap.Error(err))
    }

    proxyTimeTickStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
    proxyTimeTickStream.SetPulsarClient(pulsarAddr)
    proxyTimeTickStream.CreatePulsarProducers(Params.ProxyTimeTickChannelNames)
    proxyTimeTickStream.Start()

    writeNodeStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
    writeNodeStream.SetPulsarClient(pulsarAddr)
    writeNodeStream.CreatePulsarProducers(Params.WriteNodeTimeTickChannelNames)
    writeNodeStream.Start()

    ddMs := ms.NewPulsarMsgStream(ctx, 1024)
    ddMs.SetPulsarClient(pulsarAddr)
    ddMs.CreatePulsarConsumers(Params.DDChannelNames, "DDStream", ms.NewUnmarshalDispatcher(), 1024)
    ddMs.Start()

    dMMs := ms.NewPulsarMsgStream(ctx, 1024)
    dMMs.SetPulsarClient(pulsarAddr)
    dMMs.CreatePulsarConsumers(Params.InsertChannelNames, "DMStream", ms.NewUnmarshalDispatcher(), 1024)
    dMMs.Start()

    k2sMs := ms.NewPulsarMsgStream(ctx, 1024)
    k2sMs.SetPulsarClient(pulsarAddr)
    k2sMs.CreatePulsarConsumers(Params.K2SChannelNames, "K2SStream", ms.NewUnmarshalDispatcher(), 1024)
    k2sMs.Start()

    ttsoftmsgs := [][2]uint64{
        {0, 10},
    }
    msgSoftPackAddr := getTimeTickMsgPack(ttsoftmsgs)

    proxyTimeTickStream.Produce(msgSoftPackAddr)
    var dMMsgstream ms.MsgStream = dMMs
    assert.True(t, receiveTimeTickMsg(&dMMsgstream))
    var ddMsgstream ms.MsgStream = ddMs
    assert.True(t, receiveTimeTickMsg(&ddMsgstream))

    tthardmsgs := [][2]int{
        {3, 10},
    }

    msghardPackAddr := getMsgPack(tthardmsgs)
    writeNodeStream.Produce(msghardPackAddr)
    var k2sMsgstream ms.MsgStream = k2sMs
    assert.True(t, receiveTimeTickMsg(&k2sMsgstream))

    conn, err := grpc.DialContext(ctx, "127.0.0.1:53100", grpc.WithInsecure(), grpc.WithBlock())
    assert.Nil(t, err)
    defer conn.Close()

    cli := masterpb.NewMasterClient(conn)

    sch := schemapb.CollectionSchema{
        Name:        "name" + strconv.FormatUint(rand.Uint64(), 10),
        Description: "test collection",
        AutoID:      false,
        Fields:      []*schemapb.FieldSchema{},
    }

    schemaBytes, err := proto.Marshal(&sch)
    assert.Nil(t, err)

    createCollectionReq := internalpb.CreateCollectionRequest{
        MsgType:   internalpb.MsgType_kCreateCollection,
        ReqID:     1,
        Timestamp: 11,
        ProxyID:   1,
        Schema:    &commonpb.Blob{Value: schemaBytes},
    }
    st, err := cli.CreateCollection(ctx, &createCollectionReq)
    assert.Nil(t, err)
    assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)

    var consumeMsg ms.MsgStream = ddMs
    var createCollectionMsg *ms.CreateCollectionMsg
    for {
        result := consumeMsg.Consume()
        if len(result.Msgs) > 0 {
            msgs := result.Msgs
            for _, v := range msgs {
                createCollectionMsg = v.(*ms.CreateCollectionMsg)
            }
            break
        }
    }
    assert.Equal(t, createCollectionReq.MsgType, createCollectionMsg.CreateCollectionRequest.MsgType)
    assert.Equal(t, createCollectionReq.ReqID, createCollectionMsg.CreateCollectionRequest.ReqID)
    assert.Equal(t, createCollectionReq.Timestamp, createCollectionMsg.CreateCollectionRequest.Timestamp)
    assert.Equal(t, createCollectionReq.ProxyID, createCollectionMsg.CreateCollectionRequest.ProxyID)
    assert.Equal(t, createCollectionReq.Schema.Value, createCollectionMsg.CreateCollectionRequest.Schema.Value)

    ////////////////////////////CreatePartition////////////////////////
    partitionName := "partitionName" + strconv.FormatUint(rand.Uint64(), 10)
    createPartitionReq := internalpb.CreatePartitionRequest{
        MsgType:   internalpb.MsgType_kCreatePartition,
        ReqID:     1,
        Timestamp: 11,
        ProxyID:   1,
        PartitionName: &servicepb.PartitionName{
            CollectionName: sch.Name,
            Tag:            partitionName,
        },
    }

    st, err = cli.CreatePartition(ctx, &createPartitionReq)
    assert.Nil(t, err)
    assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)

    var createPartitionMsg *ms.CreatePartitionMsg
    for {
        result := consumeMsg.Consume()
        if len(result.Msgs) > 0 {
            msgs := result.Msgs
            for _, v := range msgs {
                createPartitionMsg = v.(*ms.CreatePartitionMsg)
            }
            break
        }
    }
    assert.Equal(t, createPartitionReq.MsgType, createPartitionMsg.CreatePartitionRequest.MsgType)
    assert.Equal(t, createPartitionReq.ReqID, createPartitionMsg.CreatePartitionRequest.ReqID)
    assert.Equal(t, createPartitionReq.Timestamp, createPartitionMsg.CreatePartitionRequest.Timestamp)
    assert.Equal(t, createPartitionReq.ProxyID, createPartitionMsg.CreatePartitionRequest.ProxyID)
    assert.Equal(t, createPartitionReq.PartitionName.CollectionName, createPartitionMsg.CreatePartitionRequest.PartitionName.CollectionName)
    assert.Equal(t, createPartitionReq.PartitionName.Tag, createPartitionMsg.CreatePartitionRequest.PartitionName.Tag)

    ////////////////////////////DropPartition////////////////////////
    dropPartitionReq := internalpb.DropPartitionRequest{
        MsgType:   internalpb.MsgType_kDropPartition,
        ReqID:     1,
        Timestamp: 11,
        ProxyID:   1,
        PartitionName: &servicepb.PartitionName{
            CollectionName: sch.Name,
            Tag:            partitionName,
        },
    }

    st, err = cli.DropPartition(ctx, &dropPartitionReq)
    assert.Nil(t, err)
    assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)

    var dropPartitionMsg *ms.DropPartitionMsg
    for {
        result := consumeMsg.Consume()
        if len(result.Msgs) > 0 {
            msgs := result.Msgs
            for _, v := range msgs {
                dropPartitionMsg = v.(*ms.DropPartitionMsg)
            }
            break
        }
    }
    assert.Equal(t, dropPartitionReq.MsgType, dropPartitionMsg.DropPartitionRequest.MsgType)
    assert.Equal(t, dropPartitionReq.ReqID, dropPartitionMsg.DropPartitionRequest.ReqID)
    assert.Equal(t, dropPartitionReq.Timestamp, dropPartitionMsg.DropPartitionRequest.Timestamp)
    assert.Equal(t, dropPartitionReq.ProxyID, dropPartitionMsg.DropPartitionRequest.ProxyID)
    assert.Equal(t, dropPartitionReq.PartitionName.CollectionName, dropPartitionMsg.DropPartitionRequest.PartitionName.CollectionName)

    ////////////////////////////DropCollection////////////////////////
    dropCollectionReq := internalpb.DropCollectionRequest{
        MsgType:        internalpb.MsgType_kDropCollection,
        ReqID:          1,
        Timestamp:      11,
        ProxyID:        1,
        CollectionName: &servicepb.CollectionName{CollectionName: sch.Name},
    }

    st, err = cli.DropCollection(ctx, &dropCollectionReq)
    assert.Nil(t, err)
    assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)

    var dropCollectionMsg *ms.DropCollectionMsg
    for {
        result := consumeMsg.Consume()
        if len(result.Msgs) > 0 {
            msgs := result.Msgs
            for _, v := range msgs {
                dropCollectionMsg = v.(*ms.DropCollectionMsg)
            }
            break
        }
    }
    assert.Equal(t, dropCollectionReq.MsgType, dropCollectionMsg.DropCollectionRequest.MsgType)
    assert.Equal(t, dropCollectionReq.ReqID, dropCollectionMsg.DropCollectionRequest.ReqID)
    assert.Equal(t, dropCollectionReq.Timestamp, dropCollectionMsg.DropCollectionRequest.Timestamp)
    assert.Equal(t, dropCollectionReq.ProxyID, dropCollectionMsg.DropCollectionRequest.ProxyID)
    assert.Equal(t, dropCollectionReq.CollectionName.CollectionName, dropCollectionMsg.DropCollectionRequest.CollectionName.CollectionName)

    cancel()
    svr.Close()
}
@ -11,7 +11,7 @@ import (
)

type metaTable struct {
    client        kv.TxnBase                     // client of a reliable kv service, i.e. etcd client
    client        *kv.EtcdKV                     // client of a reliable kv service, i.e. etcd client
    tenantID2Meta map[UniqueID]pb.TenantMeta     // tenant id to tenant meta
    proxyID2Meta  map[UniqueID]pb.ProxyMeta      // proxy id to proxy meta
    collID2Meta   map[UniqueID]pb.CollectionMeta // collection id to collection meta
@ -23,7 +23,7 @@ type metaTable struct {
    ddLock sync.RWMutex
}

func NewMetaTable(kv kv.TxnBase) (*metaTable, error) {
func NewMetaTable(kv *kv.EtcdKV) (*metaTable, error) {
    mt := &metaTable{
        client:     kv,
        tenantLock: sync.RWMutex{},
@ -7,7 +7,7 @@ import (
    "testing"

    "github.com/stretchr/testify/assert"
    etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
    "github.com/zilliztech/milvus-distributed/internal/kv"
    pb "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
    "github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
    "go.etcd.io/etcd/clientv3"
@ -19,7 +19,7 @@ func TestMetaTable_Collection(t *testing.T) {
    etcdAddr := Params.EtcdAddress
    cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
    assert.Nil(t, err)
    etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
    etcdKV := kv.NewEtcdKV(cli, "/etcd/test/root")

    _, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
    assert.Nil(t, err)
@ -157,7 +157,7 @@ func TestMetaTable_DeletePartition(t *testing.T) {

    cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
    assert.Nil(t, err)
    etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
    etcdKV := kv.NewEtcdKV(cli, "/etcd/test/root")

    _, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
    assert.Nil(t, err)
@ -252,7 +252,7 @@ func TestMetaTable_Segment(t *testing.T) {

    cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
    assert.Nil(t, err)
    etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
    etcdKV := kv.NewEtcdKV(cli, "/etcd/test/root")

    _, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
    assert.Nil(t, err)
@ -333,7 +333,7 @@ func TestMetaTable_UpdateSegment(t *testing.T) {

    cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
    assert.Nil(t, err)
    etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
    etcdKV := kv.NewEtcdKV(cli, "/etcd/test/root")

    _, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
    assert.Nil(t, err)
@ -379,7 +379,7 @@ func TestMetaTable_AddPartition_Limit(t *testing.T) {

    cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
    assert.Nil(t, err)
    etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
    etcdKV := kv.NewEtcdKV(cli, "/etcd/test/root")

    _, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
    assert.Nil(t, err)
@ -40,7 +40,6 @@ type ParamTable struct {
    // msgChannel
    ProxyTimeTickChannelNames     []string
    WriteNodeTimeTickChannelNames []string
    DDChannelNames                []string
    InsertChannelNames            []string
    K2SChannelNames               []string
    QueryNodeStatsChannelName     string
@ -55,19 +54,8 @@ var Params ParamTable
func (p *ParamTable) Init() {
    // load yaml
    p.BaseTable.Init()
    err := p.LoadYaml("milvus.yaml")
    if err != nil {
        panic(err)
    }
    err = p.LoadYaml("advanced/channel.yaml")
    if err != nil {
        panic(err)
    }
    err = p.LoadYaml("advanced/master.yaml")
    if err != nil {
        panic(err)
    }
    err = p.LoadYaml("advanced/common.yaml")

    err := p.LoadYaml("advanced/master.yaml")
    if err != nil {
        panic(err)
    }
@ -98,7 +86,6 @@ func (p *ParamTable) Init() {
    p.initProxyTimeTickChannelNames()
    p.initWriteNodeTimeTickChannelNames()
    p.initInsertChannelNames()
    p.initDDChannelNames()
    p.initK2SChannelNames()
    p.initQueryNodeStatsChannelName()
    p.initMsgChannelSubName()
@ -384,27 +371,6 @@ func (p *ParamTable) initWriteNodeTimeTickChannelNames() {
    p.WriteNodeTimeTickChannelNames = channels
}

func (p *ParamTable) initDDChannelNames() {
    ch, err := p.Load("msgChannel.chanNamePrefix.dataDefinition")
    if err != nil {
        log.Fatal(err)
    }
    id, err := p.Load("nodeID.queryNodeIDList")
    if err != nil {
        log.Panicf("load query node id list error, %s", err.Error())
    }
    ids := strings.Split(id, ",")
    channels := make([]string, 0, len(ids))
    for _, i := range ids {
        _, err := strconv.ParseInt(i, 10, 64)
        if err != nil {
            log.Panicf("load query node id list error, %s", err.Error())
        }
        channels = append(channels, ch+"-"+i)
    }
    p.DDChannelNames = channels
}

func (p *ParamTable) initInsertChannelNames() {
    ch, err := p.Load("msgChannel.chanNamePrefix.insert")
    if err != nil {
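The removed initDDChannelNames derives channel names by suffixing a prefix with each query node ID from a comma-separated list; the same derivation, factored into a standalone sketch with hypothetical names:

// channelNames validates a comma-separated ID list and appends each ID to the
// channel prefix, as the removed initDDChannelNames did.
func channelNames(prefix, idList string) ([]string, error) {
    ids := strings.Split(idList, ",")
    names := make([]string, 0, len(ids))
    for _, id := range ids {
        if _, err := strconv.ParseInt(id, 10, 64); err != nil {
            return nil, fmt.Errorf("invalid node id %q: %w", id, err)
        }
        names = append(names, prefix+"-"+id)
    }
    return names, nil
}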
@ -1,6 +1,7 @@
package master

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
@ -37,7 +38,7 @@ func TestParamTable_KVRootPath(t *testing.T) {
func TestParamTable_TopicNum(t *testing.T) {
    Params.Init()
    num := Params.TopicNum
    assert.Equal(t, num, 1)
    fmt.Println("TopicNum:", num)
}

func TestParamTable_SegmentSize(t *testing.T) {
@ -79,7 +80,7 @@ func TestParamTable_SegIDAssignExpiration(t *testing.T) {
func TestParamTable_QueryNodeNum(t *testing.T) {
    Params.Init()
    num := Params.QueryNodeNum
    assert.Equal(t, num, 1)
    fmt.Println("QueryNodeNum", num)
}

func TestParamTable_QueryNodeStatsChannelName(t *testing.T) {
@ -131,8 +132,7 @@ func TestParamTable_WriteNodeTimeTickChannelNames(t *testing.T) {
func TestParamTable_InsertChannelNames(t *testing.T) {
    Params.Init()
    names := Params.InsertChannelNames
    assert.Equal(t, len(names), 1)
    assert.Equal(t, names[0], "insert-0")
    assert.Equal(t, Params.TopicNum, len(names))
}

func TestParamTable_K2SChannelNames(t *testing.T) {
@ -4,7 +4,6 @@ import (
	"errors"
	"log"

	ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
@ -67,30 +66,7 @@ func (t *createPartitionTask) Execute() error {
	if err != nil {
		return err
	}

	ts, err := t.Ts()
	if err != nil {
		return err
	}

	err = t.mt.AddPartition(collectionMeta.ID, partitionName.Tag)
	if err != nil {
		return err
	}

	msgPack := ms.MsgPack{}
	baseMsg := ms.BaseMsg{
		BeginTimestamp: ts,
		EndTimestamp:   ts,
		HashValues:     []uint32{0},
	}
	timeTickMsg := &ms.CreatePartitionMsg{
		BaseMsg:                baseMsg,
		CreatePartitionRequest: *t.req,
	}
	msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
	return t.sch.ddMsgStream.Broadcast(&msgPack)

	return t.mt.AddPartition(collectionMeta.ID, partitionName.Tag)
}

//////////////////////////////////////////////////////////////////////////
@ -122,29 +98,7 @@ func (t *dropPartitionTask) Execute() error {
		return err
	}

	err = t.mt.DeletePartition(collectionMeta.ID, partitionName.Tag)
	if err != nil {
		return err
	}

	ts, err := t.Ts()
	if err != nil {
		return err
	}

	msgPack := ms.MsgPack{}
	baseMsg := ms.BaseMsg{
		BeginTimestamp: ts,
		EndTimestamp:   ts,
		HashValues:     []uint32{0},
	}
	timeTickMsg := &ms.DropPartitionMsg{
		BaseMsg:              baseMsg,
		DropPartitionRequest: *t.req,
	}
	msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
	return t.sch.ddMsgStream.Broadcast(&msgPack)

	return t.mt.DeletePartition(collectionMeta.ID, partitionName.Tag)
}

//////////////////////////////////////////////////////////////////////////
@ -178,7 +132,6 @@ func (t *hasPartitionTask) Execute() error {
	t.hasPartition = t.mt.HasPartition(collectionMeta.ID, partitionName.Tag)

	return nil

}

//////////////////////////////////////////////////////////////////////////
@ -215,7 +168,6 @@ func (t *describePartitionTask) Execute() error {
	t.description = &description

	return nil

}

//////////////////////////////////////////////////////////////////////////
@ -256,5 +208,4 @@ func (t *showPartitionTask) Execute() error {
	t.stringListResponse = &stringListResponse

	return nil

}
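The block removed from createPartitionTask.Execute above follows a reusable pattern: wrap the request in a typed message with a single hash value and broadcast it as a MsgPack. A sketch of that pattern, assuming the msgstream types shown elsewhere in this commit (the helper itself is not part of the source):

// broadcastCreatePartition shows the removed broadcast step in isolation.
// The surrounding task plumbing (metadata update, Ts()) is omitted.
func broadcastCreatePartition(stream ms.MsgStream, req *internalpb.CreatePartitionRequest, ts Timestamp) error {
	baseMsg := ms.BaseMsg{
		BeginTimestamp: ts,
		EndTimestamp:   ts,
		HashValues:     []uint32{0},
	}
	msgPack := ms.MsgPack{}
	msgPack.Msgs = append(msgPack.Msgs, &ms.CreatePartitionMsg{
		BaseMsg:                baseMsg,
		CreatePartitionRequest: *req,
	})
	return stream.Broadcast(&msgPack)
}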
@ -1,36 +1,17 @@
package master

import (
	"context"
	"log"

	"github.com/zilliztech/milvus-distributed/internal/errors"
	ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
)

//type ddRequestScheduler interface {}

//type ddReqFIFOScheduler struct {}

type ddRequestScheduler struct {
	ctx    context.Context
	cancel context.CancelFunc

	globalIDAllocator func() (UniqueID, error)
	reqQueue          chan task
	scheduleTimeStamp Timestamp
	ddMsgStream       ms.MsgStream
}

func NewDDRequestScheduler(ctx context.Context) *ddRequestScheduler {
func NewDDRequestScheduler(allocGlobalID func() (UniqueID, error)) *ddRequestScheduler {
	const channelSize = 1024

	ctx2, cancel := context.WithCancel(ctx)

	rs := ddRequestScheduler{
		ctx:    ctx2,
		cancel: cancel,
		reqQueue: make(chan task, channelSize),
		globalIDAllocator: allocGlobalID,
		reqQueue:          make(chan task, channelSize),
	}
	return &rs
}
@ -39,51 +20,3 @@ func (rs *ddRequestScheduler) Enqueue(task task) error {
	rs.reqQueue <- task
	return nil
}

func (rs *ddRequestScheduler) SetIDAllocator(allocGlobalID func() (UniqueID, error)) {
	rs.globalIDAllocator = allocGlobalID
}

func (rs *ddRequestScheduler) SetDDMsgStream(ddStream ms.MsgStream) {
	rs.ddMsgStream = ddStream
}

func (rs *ddRequestScheduler) scheduleLoop() {
	for {
		select {
		case task := <-rs.reqQueue:
			err := rs.schedule(task)
			if err != nil {
				log.Println(err)
			}
		case <-rs.ctx.Done():
			log.Print("server is closed, exit task execution loop")
			return
		}
	}
}

func (rs *ddRequestScheduler) schedule(t task) error {
	timeStamp, err := t.Ts()
	if err != nil {
		log.Println(err)
		return err
	}
	if timeStamp < rs.scheduleTimeStamp {
		t.Notify(errors.Errorf("input timestamp = %d, schduler timestamp = %d", timeStamp, rs.scheduleTimeStamp))
	} else {
		rs.scheduleTimeStamp = timeStamp
		err = t.Execute()
		t.Notify(err)
	}
	return nil
}

func (rs *ddRequestScheduler) Start() error {
	go rs.scheduleLoop()
	return nil
}

func (rs *ddRequestScheduler) Close() {
	rs.cancel()
}
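With the refactor above, the scheduler is constructed directly with an ID-allocator closure instead of a context, and only Enqueue remains on its surface. A minimal usage sketch (idAllocator and myTask stand in for real values and are not part of the source):

sched := NewDDRequestScheduler(func() (UniqueID, error) { return idAllocator.AllocOne() })
if err := sched.Enqueue(myTask); err != nil {
	log.Println(err)
}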
@ -1,342 +0,0 @@
package master

import (
	"context"
	"math/rand"
	"strconv"
	"testing"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/stretchr/testify/assert"
	etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
	ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
	"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
	"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
	"go.etcd.io/etcd/clientv3"
)

func TestMaster_Scheduler_Collection(t *testing.T) {
	Init()
	etcdAddress := Params.EtcdAddress
	kvRootPath := Params.MetaRootPath
	pulsarAddr := Params.PulsarAddress

	producerChannels := []string{"ddstream"}
	consumerChannels := []string{"ddstream"}
	consumerSubName := "substream"

	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
	assert.Nil(t, err)
	etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")

	meta, err := NewMetaTable(etcdKV)
	assert.Nil(t, err)
	defer meta.client.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	pulsarDDStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
	pulsarDDStream.SetPulsarClient(pulsarAddr)
	pulsarDDStream.CreatePulsarProducers(producerChannels)
	pulsarDDStream.Start()
	defer pulsarDDStream.Close()

	consumeMs := ms.NewPulsarMsgStream(ctx, 1024)
	consumeMs.SetPulsarClient(pulsarAddr)
	consumeMs.CreatePulsarConsumers(consumerChannels, consumerSubName, ms.NewUnmarshalDispatcher(), 1024)
	consumeMs.Start()
	defer consumeMs.Close()

	idAllocator := NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{etcdAddress}, kvRootPath, "gid"))
	err = idAllocator.Initialize()
	assert.Nil(t, err)

	scheduler := NewDDRequestScheduler(ctx)
	scheduler.SetDDMsgStream(pulsarDDStream)
	scheduler.SetIDAllocator(func() (UniqueID, error) { return idAllocator.AllocOne() })
	scheduler.Start()
	defer scheduler.Close()

	rand.Seed(time.Now().Unix())
	sch := schemapb.CollectionSchema{
		Name:        "name" + strconv.FormatUint(rand.Uint64(), 10),
		Description: "string",
		AutoID:      true,
		Fields:      nil,
	}

	schemaBytes, err := proto.Marshal(&sch)
	assert.Nil(t, err)

	////////////////////////////CreateCollection////////////////////////
	createCollectionReq := internalpb.CreateCollectionRequest{
		MsgType:   internalpb.MsgType_kCreateCollection,
		ReqID:     1,
		Timestamp: 11,
		ProxyID:   1,
		Schema:    &commonpb.Blob{Value: schemaBytes},
	}

	var createCollectionTask task = &createCollectionTask{
		req: &createCollectionReq,
		baseTask: baseTask{
			sch: scheduler,
			mt:  meta,
			cv:  make(chan error),
		},
	}

	err = scheduler.Enqueue(createCollectionTask)
	assert.Nil(t, err)
	err = createCollectionTask.WaitToFinish(ctx)
	assert.Nil(t, err)

	var consumeMsg ms.MsgStream = consumeMs
	var createCollectionMsg *ms.CreateCollectionMsg
	for {
		result := consumeMsg.Consume()
		if len(result.Msgs) > 0 {
			msgs := result.Msgs
			for _, v := range msgs {
				createCollectionMsg = v.(*ms.CreateCollectionMsg)
			}
			break
		}
	}
	assert.Equal(t, createCollectionReq.MsgType, createCollectionMsg.CreateCollectionRequest.MsgType)
	assert.Equal(t, createCollectionReq.ReqID, createCollectionMsg.CreateCollectionRequest.ReqID)
	assert.Equal(t, createCollectionReq.Timestamp, createCollectionMsg.CreateCollectionRequest.Timestamp)
	assert.Equal(t, createCollectionReq.ProxyID, createCollectionMsg.CreateCollectionRequest.ProxyID)
	assert.Equal(t, createCollectionReq.Schema.Value, createCollectionMsg.CreateCollectionRequest.Schema.Value)

	////////////////////////////DropCollection////////////////////////
	dropCollectionReq := internalpb.DropCollectionRequest{
		MsgType:        internalpb.MsgType_kDropCollection,
		ReqID:          1,
		Timestamp:      11,
		ProxyID:        1,
		CollectionName: &servicepb.CollectionName{CollectionName: sch.Name},
	}

	var dropCollectionTask task = &dropCollectionTask{
		req: &dropCollectionReq,
		baseTask: baseTask{
			sch: scheduler,
			mt:  meta,
			cv:  make(chan error),
		},
	}

	err = scheduler.Enqueue(dropCollectionTask)
	assert.Nil(t, err)
	err = dropCollectionTask.WaitToFinish(ctx)
	assert.Nil(t, err)

	var dropCollectionMsg *ms.DropCollectionMsg
	for {
		result := consumeMsg.Consume()
		if len(result.Msgs) > 0 {
			msgs := result.Msgs
			for _, v := range msgs {
				dropCollectionMsg = v.(*ms.DropCollectionMsg)
			}
			break
		}
	}
	assert.Equal(t, dropCollectionReq.MsgType, dropCollectionMsg.DropCollectionRequest.MsgType)
	assert.Equal(t, dropCollectionReq.ReqID, dropCollectionMsg.DropCollectionRequest.ReqID)
	assert.Equal(t, dropCollectionReq.Timestamp, dropCollectionMsg.DropCollectionRequest.Timestamp)
	assert.Equal(t, dropCollectionReq.ProxyID, dropCollectionMsg.DropCollectionRequest.ProxyID)
	assert.Equal(t, dropCollectionReq.CollectionName.CollectionName, dropCollectionMsg.DropCollectionRequest.CollectionName.CollectionName)

}

func TestMaster_Scheduler_Partition(t *testing.T) {
	Init()
	etcdAddress := Params.EtcdAddress
	kvRootPath := Params.MetaRootPath
	pulsarAddr := Params.PulsarAddress

	producerChannels := []string{"ddstream"}
	consumerChannels := []string{"ddstream"}
	consumerSubName := "substream"

	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
	assert.Nil(t, err)
	etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")

	meta, err := NewMetaTable(etcdKV)
	assert.Nil(t, err)
	defer meta.client.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	pulsarDDStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
	pulsarDDStream.SetPulsarClient(pulsarAddr)
	pulsarDDStream.CreatePulsarProducers(producerChannels)
	pulsarDDStream.Start()
	defer pulsarDDStream.Close()

	consumeMs := ms.NewPulsarMsgStream(ctx, 1024)
	consumeMs.SetPulsarClient(pulsarAddr)
	consumeMs.CreatePulsarConsumers(consumerChannels, consumerSubName, ms.NewUnmarshalDispatcher(), 1024)
	consumeMs.Start()
	defer consumeMs.Close()

	idAllocator := NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{etcdAddress}, kvRootPath, "gid"))
	err = idAllocator.Initialize()
	assert.Nil(t, err)

	scheduler := NewDDRequestScheduler(ctx)
	scheduler.SetDDMsgStream(pulsarDDStream)
	scheduler.SetIDAllocator(func() (UniqueID, error) { return idAllocator.AllocOne() })
	scheduler.Start()
	defer scheduler.Close()

	rand.Seed(time.Now().Unix())
	sch := schemapb.CollectionSchema{
		Name:        "name" + strconv.FormatUint(rand.Uint64(), 10),
		Description: "string",
		AutoID:      true,
		Fields:      nil,
	}

	schemaBytes, err := proto.Marshal(&sch)
	assert.Nil(t, err)

	////////////////////////////CreateCollection////////////////////////
	createCollectionReq := internalpb.CreateCollectionRequest{
		MsgType:   internalpb.MsgType_kCreateCollection,
		ReqID:     1,
		Timestamp: 11,
		ProxyID:   1,
		Schema:    &commonpb.Blob{Value: schemaBytes},
	}

	var createCollectionTask task = &createCollectionTask{
		req: &createCollectionReq,
		baseTask: baseTask{
			sch: scheduler,
			mt:  meta,
			cv:  make(chan error),
		},
	}

	err = scheduler.Enqueue(createCollectionTask)
	assert.Nil(t, err)
	err = createCollectionTask.WaitToFinish(ctx)
	assert.Nil(t, err)

	var consumeMsg ms.MsgStream = consumeMs
	var createCollectionMsg *ms.CreateCollectionMsg
	for {
		result := consumeMsg.Consume()
		if len(result.Msgs) > 0 {
			msgs := result.Msgs
			for _, v := range msgs {
				createCollectionMsg = v.(*ms.CreateCollectionMsg)
			}
			break
		}
	}
	assert.Equal(t, createCollectionReq.MsgType, createCollectionMsg.CreateCollectionRequest.MsgType)
	assert.Equal(t, createCollectionReq.ReqID, createCollectionMsg.CreateCollectionRequest.ReqID)
	assert.Equal(t, createCollectionReq.Timestamp, createCollectionMsg.CreateCollectionRequest.Timestamp)
	assert.Equal(t, createCollectionReq.ProxyID, createCollectionMsg.CreateCollectionRequest.ProxyID)
	assert.Equal(t, createCollectionReq.Schema.Value, createCollectionMsg.CreateCollectionRequest.Schema.Value)

	////////////////////////////CreatePartition////////////////////////
	partitionName := "partitionName" + strconv.FormatUint(rand.Uint64(), 10)
	createPartitionReq := internalpb.CreatePartitionRequest{
		MsgType:   internalpb.MsgType_kCreatePartition,
		ReqID:     1,
		Timestamp: 11,
		ProxyID:   1,
		PartitionName: &servicepb.PartitionName{
			CollectionName: sch.Name,
			Tag:            partitionName,
		},
	}

	var createPartitionTask task = &createPartitionTask{
		req: &createPartitionReq,
		baseTask: baseTask{
			sch: scheduler,
			mt:  meta,
			cv:  make(chan error),
		},
	}

	err = scheduler.Enqueue(createPartitionTask)
	assert.Nil(t, err)
	err = createPartitionTask.WaitToFinish(ctx)
	assert.Nil(t, err)

	var createPartitionMsg *ms.CreatePartitionMsg
	for {
		result := consumeMsg.Consume()
		if len(result.Msgs) > 0 {
			msgs := result.Msgs
			for _, v := range msgs {
				createPartitionMsg = v.(*ms.CreatePartitionMsg)
			}
			break
		}
	}
	assert.Equal(t, createPartitionReq.MsgType, createPartitionMsg.CreatePartitionRequest.MsgType)
	assert.Equal(t, createPartitionReq.ReqID, createPartitionMsg.CreatePartitionRequest.ReqID)
	assert.Equal(t, createPartitionReq.Timestamp, createPartitionMsg.CreatePartitionRequest.Timestamp)
	assert.Equal(t, createPartitionReq.ProxyID, createPartitionMsg.CreatePartitionRequest.ProxyID)
	assert.Equal(t, createPartitionReq.PartitionName.CollectionName, createPartitionMsg.CreatePartitionRequest.PartitionName.CollectionName)
	assert.Equal(t, createPartitionReq.PartitionName.Tag, createPartitionMsg.CreatePartitionRequest.PartitionName.Tag)

	////////////////////////////DropPartition////////////////////////
	dropPartitionReq := internalpb.DropPartitionRequest{
		MsgType:   internalpb.MsgType_kDropPartition,
		ReqID:     1,
		Timestamp: 11,
		ProxyID:   1,
		PartitionName: &servicepb.PartitionName{
			CollectionName: sch.Name,
			Tag:            partitionName,
		},
	}

	var dropPartitionTask task = &dropPartitionTask{
		req: &dropPartitionReq,
		baseTask: baseTask{
			sch: scheduler,
			mt:  meta,
			cv:  make(chan error),
		},
	}

	err = scheduler.Enqueue(dropPartitionTask)
	assert.Nil(t, err)
	err = dropPartitionTask.WaitToFinish(ctx)
	assert.Nil(t, err)

	var dropPartitionMsg *ms.DropPartitionMsg
	for {
		result := consumeMsg.Consume()
		if len(result.Msgs) > 0 {
			msgs := result.Msgs
			for _, v := range msgs {
				dropPartitionMsg = v.(*ms.DropPartitionMsg)
			}
			break
		}
	}
	assert.Equal(t, dropPartitionReq.MsgType, dropPartitionMsg.DropPartitionRequest.MsgType)
	assert.Equal(t, dropPartitionReq.ReqID, dropPartitionMsg.DropPartitionRequest.ReqID)
	assert.Equal(t, dropPartitionReq.Timestamp, dropPartitionMsg.DropPartitionRequest.Timestamp)
	assert.Equal(t, dropPartitionReq.ProxyID, dropPartitionMsg.DropPartitionRequest.ProxyID)
	assert.Equal(t, dropPartitionReq.PartitionName.CollectionName, dropPartitionMsg.DropPartitionRequest.PartitionName.CollectionName)

}
@ -11,7 +11,6 @@ import (
	"github.com/stretchr/testify/assert"
	"github.com/zilliztech/milvus-distributed/internal/errors"
	"github.com/zilliztech/milvus-distributed/internal/kv"
	etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
	"github.com/zilliztech/milvus-distributed/internal/msgstream"
	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	pb "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
@ -30,7 +29,7 @@ var segMgr *SegmentManager
var collName = "coll_segmgr_test"
var collID = int64(1001)
var partitionTag = "test"
var kvBase kv.TxnBase
var kvBase *kv.EtcdKV
var master *Master
var masterCancelFunc context.CancelFunc

@ -49,7 +48,7 @@ func setup() {
	if err != nil {
		panic(err)
	}
	kvBase = etcdkv.NewEtcdKV(cli, rootPath)
	kvBase = kv.NewEtcdKV(cli, rootPath)
	tmpMt, err := NewMetaTable(kvBase)
	if err != nil {
		panic(err)
@ -1,117 +0,0 @@
package master

import (
	"fmt"
	"log"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/spf13/viper"
	"github.com/zilliztech/milvus-distributed/internal/errors"
	etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
)

type SysConfig struct {
	kv *etcdkv.EtcdKV
}

// Initialize Configs from config files, and store them in Etcd.
func (conf *SysConfig) InitFromFile(filePath string) error {
	memConfigs, err := conf.getConfigFiles(filePath)
	if err != nil {
		return errors.Errorf("[Init SysConfig] %s\n", err.Error())
	}

	for _, memConfig := range memConfigs {
		if err := conf.saveToEtcd(memConfig, "config"); err != nil {
			return errors.Errorf("[Init SysConfig] %s\n", err.Error())
		}
	}
	return nil
}

func (conf *SysConfig) GetByPrefix(keyPrefix string) (keys []string, values []string, err error) {
	realPrefix := path.Join("config", strings.ToLower(keyPrefix))
	keys, values, err = conf.kv.LoadWithPrefix(realPrefix)
	for index := range keys {
		keys[index] = strings.Replace(keys[index], conf.kv.GetPath("config"), "", 1)
	}
	if err != nil {
		return nil, nil, err
	}
	log.Println("Loaded", len(keys), "pairs of configs with prefix", keyPrefix)
	return keys, values, err
}

// Get specific configs for keys.
func (conf *SysConfig) Get(keys []string) ([]string, error) {
	var keysToLoad []string
	for i := range keys {
		keysToLoad = append(keysToLoad, path.Join("config", strings.ToLower(keys[i])))
	}

	values, err := conf.kv.MultiLoad(keysToLoad)
	if err != nil {
		return nil, err
	}

	return values, nil
}

func (conf *SysConfig) getConfigFiles(filePath string) ([]*viper.Viper, error) {

	var vipers []*viper.Viper
	err := filepath.Walk(filePath,
		func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}

			// all names
			if !info.IsDir() && filepath.Ext(path) == ".yaml" {
				log.Println("Config files ", info.Name())

				currentConf := viper.New()
				currentConf.SetConfigFile(path)
				if err := currentConf.ReadInConfig(); err != nil {
					log.Panic("Config file error: ", err)
				}
				vipers = append(vipers, currentConf)
			}
			return nil
		})
	if err != nil {
		return nil, err
	}

	if len(vipers) == 0 {
		return nil, errors.Errorf("There are no config files in the path `%s`.\n", filePath)
	}
	return vipers, nil
}

func (conf *SysConfig) saveToEtcd(memConfig *viper.Viper, secondRootPath string) error {
	configMaps := map[string]string{}

	allKeys := memConfig.AllKeys()
	for _, key := range allKeys {
		etcdKey := strings.ReplaceAll(key, ".", "/")

		etcdKey = path.Join(secondRootPath, etcdKey)

		val := memConfig.Get(key)
		if val == nil {
			configMaps[etcdKey] = ""
			continue
		}
		configMaps[etcdKey] = fmt.Sprintf("%v", val)
	}

	if err := conf.kv.MultiSave(configMaps); err != nil {
		return err
	}

	return nil
}
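The key mapping performed by the removed saveToEtcd is simple to state: viper keys use dots, while the stored etcd keys use slashes under a second-level root. A runnable sketch of just that mapping (the key and root values are illustrative):

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	// A viper key like "a.suba3.subsuba1" becomes an etcd key
	// "config/a/suba3/subsuba1", matching the test expectations below.
	viperKey := "a.suba3.subsuba1"
	etcdKey := path.Join("config", strings.ReplaceAll(viperKey, ".", "/"))
	fmt.Println(etcdKey) // config/a/suba3/subsuba1
}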
@ -1,209 +0,0 @@
package master

import (
	"context"
	"fmt"
	"log"
	"path"
	"strings"
	"testing"
	"time"

	etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"

	"github.com/spf13/viper"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.etcd.io/etcd/clientv3"
)

func Test_SysConfig(t *testing.T) {
	Init()
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{Params.EtcdAddress},
		DialTimeout: 5 * time.Second,
	})
	require.Nil(t, err)
	_, err = cli.Delete(ctx, "/test/root", clientv3.WithPrefix())
	require.Nil(t, err)

	rootPath := "/test/root"
	configKV := etcdkv.NewEtcdKV(cli, rootPath)
	defer configKV.Close()

	sc := SysConfig{kv: configKV}
	require.Equal(t, rootPath, sc.kv.GetPath("."))

	t.Run("tests on contig_test.yaml", func(t *testing.T) {
		err = sc.InitFromFile(".")
		require.Nil(t, err)

		testKeys := []string{
			"/etcd/address",
			"/master/port",
			"/master/proxyidlist",
			"/master/segmentthresholdfactor",
			"/pulsar/token",
			"/reader/stopflag",
			"/proxy/timezone",
			"/proxy/network/address",
			"/proxy/storage/path",
			"/storage/accesskey",
		}

		testVals := []string{
			"localhost",
			"53100",
			"[1 2]",
			"0.75",
			"eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY",
			"-1",
			"UTC+8",
			"0.0.0.0",
			"/var/lib/milvus",
			"",
		}

		vals, err := sc.Get(testKeys)
		assert.Nil(t, err)
		for i := range testVals {
			assert.Equal(t, testVals[i], vals[i])
		}

		keys, vals, err := sc.GetByPrefix("/master")
		assert.Nil(t, err)
		for i := range keys {
			assert.True(t, strings.HasPrefix(keys[i], "/master/"))
		}
		assert.Equal(t, len(keys), len(vals))
		assert.Equal(t, 21, len(keys))

		// Test get all configs
		keys, vals, err = sc.GetByPrefix("/")
		assert.Nil(t, err)
		assert.Equal(t, len(keys), len(vals))
		assert.Equal(t, 73, len(vals))

		// Test get configs with prefix not exist
		keys, vals, err = sc.GetByPrefix("/config")
		assert.Nil(t, err)
		assert.Equal(t, len(keys), len(vals))
		assert.Equal(t, 0, len(keys))
		assert.Equal(t, 0, len(vals))

		_, _, err = sc.GetByPrefix("//././../../../../../..//../")
		assert.Nil(t, err)
		_, _, err = sc.GetByPrefix("/master/./address")
		assert.Nil(t, err)
		_, _, err = sc.GetByPrefix(".")
		assert.Nil(t, err)
		_, _, err = sc.GetByPrefix("\\")
		assert.Nil(t, err)

	})

	t.Run("getConfigFiles", func(t *testing.T) {
		filePath := "../../configs"
		vipers, err := sc.getConfigFiles(filePath)
		assert.Nil(t, err)
		assert.NotNil(t, vipers[0])

		filePath = "/path/not/exists"
		_, err = sc.getConfigFiles(filePath)
		assert.NotNil(t, err)
		log.Println(err)
	})

	t.Run("Test saveToEtcd Normal", func(t *testing.T) {
		_, err = cli.Delete(ctx, "/test/root/config", clientv3.WithPrefix())
		require.Nil(t, err)

		v := viper.New()

		v.Set("a.suba1", "v1")
		v.Set("a.suba2", "v2")
		v.Set("a.suba3.subsuba1", "v3")
		v.Set("a.suba3.subsuba2", "v4")

		secondRootPath := "config"
		err := sc.saveToEtcd(v, secondRootPath)
		assert.Nil(t, err)

		value, err := sc.kv.Load(path.Join(secondRootPath, "a/suba1"))
		assert.Nil(t, err)
		assert.Equal(t, "v1", value)
		value, err = sc.kv.Load(path.Join(secondRootPath, "a/suba2"))
		assert.Nil(t, err)
		assert.Equal(t, "v2", value)
		value, err = sc.kv.Load(path.Join(secondRootPath, "a/suba3/subsuba1"))
		assert.Nil(t, err)
		assert.Equal(t, "v3", value)
		value, err = sc.kv.Load(path.Join(secondRootPath, "a/suba3/subsuba2"))
		assert.Nil(t, err)
		assert.Equal(t, "v4", value)

		keys, values, err := sc.kv.LoadWithPrefix(path.Join(secondRootPath, "a"))
		assert.Nil(t, err)
		assert.Equal(t, 4, len(keys))
		assert.Equal(t, 4, len(values))
		assert.ElementsMatch(t, []string{
			path.Join(sc.kv.GetPath(secondRootPath), "/a/suba1"),
			path.Join(sc.kv.GetPath(secondRootPath), "/a/suba2"),
			path.Join(sc.kv.GetPath(secondRootPath), "/a/suba3/subsuba1"),
			path.Join(sc.kv.GetPath(secondRootPath), "/a/suba3/subsuba2"),
		}, keys)
		assert.ElementsMatch(t, []string{"v1", "v2", "v3", "v4"}, values)

		keys = []string{
			"/a/suba1",
			"/a/suba2",
			"/a/suba3/subsuba1",
			"/a/suba3/subsuba2",
		}
		values, err = sc.Get(keys)
		assert.Nil(t, err)
		assert.ElementsMatch(t, []string{"v1", "v2", "v3", "v4"}, values)

		keysAfter, values, err := sc.GetByPrefix("/a")
		fmt.Println(keysAfter)
		assert.Nil(t, err)
		assert.ElementsMatch(t, []string{"v1", "v2", "v3", "v4"}, values)
		assert.ElementsMatch(t, keys, keysAfter)

	})

	t.Run("Test saveToEtcd Different value types", func(t *testing.T) {
		v := viper.New()

		v.Set("string", "string")
		v.Set("number", 1)
		v.Set("nil", nil)
		v.Set("float", 1.2)
		v.Set("intslice", []int{100, 200})
		v.Set("stringslice", []string{"a", "b"})
		v.Set("stringmapstring", map[string]string{"k1": "1", "k2": "2"})

		secondRootPath := "test_save_to_etcd_different_value_types"
		err := sc.saveToEtcd(v, secondRootPath)
		require.Nil(t, err)

		keys, values, err := sc.kv.LoadWithPrefix(path.Join("/", secondRootPath))
		assert.Nil(t, err)
		assert.Equal(t, 7, len(keys))
		assert.Equal(t, 7, len(values))

		assert.ElementsMatch(t, []string{
			path.Join(sc.kv.GetPath(secondRootPath), "nil"),
			path.Join(sc.kv.GetPath(secondRootPath), "string"),
			path.Join(sc.kv.GetPath(secondRootPath), "number"),
			path.Join(sc.kv.GetPath(secondRootPath), "float"),
			path.Join(sc.kv.GetPath(secondRootPath), "intslice"),
			path.Join(sc.kv.GetPath(secondRootPath), "stringslice"),
			path.Join(sc.kv.GetPath(secondRootPath), "stringmapstring"),
		}, keys)
		assert.ElementsMatch(t, []string{"", "string", "1", "1.2", "[100 200]", "[a b]", "map[k1:1 k2:2]"}, values)
	})
}
@ -81,18 +81,13 @@ func receiveMsg(stream *ms.MsgStream) []uint64 {
func TestStream_PulsarMsgStream_TimeTick(t *testing.T) {
	Init()
	pulsarAddress := Params.PulsarAddress

	producerChannels := []string{"proxyTtBarrier"}
	consumerChannels := []string{"proxyTtBarrier"}
	consumerSubName := "proxyTtBarrier"
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	producerChannels := []string{"proxyDMTtBarrier"}
	consumerChannels := []string{"proxyDMTtBarrier"}
	consumerSubName := "proxyDMTtBarrier"
	proxyDMTtInputStream, proxyDMTtOutputStream := initTestPulsarStream(ctx, pulsarAddress, producerChannels, consumerChannels, consumerSubName)

	producerChannels = []string{"proxyDDTtBarrier"}
	consumerChannels = []string{"proxyDDTtBarrier"}
	consumerSubName = "proxyDDTtBarrier"
	proxyDDTtInputStream, proxyDDTtOutputStream := initTestPulsarStream(ctx, pulsarAddress, producerChannels, consumerChannels, consumerSubName)
	proxyTtInputStream, proxyTtOutputStream := initTestPulsarStream(ctx, pulsarAddress, producerChannels, consumerChannels, consumerSubName)

	producerChannels = []string{"writeNodeBarrier"}
	consumerChannels = []string{"writeNodeBarrier"}
@ -102,20 +97,14 @@ func TestStream_PulsarMsgStream_TimeTick(t *testing.T) {
	timeSyncProducer, _ := NewTimeSyncMsgProducer(ctx)
	timeSyncProducer.SetProxyTtBarrier(&TestTickBarrier{ctx: ctx})
	timeSyncProducer.SetWriteNodeTtBarrier(&TestTickBarrier{ctx: ctx})
	timeSyncProducer.SetDMSyncStream(*proxyDMTtInputStream)
	timeSyncProducer.SetDDSyncStream(*proxyDDTtInputStream)
	timeSyncProducer.SetDMSyncStream(*proxyTtInputStream)
	timeSyncProducer.SetK2sSyncStream(*writeNodeInputStream)
	(*proxyDMTtOutputStream).Start()
	(*proxyDDTtOutputStream).Start()
	(*proxyTtOutputStream).Start()
	(*writeNodeOutputStream).Start()
	timeSyncProducer.Start()
	expected := []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	result1 := receiveMsg(proxyDMTtOutputStream)
	result1 := receiveMsg(proxyTtOutputStream)
	assert.Equal(t, expected, result1)
	expected = []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	result1 = receiveMsg(proxyDDTtOutputStream)
	assert.Equal(t, expected, result1)
	expected = []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	result2 := receiveMsg(writeNodeOutputStream)
	assert.Equal(t, expected, result2)

@ -15,8 +15,7 @@ type timeSyncMsgProducer struct {
	//hardTimeTickBarrier
	writeNodeTtBarrier TimeTickBarrier

	ddSyncStream ms.MsgStream // insert & delete
	dmSyncStream ms.MsgStream
	dmSyncStream  ms.MsgStream // insert & delete
	k2sSyncStream ms.MsgStream

	ctx context.Context
@ -35,9 +34,6 @@ func (syncMsgProducer *timeSyncMsgProducer) SetProxyTtBarrier(proxyTtBarrier Tim
func (syncMsgProducer *timeSyncMsgProducer) SetWriteNodeTtBarrier(writeNodeTtBarrier TimeTickBarrier) {
	syncMsgProducer.writeNodeTtBarrier = writeNodeTtBarrier
}
func (syncMsgProducer *timeSyncMsgProducer) SetDDSyncStream(ddSync ms.MsgStream) {
	syncMsgProducer.ddSyncStream = ddSync
}

func (syncMsgProducer *timeSyncMsgProducer) SetDMSyncStream(dmSync ms.MsgStream) {
	syncMsgProducer.dmSyncStream = dmSync
@ -47,7 +43,7 @@ func (syncMsgProducer *timeSyncMsgProducer) SetK2sSyncStream(k2sSync ms.MsgStrea
	syncMsgProducer.k2sSyncStream = k2sSync
}

func (syncMsgProducer *timeSyncMsgProducer) broadcastMsg(barrier TimeTickBarrier, streams []ms.MsgStream) error {
func (syncMsgProducer *timeSyncMsgProducer) broadcastMsg(barrier TimeTickBarrier, stream ms.MsgStream) error {
	for {
		select {
		case <-syncMsgProducer.ctx.Done():
@ -76,9 +72,7 @@ func (syncMsgProducer *timeSyncMsgProducer) broadcastMsg(barrier TimeTickBarrier
				TimeTickMsg: timeTickResult,
			}
			msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
			for _, stream := range streams {
				err = stream.Broadcast(&msgPack)
			}
			err = stream.Broadcast(&msgPack)
			if err != nil {
				return err
			}
@ -97,17 +91,16 @@ func (syncMsgProducer *timeSyncMsgProducer) Start() error {
		return err
	}

	go syncMsgProducer.broadcastMsg(syncMsgProducer.proxyTtBarrier, []ms.MsgStream{syncMsgProducer.dmSyncStream, syncMsgProducer.ddSyncStream})
	go syncMsgProducer.broadcastMsg(syncMsgProducer.writeNodeTtBarrier, []ms.MsgStream{syncMsgProducer.k2sSyncStream})
	go syncMsgProducer.broadcastMsg(syncMsgProducer.proxyTtBarrier, syncMsgProducer.dmSyncStream)
	go syncMsgProducer.broadcastMsg(syncMsgProducer.writeNodeTtBarrier, syncMsgProducer.k2sSyncStream)

	return nil
}

func (syncMsgProducer *timeSyncMsgProducer) Close() {
	syncMsgProducer.ddSyncStream.Close()
	syncMsgProducer.proxyTtBarrier.Close()
	syncMsgProducer.writeNodeTtBarrier.Close()
	syncMsgProducer.dmSyncStream.Close()
	syncMsgProducer.k2sSyncStream.Close()
	syncMsgProducer.cancel()
	syncMsgProducer.proxyTtBarrier.Close()
	syncMsgProducer.writeNodeTtBarrier.Close()
}
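The shape of the Start refactor above, side by side (receiver qualifiers shortened to p for readability; both forms are taken from the hunks in this diff):

// Before: one goroutine fanned each barrier out to a slice of streams.
go p.broadcastMsg(p.proxyTtBarrier, []ms.MsgStream{p.dmSyncStream, p.ddSyncStream})

// After: each barrier drives exactly one stream, and the DD stream is gone.
go p.broadcastMsg(p.proxyTtBarrier, p.dmSyncStream)
go p.broadcastMsg(p.writeNodeTtBarrier, p.k2sSyncStream)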
@ -47,7 +47,7 @@ type atomicObject struct {
// timestampOracle is used to maintain the logic of tso.
type timestampOracle struct {
	key    string
	kvBase kv.TxnBase
	kvBase kv.Base

	// TODO: remove saveInterval
	saveInterval time.Duration
@ -1,593 +0,0 @@
package msgstream

import (
	"github.com/golang/protobuf/proto"
	internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)

type MsgType = internalPb.MsgType

type TsMsg interface {
	BeginTs() Timestamp
	EndTs() Timestamp
	Type() MsgType
	HashKeys() []uint32
	Marshal(TsMsg) ([]byte, error)
	Unmarshal([]byte) (TsMsg, error)
}

type BaseMsg struct {
	BeginTimestamp Timestamp
	EndTimestamp   Timestamp
	HashValues     []uint32
}

func (bm *BaseMsg) BeginTs() Timestamp {
	return bm.BeginTimestamp
}

func (bm *BaseMsg) EndTs() Timestamp {
	return bm.EndTimestamp
}

func (bm *BaseMsg) HashKeys() []uint32 {
	return bm.HashValues
}

/////////////////////////////////////////Insert//////////////////////////////////////////
type InsertMsg struct {
	BaseMsg
	internalPb.InsertRequest
}

func (it *InsertMsg) Type() MsgType {
	return it.MsgType
}

func (it *InsertMsg) Marshal(input TsMsg) ([]byte, error) {
	insertMsg := input.(*InsertMsg)
	insertRequest := &insertMsg.InsertRequest
	mb, err := proto.Marshal(insertRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (it *InsertMsg) Unmarshal(input []byte) (TsMsg, error) {
	insertRequest := internalPb.InsertRequest{}
	err := proto.Unmarshal(input, &insertRequest)
	if err != nil {
		return nil, err
	}
	insertMsg := &InsertMsg{InsertRequest: insertRequest}
	for _, timestamp := range insertMsg.Timestamps {
		insertMsg.BeginTimestamp = timestamp
		insertMsg.EndTimestamp = timestamp
		break
	}
	for _, timestamp := range insertMsg.Timestamps {
		if timestamp > insertMsg.EndTimestamp {
			insertMsg.EndTimestamp = timestamp
		}
		if timestamp < insertMsg.BeginTimestamp {
			insertMsg.BeginTimestamp = timestamp
		}
	}

	return insertMsg, nil
}

/////////////////////////////////////////Delete//////////////////////////////////////////
type DeleteMsg struct {
	BaseMsg
	internalPb.DeleteRequest
}

func (dt *DeleteMsg) Type() MsgType {
	return dt.MsgType
}

func (dt *DeleteMsg) Marshal(input TsMsg) ([]byte, error) {
	deleteTask := input.(*DeleteMsg)
	deleteRequest := &deleteTask.DeleteRequest
	mb, err := proto.Marshal(deleteRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (dt *DeleteMsg) Unmarshal(input []byte) (TsMsg, error) {
	deleteRequest := internalPb.DeleteRequest{}
	err := proto.Unmarshal(input, &deleteRequest)
	if err != nil {
		return nil, err
	}
	deleteMsg := &DeleteMsg{DeleteRequest: deleteRequest}
	for _, timestamp := range deleteMsg.Timestamps {
		deleteMsg.BeginTimestamp = timestamp
		deleteMsg.EndTimestamp = timestamp
		break
	}
	for _, timestamp := range deleteMsg.Timestamps {
		if timestamp > deleteMsg.EndTimestamp {
			deleteMsg.EndTimestamp = timestamp
		}
		if timestamp < deleteMsg.BeginTimestamp {
			deleteMsg.BeginTimestamp = timestamp
		}
	}

	return deleteMsg, nil
}

/////////////////////////////////////////Search//////////////////////////////////////////
type SearchMsg struct {
	BaseMsg
	internalPb.SearchRequest
}

func (st *SearchMsg) Type() MsgType {
	return st.MsgType
}

func (st *SearchMsg) Marshal(input TsMsg) ([]byte, error) {
	searchTask := input.(*SearchMsg)
	searchRequest := &searchTask.SearchRequest
	mb, err := proto.Marshal(searchRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (st *SearchMsg) Unmarshal(input []byte) (TsMsg, error) {
	searchRequest := internalPb.SearchRequest{}
	err := proto.Unmarshal(input, &searchRequest)
	if err != nil {
		return nil, err
	}
	searchMsg := &SearchMsg{SearchRequest: searchRequest}
	searchMsg.BeginTimestamp = searchMsg.Timestamp
	searchMsg.EndTimestamp = searchMsg.Timestamp

	return searchMsg, nil
}

/////////////////////////////////////////SearchResult//////////////////////////////////////////
type SearchResultMsg struct {
	BaseMsg
	internalPb.SearchResult
}

func (srt *SearchResultMsg) Type() MsgType {
	return srt.MsgType
}

func (srt *SearchResultMsg) Marshal(input TsMsg) ([]byte, error) {
	searchResultTask := input.(*SearchResultMsg)
	searchResultRequest := &searchResultTask.SearchResult
	mb, err := proto.Marshal(searchResultRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (srt *SearchResultMsg) Unmarshal(input []byte) (TsMsg, error) {
	searchResultRequest := internalPb.SearchResult{}
	err := proto.Unmarshal(input, &searchResultRequest)
	if err != nil {
		return nil, err
	}
	searchResultMsg := &SearchResultMsg{SearchResult: searchResultRequest}
	searchResultMsg.BeginTimestamp = searchResultMsg.Timestamp
	searchResultMsg.EndTimestamp = searchResultMsg.Timestamp

	return searchResultMsg, nil
}

/////////////////////////////////////////TimeTick//////////////////////////////////////////
type TimeTickMsg struct {
	BaseMsg
	internalPb.TimeTickMsg
}

func (tst *TimeTickMsg) Type() MsgType {
	return tst.MsgType
}

func (tst *TimeTickMsg) Marshal(input TsMsg) ([]byte, error) {
	timeTickTask := input.(*TimeTickMsg)
	timeTick := &timeTickTask.TimeTickMsg
	mb, err := proto.Marshal(timeTick)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (tst *TimeTickMsg) Unmarshal(input []byte) (TsMsg, error) {
	timeTickMsg := internalPb.TimeTickMsg{}
	err := proto.Unmarshal(input, &timeTickMsg)
	if err != nil {
		return nil, err
	}
	timeTick := &TimeTickMsg{TimeTickMsg: timeTickMsg}
	timeTick.BeginTimestamp = timeTick.Timestamp
	timeTick.EndTimestamp = timeTick.Timestamp

	return timeTick, nil
}

/////////////////////////////////////////QueryNodeSegStats//////////////////////////////////////////
type QueryNodeSegStatsMsg struct {
	BaseMsg
	internalPb.QueryNodeSegStats
}

func (qs *QueryNodeSegStatsMsg) Type() MsgType {
	return qs.MsgType
}

func (qs *QueryNodeSegStatsMsg) Marshal(input TsMsg) ([]byte, error) {
	queryNodeSegStatsTask := input.(*QueryNodeSegStatsMsg)
	queryNodeSegStats := &queryNodeSegStatsTask.QueryNodeSegStats
	mb, err := proto.Marshal(queryNodeSegStats)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (qs *QueryNodeSegStatsMsg) Unmarshal(input []byte) (TsMsg, error) {
	queryNodeSegStats := internalPb.QueryNodeSegStats{}
	err := proto.Unmarshal(input, &queryNodeSegStats)
	if err != nil {
		return nil, err
	}
	queryNodeSegStatsMsg := &QueryNodeSegStatsMsg{QueryNodeSegStats: queryNodeSegStats}

	return queryNodeSegStatsMsg, nil
}

///////////////////////////////////////////Key2Seg//////////////////////////////////////////
//type Key2SegMsg struct {
//	BaseMsg
//	internalPb.Key2SegMsg
//}
//
//func (k2st *Key2SegMsg) Type() MsgType {
//	return
//}

/////////////////////////////////////////CreateCollection//////////////////////////////////////////
type CreateCollectionMsg struct {
	BaseMsg
	internalPb.CreateCollectionRequest
}

func (cc *CreateCollectionMsg) Type() MsgType {
	return cc.MsgType
}

func (cc *CreateCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
	createCollectionMsg := input.(*CreateCollectionMsg)
	createCollectionRequest := &createCollectionMsg.CreateCollectionRequest
	mb, err := proto.Marshal(createCollectionRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (cc *CreateCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
	createCollectionRequest := internalPb.CreateCollectionRequest{}
	err := proto.Unmarshal(input, &createCollectionRequest)
	if err != nil {
		return nil, err
	}
	createCollectionMsg := &CreateCollectionMsg{CreateCollectionRequest: createCollectionRequest}
	createCollectionMsg.BeginTimestamp = createCollectionMsg.Timestamp
	createCollectionMsg.EndTimestamp = createCollectionMsg.Timestamp

	return createCollectionMsg, nil
}

/////////////////////////////////////////DropCollection//////////////////////////////////////////
type DropCollectionMsg struct {
	BaseMsg
	internalPb.DropCollectionRequest
}

func (dc *DropCollectionMsg) Type() MsgType {
	return dc.MsgType
}

func (dc *DropCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
	dropCollectionMsg := input.(*DropCollectionMsg)
	dropCollectionRequest := &dropCollectionMsg.DropCollectionRequest
	mb, err := proto.Marshal(dropCollectionRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (dc *DropCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
	dropCollectionRequest := internalPb.DropCollectionRequest{}
	err := proto.Unmarshal(input, &dropCollectionRequest)
	if err != nil {
		return nil, err
	}
	dropCollectionMsg := &DropCollectionMsg{DropCollectionRequest: dropCollectionRequest}
	dropCollectionMsg.BeginTimestamp = dropCollectionMsg.Timestamp
	dropCollectionMsg.EndTimestamp = dropCollectionMsg.Timestamp

	return dropCollectionMsg, nil
}

/////////////////////////////////////////HasCollection//////////////////////////////////////////
type HasCollectionMsg struct {
	BaseMsg
	internalPb.HasCollectionRequest
}

func (hc *HasCollectionMsg) Type() MsgType {
	return hc.MsgType
}

func (hc *HasCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
	hasCollectionMsg := input.(*HasCollectionMsg)
	hasCollectionRequest := &hasCollectionMsg.HasCollectionRequest
	mb, err := proto.Marshal(hasCollectionRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (hc *HasCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
	hasCollectionRequest := internalPb.HasCollectionRequest{}
	err := proto.Unmarshal(input, &hasCollectionRequest)
	if err != nil {
		return nil, err
	}
	hasCollectionMsg := &HasCollectionMsg{HasCollectionRequest: hasCollectionRequest}
	hasCollectionMsg.BeginTimestamp = hasCollectionMsg.Timestamp
	hasCollectionMsg.EndTimestamp = hasCollectionMsg.Timestamp

	return hasCollectionMsg, nil
}

/////////////////////////////////////////DescribeCollection//////////////////////////////////////////
type DescribeCollectionMsg struct {
	BaseMsg
	internalPb.DescribeCollectionRequest
}

func (dc *DescribeCollectionMsg) Type() MsgType {
	return dc.MsgType
}

func (dc *DescribeCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
	describeCollectionMsg := input.(*DescribeCollectionMsg)
	describeCollectionRequest := &describeCollectionMsg.DescribeCollectionRequest
	mb, err := proto.Marshal(describeCollectionRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (dc *DescribeCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
	describeCollectionRequest := internalPb.DescribeCollectionRequest{}
	err := proto.Unmarshal(input, &describeCollectionRequest)
	if err != nil {
		return nil, err
	}
	describeCollectionMsg := &DescribeCollectionMsg{DescribeCollectionRequest: describeCollectionRequest}
	describeCollectionMsg.BeginTimestamp = describeCollectionMsg.Timestamp
	describeCollectionMsg.EndTimestamp = describeCollectionMsg.Timestamp

	return describeCollectionMsg, nil
}

/////////////////////////////////////////ShowCollection//////////////////////////////////////////
type ShowCollectionMsg struct {
	BaseMsg
	internalPb.ShowCollectionRequest
}

func (sc *ShowCollectionMsg) Type() MsgType {
	return sc.MsgType
}

func (sc *ShowCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
	showCollectionMsg := input.(*ShowCollectionMsg)
	showCollectionRequest := &showCollectionMsg.ShowCollectionRequest
	mb, err := proto.Marshal(showCollectionRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (sc *ShowCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
	showCollectionRequest := internalPb.ShowCollectionRequest{}
	err := proto.Unmarshal(input, &showCollectionRequest)
	if err != nil {
		return nil, err
	}
	showCollectionMsg := &ShowCollectionMsg{ShowCollectionRequest: showCollectionRequest}
	showCollectionMsg.BeginTimestamp = showCollectionMsg.Timestamp
	showCollectionMsg.EndTimestamp = showCollectionMsg.Timestamp

	return showCollectionMsg, nil
}

/////////////////////////////////////////CreatePartition//////////////////////////////////////////
type CreatePartitionMsg struct {
	BaseMsg
	internalPb.CreatePartitionRequest
}

func (cc *CreatePartitionMsg) Type() MsgType {
	return cc.MsgType
}

func (cc *CreatePartitionMsg) Marshal(input TsMsg) ([]byte, error) {
	createPartitionMsg := input.(*CreatePartitionMsg)
	createPartitionRequest := &createPartitionMsg.CreatePartitionRequest
	mb, err := proto.Marshal(createPartitionRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (cc *CreatePartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
	createPartitionRequest := internalPb.CreatePartitionRequest{}
	err := proto.Unmarshal(input, &createPartitionRequest)
	if err != nil {
		return nil, err
	}
	createPartitionMsg := &CreatePartitionMsg{CreatePartitionRequest: createPartitionRequest}
	createPartitionMsg.BeginTimestamp = createPartitionMsg.Timestamp
	createPartitionMsg.EndTimestamp = createPartitionMsg.Timestamp

	return createPartitionMsg, nil
}

/////////////////////////////////////////DropPartition//////////////////////////////////////////
type DropPartitionMsg struct {
	BaseMsg
	internalPb.DropPartitionRequest
}

func (dc *DropPartitionMsg) Type() MsgType {
	return dc.MsgType
}

func (dc *DropPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
	dropPartitionMsg := input.(*DropPartitionMsg)
	dropPartitionRequest := &dropPartitionMsg.DropPartitionRequest
	mb, err := proto.Marshal(dropPartitionRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (dc *DropPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
	dropPartitionRequest := internalPb.DropPartitionRequest{}
	err := proto.Unmarshal(input, &dropPartitionRequest)
	if err != nil {
		return nil, err
	}
	dropPartitionMsg := &DropPartitionMsg{DropPartitionRequest: dropPartitionRequest}
	dropPartitionMsg.BeginTimestamp = dropPartitionMsg.Timestamp
	dropPartitionMsg.EndTimestamp = dropPartitionMsg.Timestamp

	return dropPartitionMsg, nil
}

/////////////////////////////////////////HasPartition//////////////////////////////////////////
type HasPartitionMsg struct {
	BaseMsg
	internalPb.HasPartitionRequest
}

func (hc *HasPartitionMsg) Type() MsgType {
	return hc.MsgType
}

func (hc *HasPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
	hasPartitionMsg := input.(*HasPartitionMsg)
	hasPartitionRequest := &hasPartitionMsg.HasPartitionRequest
	mb, err := proto.Marshal(hasPartitionRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (hc *HasPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
	hasPartitionRequest := internalPb.HasPartitionRequest{}
	err := proto.Unmarshal(input, &hasPartitionRequest)
	if err != nil {
		return nil, err
	}
	hasPartitionMsg := &HasPartitionMsg{HasPartitionRequest: hasPartitionRequest}
	hasPartitionMsg.BeginTimestamp = hasPartitionMsg.Timestamp
	hasPartitionMsg.EndTimestamp = hasPartitionMsg.Timestamp

	return hasPartitionMsg, nil
}

/////////////////////////////////////////DescribePartition//////////////////////////////////////////
type DescribePartitionMsg struct {
	BaseMsg
	internalPb.DescribePartitionRequest
}

func (dc *DescribePartitionMsg) Type() MsgType {
	return dc.MsgType
}

func (dc *DescribePartitionMsg) Marshal(input TsMsg) ([]byte, error) {
	describePartitionMsg := input.(*DescribePartitionMsg)
	describePartitionRequest := &describePartitionMsg.DescribePartitionRequest
	mb, err := proto.Marshal(describePartitionRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (dc *DescribePartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
	describePartitionRequest := internalPb.DescribePartitionRequest{}
	err := proto.Unmarshal(input, &describePartitionRequest)
	if err != nil {
		return nil, err
	}
	describePartitionMsg := &DescribePartitionMsg{DescribePartitionRequest: describePartitionRequest}
	describePartitionMsg.BeginTimestamp = describePartitionMsg.Timestamp
	describePartitionMsg.EndTimestamp = describePartitionMsg.Timestamp

	return describePartitionMsg, nil
}

/////////////////////////////////////////ShowPartition//////////////////////////////////////////
type ShowPartitionMsg struct {
	BaseMsg
	internalPb.ShowPartitionRequest
}

func (sc *ShowPartitionMsg) Type() MsgType {
	return sc.MsgType
}

func (sc *ShowPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
	showPartitionMsg := input.(*ShowPartitionMsg)
	showPartitionRequest := &showPartitionMsg.ShowPartitionRequest
	mb, err := proto.Marshal(showPartitionRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (sc *ShowPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
	showPartitionRequest := internalPb.ShowPartitionRequest{}
	err := proto.Unmarshal(input, &showPartitionRequest)
	if err != nil {
		return nil, err
	}
	showPartitionMsg := &ShowPartitionMsg{ShowPartitionRequest: showPartitionRequest}
	showPartitionMsg.BeginTimestamp = showPartitionMsg.Timestamp
	showPartitionMsg.EndTimestamp = showPartitionMsg.Timestamp

	return showPartitionMsg, nil
}
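Every TsMsg implementation in the file above pairs proto Marshal/Unmarshal with Begin/End timestamp recovery. A round-trip sketch for TimeTickMsg, using only the types and methods shown (field values are illustrative):

in := &TimeTickMsg{BaseMsg: BaseMsg{HashValues: []uint32{0}}}
in.Timestamp = 42 // promoted field from internalPb.TimeTickMsg

buf, err := in.Marshal(in)
if err != nil {
	log.Fatal(err)
}
out, err := in.Unmarshal(buf) // Begin/EndTimestamp are restored from Timestamp
if err != nil {
	log.Fatal(err)
}
fmt.Println(out.BeginTs(), out.EndTs()) // 42 42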
263 internal/msgstream/task.go Normal file
@ -0,0 +1,263 @@
package msgstream
|
||||
|
||||
import (
|
||||
"github.com/golang/protobuf/proto"
|
||||
internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
|
||||
)
|
||||
|
||||
type MsgType = internalPb.MsgType
|
||||
|
||||
type TsMsg interface {
|
||||
BeginTs() Timestamp
|
||||
EndTs() Timestamp
|
||||
Type() MsgType
|
||||
HashKeys() []uint32
|
||||
Marshal(TsMsg) ([]byte, error)
|
||||
Unmarshal([]byte) (TsMsg, error)
|
||||
}

type BaseMsg struct {
	BeginTimestamp Timestamp
	EndTimestamp   Timestamp
	HashValues     []uint32
}

func (bm *BaseMsg) BeginTs() Timestamp {
	return bm.BeginTimestamp
}

func (bm *BaseMsg) EndTs() Timestamp {
	return bm.EndTimestamp
}

func (bm *BaseMsg) HashKeys() []uint32 {
	return bm.HashValues
}

/////////////////////////////////////////Insert//////////////////////////////////////////
type InsertMsg struct {
	BaseMsg
	internalPb.InsertRequest
}

func (it *InsertMsg) Type() MsgType {
	return it.MsgType
}

func (it *InsertMsg) Marshal(input TsMsg) ([]byte, error) {
	insertMsg := input.(*InsertMsg)
	insertRequest := &insertMsg.InsertRequest
	mb, err := proto.Marshal(insertRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (it *InsertMsg) Unmarshal(input []byte) (TsMsg, error) {
	insertRequest := internalPb.InsertRequest{}
	err := proto.Unmarshal(input, &insertRequest)
	if err != nil {
		return nil, err
	}
	insertMsg := &InsertMsg{InsertRequest: insertRequest}
	for _, timestamp := range insertMsg.Timestamps {
		insertMsg.BeginTimestamp = timestamp
		insertMsg.EndTimestamp = timestamp
		break
	}
	for _, timestamp := range insertMsg.Timestamps {
		if timestamp > insertMsg.EndTimestamp {
			insertMsg.EndTimestamp = timestamp
		}
		if timestamp < insertMsg.BeginTimestamp {
			insertMsg.BeginTimestamp = timestamp
		}
	}

	return insertMsg, nil
}

/////////////////////////////////////////Delete//////////////////////////////////////////
type DeleteMsg struct {
	BaseMsg
	internalPb.DeleteRequest
}

func (dt *DeleteMsg) Type() MsgType {
	return dt.MsgType
}

func (dt *DeleteMsg) Marshal(input TsMsg) ([]byte, error) {
	deleteTask := input.(*DeleteMsg)
	deleteRequest := &deleteTask.DeleteRequest
	mb, err := proto.Marshal(deleteRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (dt *DeleteMsg) Unmarshal(input []byte) (TsMsg, error) {
	deleteRequest := internalPb.DeleteRequest{}
	err := proto.Unmarshal(input, &deleteRequest)
	if err != nil {
		return nil, err
	}
	deleteMsg := &DeleteMsg{DeleteRequest: deleteRequest}
	for _, timestamp := range deleteMsg.Timestamps {
		deleteMsg.BeginTimestamp = timestamp
		deleteMsg.EndTimestamp = timestamp
		break
	}
	for _, timestamp := range deleteMsg.Timestamps {
		if timestamp > deleteMsg.EndTimestamp {
			deleteMsg.EndTimestamp = timestamp
		}
		if timestamp < deleteMsg.BeginTimestamp {
			deleteMsg.BeginTimestamp = timestamp
		}
	}

	return deleteMsg, nil
}

/////////////////////////////////////////Search//////////////////////////////////////////
type SearchMsg struct {
	BaseMsg
	internalPb.SearchRequest
}

func (st *SearchMsg) Type() MsgType {
	return st.MsgType
}

func (st *SearchMsg) Marshal(input TsMsg) ([]byte, error) {
	searchTask := input.(*SearchMsg)
	searchRequest := &searchTask.SearchRequest
	mb, err := proto.Marshal(searchRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (st *SearchMsg) Unmarshal(input []byte) (TsMsg, error) {
	searchRequest := internalPb.SearchRequest{}
	err := proto.Unmarshal(input, &searchRequest)
	if err != nil {
		return nil, err
	}
	searchMsg := &SearchMsg{SearchRequest: searchRequest}
	searchMsg.BeginTimestamp = searchMsg.Timestamp
	searchMsg.EndTimestamp = searchMsg.Timestamp

	return searchMsg, nil
}

/////////////////////////////////////////SearchResult//////////////////////////////////////////
type SearchResultMsg struct {
	BaseMsg
	internalPb.SearchResult
}

func (srt *SearchResultMsg) Type() MsgType {
	return srt.MsgType
}

func (srt *SearchResultMsg) Marshal(input TsMsg) ([]byte, error) {
	searchResultTask := input.(*SearchResultMsg)
	searchResultRequest := &searchResultTask.SearchResult
	mb, err := proto.Marshal(searchResultRequest)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (srt *SearchResultMsg) Unmarshal(input []byte) (TsMsg, error) {
	searchResultRequest := internalPb.SearchResult{}
	err := proto.Unmarshal(input, &searchResultRequest)
	if err != nil {
		return nil, err
	}
	searchResultMsg := &SearchResultMsg{SearchResult: searchResultRequest}
	searchResultMsg.BeginTimestamp = searchResultMsg.Timestamp
	searchResultMsg.EndTimestamp = searchResultMsg.Timestamp

	return searchResultMsg, nil
}

/////////////////////////////////////////TimeTick//////////////////////////////////////////
type TimeTickMsg struct {
	BaseMsg
	internalPb.TimeTickMsg
}

func (tst *TimeTickMsg) Type() MsgType {
	return tst.MsgType
}

func (tst *TimeTickMsg) Marshal(input TsMsg) ([]byte, error) {
	timeTickTask := input.(*TimeTickMsg)
	timeTick := &timeTickTask.TimeTickMsg
	mb, err := proto.Marshal(timeTick)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (tst *TimeTickMsg) Unmarshal(input []byte) (TsMsg, error) {
	timeTickMsg := internalPb.TimeTickMsg{}
	err := proto.Unmarshal(input, &timeTickMsg)
	if err != nil {
		return nil, err
	}
	timeTick := &TimeTickMsg{TimeTickMsg: timeTickMsg}
	timeTick.BeginTimestamp = timeTick.Timestamp
	timeTick.EndTimestamp = timeTick.Timestamp

	return timeTick, nil
}

/////////////////////////////////////////QueryNodeSegStats//////////////////////////////////////////
type QueryNodeSegStatsMsg struct {
	BaseMsg
	internalPb.QueryNodeSegStats
}

func (qs *QueryNodeSegStatsMsg) Type() MsgType {
	return qs.MsgType
}

func (qs *QueryNodeSegStatsMsg) Marshal(input TsMsg) ([]byte, error) {
	queryNodeSegStatsTask := input.(*QueryNodeSegStatsMsg)
	queryNodeSegStats := &queryNodeSegStatsTask.QueryNodeSegStats
	mb, err := proto.Marshal(queryNodeSegStats)
	if err != nil {
		return nil, err
	}
	return mb, nil
}

func (qs *QueryNodeSegStatsMsg) Unmarshal(input []byte) (TsMsg, error) {
	queryNodeSegStats := internalPb.QueryNodeSegStats{}
	err := proto.Unmarshal(input, &queryNodeSegStats)
	if err != nil {
		return nil, err
	}
	queryNodeSegStatsMsg := &QueryNodeSegStatsMsg{QueryNodeSegStats: queryNodeSegStats}

	return queryNodeSegStatsMsg, nil
}

///////////////////////////////////////////Key2Seg//////////////////////////////////////////
//type Key2SegMsg struct {
//	BaseMsg
//	internalPb.Key2SegMsg
//}
//
//func (k2st *Key2SegMsg) Type() MsgType {
//	return
//}
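The file ends with a commented-out Key2SegMsg stub whose Type() has an empty return. For illustration only, a version consistent with the other message types in this file might look like the sketch below; it assumes the generated internalPb.Key2SegMsg carries a MsgType field like the other requests, and it is not part of this commit:

// Hypothetical completion of the Key2SegMsg stub above.
type Key2SegMsg struct {
	BaseMsg
	internalPb.Key2SegMsg
}

func (k2st *Key2SegMsg) Type() MsgType {
	return k2st.MsgType // assumes the generated message has a msg_type field
}

func (k2st *Key2SegMsg) Marshal(input TsMsg) ([]byte, error) {
	key2SegMsg := input.(*Key2SegMsg)
	return proto.Marshal(&key2SegMsg.Key2SegMsg)
}

func (k2st *Key2SegMsg) Unmarshal(input []byte) (TsMsg, error) {
	key2SegMsg := internalPb.Key2SegMsg{}
	if err := proto.Unmarshal(input, &key2SegMsg); err != nil {
		return nil, err
	}
	return &Key2SegMsg{Key2SegMsg: key2SegMsg}, nil
}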
@ -30,11 +30,6 @@ func (dispatcher *UnmarshalDispatcher) addDefaultMsgTemplates() {
	searchMsg := SearchMsg{}
	searchResultMsg := SearchResultMsg{}
	timeTickMsg := TimeTickMsg{}
	createCollectionMsg := CreateCollectionMsg{}
	dropCollectionMsg := DropCollectionMsg{}
	createPartitionMsg := CreatePartitionMsg{}
	dropPartitionMsg := DropPartitionMsg{}

	queryNodeSegStatsMsg := QueryNodeSegStatsMsg{}
	dispatcher.tempMap = make(map[internalPb.MsgType]UnmarshalFunc)
	dispatcher.tempMap[internalPb.MsgType_kInsert] = insertMsg.Unmarshal
@ -43,11 +38,6 @@ func (dispatcher *UnmarshalDispatcher) addDefaultMsgTemplates() {
	dispatcher.tempMap[internalPb.MsgType_kSearchResult] = searchResultMsg.Unmarshal
	dispatcher.tempMap[internalPb.MsgType_kTimeTick] = timeTickMsg.Unmarshal
	dispatcher.tempMap[internalPb.MsgType_kQueryNodeSegStats] = queryNodeSegStatsMsg.Unmarshal
	dispatcher.tempMap[internalPb.MsgType_kCreateCollection] = createCollectionMsg.Unmarshal
	dispatcher.tempMap[internalPb.MsgType_kDropCollection] = dropCollectionMsg.Unmarshal
	dispatcher.tempMap[internalPb.MsgType_kCreatePartition] = createPartitionMsg.Unmarshal
	dispatcher.tempMap[internalPb.MsgType_kDropPartition] = dropPartitionMsg.Unmarshal

}

func NewUnmarshalDispatcher() *UnmarshalDispatcher {
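The dispatcher's tempMap ties each MsgType to the matching Unmarshal method from task.go, so a consumer can decode a payload without knowing its concrete type up front. The lookup path could be wrapped as below; tempMap and the UnmarshalFunc signature come from this diff, while the helper itself (and its fmt import) is a hypothetical sketch:

// Sketch of a type-driven decode using the dispatcher's template map.
func (dispatcher *UnmarshalDispatcher) unmarshalByType(payload []byte, msgType internalPb.MsgType) (TsMsg, error) {
	unmarshalFunc, ok := dispatcher.tempMap[msgType]
	if !ok {
		return nil, fmt.Errorf("no unmarshal template registered for msg type %v", msgType)
	}
	// Each registered func is one of the Unmarshal methods defined in task.go.
	return unmarshalFunc(payload)
}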
@ -14,7 +14,6 @@ enum MsgType {
    kHasCollection = 102;
    kDescribeCollection = 103;
    kShowCollections = 104;
    kGetSysConfigs = 105;

    /* Definition Requests: partition */
    kCreatePartition = 200;
@ -34,7 +33,6 @@ enum MsgType {
    /* System Control */
    kTimeTick = 1200;
    kQueryNodeSegStats = 1201;

}

enum PeerRole {
@ -225,19 +223,6 @@ message SearchRequest {

}

/**
 * @brief Request of GetSysConfigs
 */
message SysConfigRequest {
    MsgType msg_type = 1;
    int64 reqID = 2;
    int64 proxyID = 3;
    uint64 timestamp = 4;
    repeated string keys = 5;
    repeated string key_prefixes = 6;
}


message SearchResult {
    MsgType msg_type = 1;
    common.Status status = 2;
@ -281,4 +266,4 @@ message QueryNodeSegStats {
    MsgType msg_type = 1;
    int64 peerID = 2;
    repeated SegmentStats seg_stats = 3;
}
}
@ -32,7 +32,6 @@ const (
	MsgType_kHasCollection      MsgType = 102
	MsgType_kDescribeCollection MsgType = 103
	MsgType_kShowCollections    MsgType = 104
	MsgType_kGetSysConfigs      MsgType = 105
	// Definition Requests: partition
	MsgType_kCreatePartition MsgType = 200
	MsgType_kDropPartition   MsgType = 201
@ -57,7 +56,6 @@ var MsgType_name = map[int32]string{
	102:  "kHasCollection",
	103:  "kDescribeCollection",
	104:  "kShowCollections",
	105:  "kGetSysConfigs",
	200:  "kCreatePartition",
	201:  "kDropPartition",
	202:  "kHasPartition",
@ -78,7 +76,6 @@ var MsgType_value = map[string]int32{
	"kHasCollection":      102,
	"kDescribeCollection": 103,
	"kShowCollections":    104,
	"kGetSysConfigs":      105,
	"kCreatePartition":    200,
	"kDropPartition":      201,
	"kHasPartition":       202,
@ -1582,87 +1579,6 @@ func (m *SearchRequest) GetQuery() *commonpb.Blob {
	return nil
}

//*
// @brief Request of GetSysConfigs
type SysConfigRequest struct {
	MsgType              MsgType  `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
	ReqID                int64    `protobuf:"varint,2,opt,name=reqID,proto3" json:"reqID,omitempty"`
	ProxyID              int64    `protobuf:"varint,3,opt,name=proxyID,proto3" json:"proxyID,omitempty"`
	Timestamp            uint64   `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
	Keys                 []string `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"`
	KeyPrefixes          []string `protobuf:"bytes,6,rep,name=key_prefixes,json=keyPrefixes,proto3" json:"key_prefixes,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *SysConfigRequest) Reset()         { *m = SysConfigRequest{} }
func (m *SysConfigRequest) String() string { return proto.CompactTextString(m) }
func (*SysConfigRequest) ProtoMessage()    {}
func (*SysConfigRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_7eb37f6b80b23116, []int{21}
}

func (m *SysConfigRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SysConfigRequest.Unmarshal(m, b)
}
func (m *SysConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SysConfigRequest.Marshal(b, m, deterministic)
}
func (m *SysConfigRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SysConfigRequest.Merge(m, src)
}
func (m *SysConfigRequest) XXX_Size() int {
	return xxx_messageInfo_SysConfigRequest.Size(m)
}
func (m *SysConfigRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_SysConfigRequest.DiscardUnknown(m)
}

var xxx_messageInfo_SysConfigRequest proto.InternalMessageInfo

func (m *SysConfigRequest) GetMsgType() MsgType {
	if m != nil {
		return m.MsgType
	}
	return MsgType_kNone
}

func (m *SysConfigRequest) GetReqID() int64 {
	if m != nil {
		return m.ReqID
	}
	return 0
}

func (m *SysConfigRequest) GetProxyID() int64 {
	if m != nil {
		return m.ProxyID
	}
	return 0
}

func (m *SysConfigRequest) GetTimestamp() uint64 {
	if m != nil {
		return m.Timestamp
	}
	return 0
}

func (m *SysConfigRequest) GetKeys() []string {
	if m != nil {
		return m.Keys
	}
	return nil
}

func (m *SysConfigRequest) GetKeyPrefixes() []string {
	if m != nil {
		return m.KeyPrefixes
	}
	return nil
}

type SearchResult struct {
	MsgType MsgType          `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
	Status  *commonpb.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
@ -1681,7 +1597,7 @@ func (m *SearchResult) Reset() { *m = SearchResult{} }
func (m *SearchResult) String() string { return proto.CompactTextString(m) }
func (*SearchResult) ProtoMessage()    {}
func (*SearchResult) Descriptor() ([]byte, []int) {
	return fileDescriptor_7eb37f6b80b23116, []int{22}
	return fileDescriptor_7eb37f6b80b23116, []int{21}
}

func (m *SearchResult) XXX_Unmarshal(b []byte) error {
@ -1771,7 +1687,7 @@ func (m *TimeTickMsg) Reset() { *m = TimeTickMsg{} }
func (m *TimeTickMsg) String() string { return proto.CompactTextString(m) }
func (*TimeTickMsg) ProtoMessage()    {}
func (*TimeTickMsg) Descriptor() ([]byte, []int) {
	return fileDescriptor_7eb37f6b80b23116, []int{23}
	return fileDescriptor_7eb37f6b80b23116, []int{22}
}

func (m *TimeTickMsg) XXX_Unmarshal(b []byte) error {
@ -1828,7 +1744,7 @@ func (m *Key2Seg) Reset() { *m = Key2Seg{} }
func (m *Key2Seg) String() string { return proto.CompactTextString(m) }
func (*Key2Seg) ProtoMessage()    {}
func (*Key2Seg) Descriptor() ([]byte, []int) {
	return fileDescriptor_7eb37f6b80b23116, []int{24}
	return fileDescriptor_7eb37f6b80b23116, []int{23}
}

func (m *Key2Seg) XXX_Unmarshal(b []byte) error {
@ -1896,7 +1812,7 @@ func (m *Key2SegMsg) Reset() { *m = Key2SegMsg{} }
func (m *Key2SegMsg) String() string { return proto.CompactTextString(m) }
func (*Key2SegMsg) ProtoMessage()    {}
func (*Key2SegMsg) Descriptor() ([]byte, []int) {
	return fileDescriptor_7eb37f6b80b23116, []int{25}
	return fileDescriptor_7eb37f6b80b23116, []int{24}
}

func (m *Key2SegMsg) XXX_Unmarshal(b []byte) error {
@ -1945,7 +1861,7 @@ func (m *SegmentStats) Reset() { *m = SegmentStats{} }
func (m *SegmentStats) String() string { return proto.CompactTextString(m) }
func (*SegmentStats) ProtoMessage()    {}
func (*SegmentStats) Descriptor() ([]byte, []int) {
	return fileDescriptor_7eb37f6b80b23116, []int{26}
	return fileDescriptor_7eb37f6b80b23116, []int{25}
}

func (m *SegmentStats) XXX_Unmarshal(b []byte) error {
@ -2007,7 +1923,7 @@ func (m *QueryNodeSegStats) Reset() { *m = QueryNodeSegStats{} }
func (m *QueryNodeSegStats) String() string { return proto.CompactTextString(m) }
func (*QueryNodeSegStats) ProtoMessage()    {}
func (*QueryNodeSegStats) Descriptor() ([]byte, []int) {
	return fileDescriptor_7eb37f6b80b23116, []int{27}
	return fileDescriptor_7eb37f6b80b23116, []int{26}
}

func (m *QueryNodeSegStats) XXX_Unmarshal(b []byte) error {
@ -2073,7 +1989,6 @@ func init() {
	proto.RegisterType((*InsertRequest)(nil), "milvus.proto.internal.InsertRequest")
	proto.RegisterType((*DeleteRequest)(nil), "milvus.proto.internal.DeleteRequest")
	proto.RegisterType((*SearchRequest)(nil), "milvus.proto.internal.SearchRequest")
	proto.RegisterType((*SysConfigRequest)(nil), "milvus.proto.internal.SysConfigRequest")
	proto.RegisterType((*SearchResult)(nil), "milvus.proto.internal.SearchResult")
	proto.RegisterType((*TimeTickMsg)(nil), "milvus.proto.internal.TimeTickMsg")
	proto.RegisterType((*Key2Seg)(nil), "milvus.proto.internal.Key2Seg")
@ -2085,98 +2000,94 @@ func init() {
func init() { proto.RegisterFile("internal_msg.proto", fileDescriptor_7eb37f6b80b23116) }

var fileDescriptor_7eb37f6b80b23116 = []byte{
	// 1474 bytes of a gzipped FileDescriptorProto
	// (old gzipped descriptor bytes omitted)
	// 1416 bytes of a gzipped FileDescriptorProto
	// (new gzipped descriptor bytes omitted)
}

@ -89,15 +89,6 @@ service Master {
    rpc ShowPartitions(internal.ShowPartitionRequest) returns (service.StringListResponse) {}


    /**
     * @brief This method is used to get system configs
     *
     * @param SysConfigRequest, keys or key_prefixes of the configs.
     *
     * @return SysConfigResponse
     */
    rpc GetSysConfigs(internal.SysConfigRequest) returns (service.SysConfigResponse) {}

    rpc AllocTimestamp(internal.TsoRequest) returns (internal.TsoResponse) {}
    rpc AllocID(internal.IDRequest) returns (internal.IDResponse) {}

@ -30,38 +30,36 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
func init() { proto.RegisterFile("master.proto", fileDescriptor_f9c348dec43a6705) }

var fileDescriptor_f9c348dec43a6705 = []byte{
	// 484 bytes of a gzipped FileDescriptorProto
	// (old gzipped descriptor bytes omitted)
	// 458 bytes of a gzipped FileDescriptorProto
	// (new gzipped descriptor bytes omitted)
}

// Reference imports to suppress errors if they are not otherwise used.
@ -136,13 +134,6 @@ type MasterClient interface {
	//
	// @return StringListResponse
	ShowPartitions(ctx context.Context, in *internalpb.ShowPartitionRequest, opts ...grpc.CallOption) (*servicepb.StringListResponse, error)
	//*
	// @brief This method is used to get system configs
	//
	// @param SysConfigRequest, keys or key_prefixes of the configs.
	//
	// @return SysConfigResponse
	GetSysConfigs(ctx context.Context, in *internalpb.SysConfigRequest, opts ...grpc.CallOption) (*servicepb.SysConfigResponse, error)
	AllocTimestamp(ctx context.Context, in *internalpb.TsoRequest, opts ...grpc.CallOption) (*internalpb.TsoResponse, error)
	AllocID(ctx context.Context, in *internalpb.IDRequest, opts ...grpc.CallOption) (*internalpb.IDResponse, error)
	AssignSegmentID(ctx context.Context, in *internalpb.AssignSegIDRequest, opts ...grpc.CallOption) (*internalpb.AssignSegIDResponse, error)
@ -246,15 +237,6 @@ func (c *masterClient) ShowPartitions(ctx context.Context, in *internalpb.ShowPa
	return out, nil
}

func (c *masterClient) GetSysConfigs(ctx context.Context, in *internalpb.SysConfigRequest, opts ...grpc.CallOption) (*servicepb.SysConfigResponse, error) {
	out := new(servicepb.SysConfigResponse)
	err := c.cc.Invoke(ctx, "/milvus.proto.master.Master/GetSysConfigs", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *masterClient) AllocTimestamp(ctx context.Context, in *internalpb.TsoRequest, opts ...grpc.CallOption) (*internalpb.TsoResponse, error) {
	out := new(internalpb.TsoResponse)
	err := c.cc.Invoke(ctx, "/milvus.proto.master.Master/AllocTimestamp", in, out, opts...)
@ -344,13 +326,6 @@ type MasterServer interface {
	//
	// @return StringListResponse
	ShowPartitions(context.Context, *internalpb.ShowPartitionRequest) (*servicepb.StringListResponse, error)
	//*
	// @brief This method is used to get system configs
	//
	// @param SysConfigRequest, keys or key_prefixes of the configs.
	//
	// @return SysConfigResponse
	GetSysConfigs(context.Context, *internalpb.SysConfigRequest) (*servicepb.SysConfigResponse, error)
	AllocTimestamp(context.Context, *internalpb.TsoRequest) (*internalpb.TsoResponse, error)
	AllocID(context.Context, *internalpb.IDRequest) (*internalpb.IDResponse, error)
	AssignSegmentID(context.Context, *internalpb.AssignSegIDRequest) (*internalpb.AssignSegIDResponse, error)
@ -390,9 +365,6 @@ func (*UnimplementedMasterServer) DescribePartition(ctx context.Context, req *in
func (*UnimplementedMasterServer) ShowPartitions(ctx context.Context, req *internalpb.ShowPartitionRequest) (*servicepb.StringListResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ShowPartitions not implemented")
}
func (*UnimplementedMasterServer) GetSysConfigs(ctx context.Context, req *internalpb.SysConfigRequest) (*servicepb.SysConfigResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetSysConfigs not implemented")
}
func (*UnimplementedMasterServer) AllocTimestamp(ctx context.Context, req *internalpb.TsoRequest) (*internalpb.TsoResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method AllocTimestamp not implemented")
}
@ -587,24 +559,6 @@ func _Master_ShowPartitions_Handler(srv interface{}, ctx context.Context, dec fu
	return interceptor(ctx, in, info, handler)
}

func _Master_GetSysConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(internalpb.SysConfigRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(MasterServer).GetSysConfigs(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/milvus.proto.master.Master/GetSysConfigs",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(MasterServer).GetSysConfigs(ctx, req.(*internalpb.SysConfigRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Master_AllocTimestamp_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(internalpb.TsoRequest)
	if err := dec(in); err != nil {
@ -703,10 +657,6 @@ var _Master_serviceDesc = grpc.ServiceDesc{
		MethodName: "ShowPartitions",
		Handler:    _Master_ShowPartitions_Handler,
	},
	{
		MethodName: "GetSysConfigs",
		Handler:    _Master_GetSysConfigs_Handler,
	},
	{
		MethodName: "AllocTimestamp",
		Handler:    _Master_AllocTimestamp_Handler,
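For context, the generated client reached the RPC being deleted here through a plain gRPC unary call. A sketch of a pre-removal caller follows; the masterpb import path, the dial address, and the requested key prefix are placeholders, not values confirmed by this diff:

// Sketch: calling Master.GetSysConfigs through the generated client.
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	internalpb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
	masterpb "github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
)

func main() {
	conn, err := grpc.Dial("localhost:53100", grpc.WithInsecure()) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := masterpb.NewMasterClient(conn)
	resp, err := client.GetSysConfigs(context.Background(), &internalpb.SysConfigRequest{
		MsgType:     internalpb.MsgType_kGetSysConfigs,
		KeyPrefixes: []string{"master"}, // placeholder prefix
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(resp.Keys, resp.Values)
}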
@ -135,14 +135,6 @@ message PartitionDescription {
    repeated common.KeyValuePair statistics = 3;
}

/**
 * @brief Response of GetSysConfig
 */
message SysConfigResponse {
    common.Status status = 1;
    repeated string keys = 2;
    repeated string values = 3;
}

/**
 * @brief Entities hit by query

@ -737,63 +737,6 @@ func (m *PartitionDescription) GetStatistics() []*commonpb.KeyValuePair {
	return nil
}

//*
// @brief Response of GetSysConfig
type SysConfigResponse struct {
	Status               *commonpb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
	Keys                 []string         `protobuf:"bytes,2,rep,name=keys,proto3" json:"keys,omitempty"`
	Values               []string         `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"`
	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
	XXX_unrecognized     []byte           `json:"-"`
	XXX_sizecache        int32            `json:"-"`
}

func (m *SysConfigResponse) Reset()         { *m = SysConfigResponse{} }
func (m *SysConfigResponse) String() string { return proto.CompactTextString(m) }
func (*SysConfigResponse) ProtoMessage()    {}
func (*SysConfigResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_b4b40b84dd2f74cb, []int{13}
}

func (m *SysConfigResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SysConfigResponse.Unmarshal(m, b)
}
func (m *SysConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SysConfigResponse.Marshal(b, m, deterministic)
}
func (m *SysConfigResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SysConfigResponse.Merge(m, src)
}
func (m *SysConfigResponse) XXX_Size() int {
	return xxx_messageInfo_SysConfigResponse.Size(m)
}
func (m *SysConfigResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_SysConfigResponse.DiscardUnknown(m)
}

var xxx_messageInfo_SysConfigResponse proto.InternalMessageInfo

func (m *SysConfigResponse) GetStatus() *commonpb.Status {
	if m != nil {
		return m.Status
	}
	return nil
}

func (m *SysConfigResponse) GetKeys() []string {
	if m != nil {
		return m.Keys
	}
	return nil
}

func (m *SysConfigResponse) GetValues() []string {
	if m != nil {
		return m.Values
	}
	return nil
}

//*
// @brief Entities hit by query
type Hits struct {
@ -809,7 +752,7 @@ func (m *Hits) Reset() { *m = Hits{} }
func (m *Hits) String() string { return proto.CompactTextString(m) }
func (*Hits) ProtoMessage()    {}
func (*Hits) Descriptor() ([]byte, []int) {
	return fileDescriptor_b4b40b84dd2f74cb, []int{14}
	return fileDescriptor_b4b40b84dd2f74cb, []int{13}
}

func (m *Hits) XXX_Unmarshal(b []byte) error {
@ -865,7 +808,7 @@ func (m *QueryResult) Reset() { *m = QueryResult{} }
func (m *QueryResult) String() string { return proto.CompactTextString(m) }
func (*QueryResult) ProtoMessage()    {}
func (*QueryResult) Descriptor() ([]byte, []int) {
	return fileDescriptor_b4b40b84dd2f74cb, []int{15}
	return fileDescriptor_b4b40b84dd2f74cb, []int{14}
}

func (m *QueryResult) XXX_Unmarshal(b []byte) error {
@ -915,7 +858,6 @@ func init() {
	proto.RegisterType((*IntegerRangeResponse)(nil), "milvus.proto.service.IntegerRangeResponse")
	proto.RegisterType((*CollectionDescription)(nil), "milvus.proto.service.CollectionDescription")
	proto.RegisterType((*PartitionDescription)(nil), "milvus.proto.service.PartitionDescription")
	proto.RegisterType((*SysConfigResponse)(nil), "milvus.proto.service.SysConfigResponse")
	proto.RegisterType((*Hits)(nil), "milvus.proto.service.Hits")
	proto.RegisterType((*QueryResult)(nil), "milvus.proto.service.QueryResult")
}
@ -923,53 +865,52 @@ func init() {

func init() { proto.RegisterFile("service_msg.proto", fileDescriptor_b4b40b84dd2f74cb) }

var fileDescriptor_b4b40b84dd2f74cb = []byte{
	// 762 bytes of a gzipped FileDescriptorProto
	// (old gzipped descriptor bytes omitted)
	// 739 bytes of a gzipped FileDescriptorProto
	// (new gzipped descriptor bytes omitted)
|
||||
0x2b, 0xe6, 0xfb, 0xec, 0x4a, 0x52, 0xdb, 0x6b, 0xa5, 0x19, 0x3d, 0x77, 0x98, 0x90, 0x11, 0x1b,
|
||||
0xc4, 0x92, 0x3a, 0x2d, 0x16, 0x48, 0x1a, 0x05, 0xc4, 0x6f, 0xa9, 0x34, 0x5b, 0xd9, 0x00, 0xc2,
|
||||
0xc1, 0xa0, 0xa0, 0x0c, 0xdb, 0x3f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xee, 0x08, 0x5d, 0xa4, 0xaf,
|
||||
0x07, 0x00, 0x00,
|
||||
}
|
||||
|
@@ -19,19 +19,8 @@ var Params ParamTable

 func (pt *ParamTable) Init() {
 	pt.BaseTable.Init()
-	err := pt.LoadYaml("milvus.yaml")
-	if err != nil {
-		panic(err)
-	}
-	err = pt.LoadYaml("advanced/proxy.yaml")
-	if err != nil {
-		panic(err)
-	}
-	err = pt.LoadYaml("advanced/channel.yaml")
-	if err != nil {
-		panic(err)
-	}
-	err = pt.LoadYaml("advanced/common.yaml")
+	err := pt.LoadYaml("advanced/proxy.yaml")
 	if err != nil {
 		panic(err)
 	}
@@ -48,15 +37,24 @@ func (pt *ParamTable) Init() {
 	pt.Save("_proxyID", proxyIDStr)
 }

-func (pt *ParamTable) NetWorkAddress() string {
-	addr, err := pt.Load("proxy.network.address")
+func (pt *ParamTable) NetworkPort() int {
+	return pt.ParseInt("proxy.port")
+}
+
+func (pt *ParamTable) NetworkAddress() string {
+	addr, err := pt.Load("proxy.address")
 	if err != nil {
 		panic(err)
 	}
-	if ip := net.ParseIP(addr); ip == nil {
-		panic("invalid ip proxy.network.address")
+
+	hostName, _ := net.LookupHost(addr)
+	if len(hostName) <= 0 {
+		if ip := net.ParseIP(addr); ip == nil {
+			panic("invalid ip proxy.address")
+		}
 	}
-	port, err := pt.Load("proxy.network.port")
+
+	port, err := pt.Load("proxy.port")
 	if err != nil {
 		panic(err)
 	}
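NetworkAddress now accepts a resolvable hostname as well as a literal IP: it falls back to net.ParseIP only when the DNS lookup returns no hosts. A self-contained sketch of that validation order (validateAddress is a hypothetical name, not part of this change):

    package main

    import (
        "fmt"
        "net"
    )

    // validateAddress mirrors the check above: accept the address if it
    // resolves via DNS; otherwise require it to parse as a literal IP.
    func validateAddress(addr string) error {
        if hosts, _ := net.LookupHost(addr); len(hosts) > 0 {
            return nil // resolvable hostname, or an IP echoed back by the resolver
        }
        if ip := net.ParseIP(addr); ip == nil {
            return fmt.Errorf("invalid ip %s", addr)
        }
        return nil
    }

    func main() {
        fmt.Println(validateAddress("localhost"))  // <nil>
        fmt.Println(validateAddress("0.0.0.0"))    // <nil>
        fmt.Println(validateAddress("not an ip"))  // non-nil error
    }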
@@ -150,25 +148,6 @@ func (pt *ParamTable) TimeTickInterval() time.Duration {
 	return time.Duration(interval) * time.Millisecond
 }

-func (pt *ParamTable) convertRangeToSlice(rangeStr, sep string) []int {
-	channelIDs := strings.Split(rangeStr, sep)
-	startStr := channelIDs[0]
-	endStr := channelIDs[1]
-	start, err := strconv.Atoi(startStr)
-	if err != nil {
-		panic(err)
-	}
-	end, err := strconv.Atoi(endStr)
-	if err != nil {
-		panic(err)
-	}
-	var ret []int
-	for i := start; i < end; i++ {
-		ret = append(ret, i)
-	}
-	return ret
-}
-
 func (pt *ParamTable) sliceIndex() int {
 	proxyID := pt.ProxyID()
 	proxyIDList := pt.ProxyIDList()
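The per-component convertRangeToSlice helper above is deleted in favor of a shared paramtable.ConvertRangeToIntSlice, used by the channel-name getters below. A plausible implementation, assuming it keeps the removed method's half-open [start, end) semantics:

    package paramtable

    import (
        "strconv"
        "strings"
    )

    // ConvertRangeToIntSlice expands a range string such as "0,2" into the
    // integers [start, end), e.g. []int{0, 1}. Malformed input panics, matching
    // the behavior of the method it replaces. A sketch; the real helper may differ.
    func ConvertRangeToIntSlice(rangeStr, sep string) []int {
        parts := strings.Split(rangeStr, sep)
        if len(parts) != 2 {
            panic("Illegal channel range num")
        }
        start, err := strconv.Atoi(parts[0])
        if err != nil {
            panic(err)
        }
        end, err := strconv.Atoi(parts[1])
        if err != nil {
            panic(err)
        }
        ret := make([]int, 0, end-start)
        for i := start; i < end; i++ {
            ret = append(ret, i)
        }
        return ret
    }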
@@ -190,7 +169,7 @@ func (pt *ParamTable) InsertChannelNames() []string {
 	if err != nil {
 		panic(err)
 	}
-	channelIDs := pt.convertRangeToSlice(iRangeStr, ",")
+	channelIDs := paramtable.ConvertRangeToIntSlice(iRangeStr, ",")
 	var ret []string
 	for _, ID := range channelIDs {
 		ret = append(ret, prefix+strconv.Itoa(ID))
@@ -216,19 +195,12 @@ func (pt *ParamTable) DeleteChannelNames() []string {
 	if err != nil {
 		panic(err)
 	}
-	channelIDs := pt.convertRangeToSlice(dRangeStr, ",")
+	channelIDs := paramtable.ConvertRangeToIntSlice(dRangeStr, ",")
 	var ret []string
 	for _, ID := range channelIDs {
 		ret = append(ret, prefix+strconv.Itoa(ID))
 	}
-	proxyNum := pt.ProxyNum()
-	sep := len(channelIDs) / proxyNum
-	index := pt.sliceIndex()
-	if index == -1 {
-		panic("ProxyID not Match with Config")
-	}
-	start := index * sep
-	return ret[start : start+sep]
+	return ret
 }

@@ -241,19 +213,12 @@ func (pt *ParamTable) K2SChannelNames() []string {
 	if err != nil {
 		panic(err)
 	}
-	channelIDs := pt.convertRangeToSlice(k2sRangeStr, ",")
+	channelIDs := paramtable.ConvertRangeToIntSlice(k2sRangeStr, ",")
 	var ret []string
 	for _, ID := range channelIDs {
 		ret = append(ret, prefix+strconv.Itoa(ID))
 	}
-	proxyNum := pt.ProxyNum()
-	sep := len(channelIDs) / proxyNum
-	index := pt.sliceIndex()
-	if index == -1 {
-		panic("ProxyID not Match with Config")
-	}
-	start := index * sep
-	return ret[start : start+sep]
+	return ret
 }

@@ -261,8 +226,19 @@ func (pt *ParamTable) SearchChannelNames() []string {
 	if err != nil {
 		panic(err)
 	}
-	prefix += "-0"
-	return []string{prefix}
+	prefix += "-"
+	sRangeStr, err := pt.Load("msgChannel.channelRange.search")
+	if err != nil {
+		panic(err)
+	}
+	channelIDs := paramtable.ConvertRangeToIntSlice(sRangeStr, ",")
+	var ret []string
+	for _, ID := range channelIDs {
+		ret = append(ret, prefix+strconv.Itoa(ID))
+	}
+	return ret
+	//prefix += "-0"
+	//return []string{prefix}
 }

@@ -275,7 +251,7 @@ func (pt *ParamTable) SearchResultChannelNames() []string {
 	if err != nil {
 		panic(err)
 	}
-	channelIDs := pt.convertRangeToSlice(sRangeStr, ",")
+	channelIDs := paramtable.ConvertRangeToIntSlice(sRangeStr, ",")
 	var ret []string
 	for _, ID := range channelIDs {
 		ret = append(ret, prefix+strconv.Itoa(ID))
@@ -321,144 +297,24 @@ func (pt *ParamTable) DataDefinitionChannelNames() []string {
 	return []string{prefix}
 }

-func (pt *ParamTable) parseInt64(key string) int64 {
-	valueStr, err := pt.Load(key)
-	if err != nil {
-		panic(err)
-	}
-	value, err := strconv.Atoi(valueStr)
-	if err != nil {
-		panic(err)
-	}
-	return int64(value)
-}
-
 func (pt *ParamTable) MsgStreamInsertBufSize() int64 {
-	return pt.parseInt64("proxy.msgStream.insert.bufSize")
+	return pt.ParseInt64("proxy.msgStream.insert.bufSize")
 }

 func (pt *ParamTable) MsgStreamSearchBufSize() int64 {
-	return pt.parseInt64("proxy.msgStream.search.bufSize")
+	return pt.ParseInt64("proxy.msgStream.search.bufSize")
 }

 func (pt *ParamTable) MsgStreamSearchResultBufSize() int64 {
-	return pt.parseInt64("proxy.msgStream.searchResult.recvBufSize")
+	return pt.ParseInt64("proxy.msgStream.searchResult.recvBufSize")
 }

 func (pt *ParamTable) MsgStreamSearchResultPulsarBufSize() int64 {
-	return pt.parseInt64("proxy.msgStream.searchResult.pulsarBufSize")
+	return pt.ParseInt64("proxy.msgStream.searchResult.pulsarBufSize")
 }

 func (pt *ParamTable) MsgStreamTimeTickBufSize() int64 {
-	return pt.parseInt64("proxy.msgStream.timeTick.bufSize")
-}
-
-func (pt *ParamTable) insertChannelNames() []string {
-	ch, err := pt.Load("msgChannel.chanNamePrefix.insert")
-	if err != nil {
-		log.Fatal(err)
-	}
-	channelRange, err := pt.Load("msgChannel.channelRange.insert")
-	if err != nil {
-		panic(err)
-	}
-
-	chanRange := strings.Split(channelRange, ",")
-	if len(chanRange) != 2 {
-		panic("Illegal channel range num")
-	}
-	channelBegin, err := strconv.Atoi(chanRange[0])
-	if err != nil {
-		panic(err)
-	}
-	channelEnd, err := strconv.Atoi(chanRange[1])
-	if err != nil {
-		panic(err)
-	}
-	if channelBegin < 0 || channelEnd < 0 {
-		panic("Illegal channel range value")
-	}
-	if channelBegin > channelEnd {
-		panic("Illegal channel range value")
-	}
-
-	channels := make([]string, channelEnd-channelBegin)
-	for i := 0; i < channelEnd-channelBegin; i++ {
-		channels[i] = ch + "-" + strconv.Itoa(channelBegin+i)
-	}
-	return channels
-}
-
-func (pt *ParamTable) searchChannelNames() []string {
-	ch, err := pt.Load("msgChannel.chanNamePrefix.search")
-	if err != nil {
-		log.Fatal(err)
-	}
-	channelRange, err := pt.Load("msgChannel.channelRange.search")
-	if err != nil {
-		panic(err)
-	}
-
-	chanRange := strings.Split(channelRange, ",")
-	if len(chanRange) != 2 {
-		panic("Illegal channel range num")
-	}
-	channelBegin, err := strconv.Atoi(chanRange[0])
-	if err != nil {
-		panic(err)
-	}
-	channelEnd, err := strconv.Atoi(chanRange[1])
-	if err != nil {
-		panic(err)
-	}
-	if channelBegin < 0 || channelEnd < 0 {
-		panic("Illegal channel range value")
-	}
-	if channelBegin > channelEnd {
-		panic("Illegal channel range value")
-	}
-
-	channels := make([]string, channelEnd-channelBegin)
-	for i := 0; i < channelEnd-channelBegin; i++ {
-		channels[i] = ch + "-" + strconv.Itoa(channelBegin+i)
-	}
-	return channels
-}
-
-func (pt *ParamTable) searchResultChannelNames() []string {
-	ch, err := pt.Load("msgChannel.chanNamePrefix.searchResult")
-	if err != nil {
-		log.Fatal(err)
-	}
-	channelRange, err := pt.Load("msgChannel.channelRange.searchResult")
-	if err != nil {
-		panic(err)
-	}
-
-	chanRange := strings.Split(channelRange, ",")
-	if len(chanRange) != 2 {
-		panic("Illegal channel range num")
-	}
-	channelBegin, err := strconv.Atoi(chanRange[0])
-	if err != nil {
-		panic(err)
-	}
-	channelEnd, err := strconv.Atoi(chanRange[1])
-	if err != nil {
-		panic(err)
-	}
-	if channelBegin < 0 || channelEnd < 0 {
-		panic("Illegal channel range value")
-	}
-	if channelBegin > channelEnd {
-		panic("Illegal channel range value")
-	}
-
-	channels := make([]string, channelEnd-channelBegin)
-	for i := 0; i < channelEnd-channelBegin; i++ {
-		channels[i] = ch + "-" + strconv.Itoa(channelBegin+i)
-	}
-	return channels
+	return pt.ParseInt64("proxy.msgStream.timeTick.bufSize")
 }

 func (pt *ParamTable) MaxNameLength() int64 {
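The private parseInt64 likewise moves onto the shared base table as ParseInt64. A minimal sketch of that accessor, with baseTable standing in for the project's BaseTable (an assumption, not the real type):

    package main

    import (
        "fmt"
        "strconv"
    )

    // baseTable is a stand-in for the project's BaseTable; only Load matters here.
    type baseTable struct{ kv map[string]string }

    func (bt *baseTable) Load(key string) (string, error) {
        v, ok := bt.kv[key]
        if !ok {
            return "", fmt.Errorf("key %s not found", key)
        }
        return v, nil
    }

    // ParseInt64 mirrors the removed parseInt64 helper: load, convert, panic on error.
    func (bt *baseTable) ParseInt64(key string) int64 {
        valueStr, err := bt.Load(key)
        if err != nil {
            panic(err)
        }
        value, err := strconv.ParseInt(valueStr, 10, 64)
        if err != nil {
            panic(err)
        }
        return value
    }

    func main() {
        bt := &baseTable{kv: map[string]string{"proxy.msgStream.insert.bufSize": "1024"}}
        fmt.Println(bt.ParseInt64("proxy.msgStream.insert.bufSize")) // 1024
    }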
@@ -5,6 +5,7 @@ import (
 	"log"
 	"math/rand"
 	"net"
+	"strconv"
 	"sync"
 	"time"

@@ -59,7 +60,7 @@ func CreateProxy(ctx context.Context) (*Proxy, error) {

 	p.queryMsgStream = msgstream.NewPulsarMsgStream(p.proxyLoopCtx, Params.MsgStreamSearchBufSize())
 	p.queryMsgStream.SetPulsarClient(pulsarAddress)
-	p.queryMsgStream.CreatePulsarProducers(Params.searchChannelNames())
+	p.queryMsgStream.CreatePulsarProducers(Params.SearchChannelNames())

 	masterAddr := Params.MasterAddress()
 	idAllocator, err := allocator.NewIDAllocator(p.proxyLoopCtx, masterAddr)
@@ -83,7 +84,7 @@ func CreateProxy(ctx context.Context) (*Proxy, error) {

 	p.manipulationMsgStream = msgstream.NewPulsarMsgStream(p.proxyLoopCtx, Params.MsgStreamInsertBufSize())
 	p.manipulationMsgStream.SetPulsarClient(pulsarAddress)
-	p.manipulationMsgStream.CreatePulsarProducers(Params.insertChannelNames())
+	p.manipulationMsgStream.CreatePulsarProducers(Params.InsertChannelNames())
 	repackFuncImpl := func(tsMsgs []msgstream.TsMsg, hashKeys [][]int32) (map[int32]*msgstream.MsgPack, error) {
 		return insertRepackFunc(tsMsgs, hashKeys, p.segAssigner, false)
 	}
@@ -137,7 +138,7 @@ func (p *Proxy) AddCloseCallback(callbacks ...func()) {
 func (p *Proxy) grpcLoop() {
 	defer p.proxyLoopWg.Done()

-	lis, err := net.Listen("tcp", Params.NetWorkAddress())
+	lis, err := net.Listen("tcp", ":"+strconv.Itoa(Params.NetworkPort()))
 	if err != nil {
 		log.Fatalf("Proxy grpc server fatal error=%v", err)
 	}
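With this change the proxy's gRPC listener binds by port only, so it accepts connections on every interface, while NetworkAddress remains a client-facing setting. The pattern in isolation, with a hypothetical port value:

    package main

    import (
        "fmt"
        "log"
        "net"
        "strconv"
    )

    func main() {
        port := 19530 // hypothetical; the proxy reads this from proxy.port
        lis, err := net.Listen("tcp", ":"+strconv.Itoa(port))
        if err != nil {
            log.Fatalf("listen failed: %v", err)
        }
        defer lis.Close()
        fmt.Println("listening on", lis.Addr()) // e.g. [::]:19530 — all interfaces
    }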
@@ -81,7 +81,7 @@ func setup() {

 	startMaster(ctx)
 	startProxy(ctx)
-	proxyAddr := Params.NetWorkAddress()
+	proxyAddr := Params.NetworkAddress()
 	addr := strings.Split(proxyAddr, ":")
 	if addr[0] == "0.0.0.0" {
 		proxyAddr = "127.0.0.1:" + addr[1]
@@ -364,7 +364,7 @@ func (sched *TaskScheduler) queryResultLoop() {
 	unmarshal := msgstream.NewUnmarshalDispatcher()
 	queryResultMsgStream := msgstream.NewPulsarMsgStream(sched.ctx, Params.MsgStreamSearchResultBufSize())
 	queryResultMsgStream.SetPulsarClient(Params.PulsarAddress())
-	queryResultMsgStream.CreatePulsarConsumers(Params.searchResultChannelNames(),
+	queryResultMsgStream.CreatePulsarConsumers(Params.SearchResultChannelNames(),
 		Params.ProxySubName(),
 		unmarshal,
 		Params.MsgStreamSearchResultPulsarBufSize())
@@ -31,7 +31,7 @@ import (
  * is up-to-date.
  */
 type collectionReplica interface {
-	getTSafe() *tSafe
+	getTSafe() tSafe

 	// collection
 	getCollectionNum() int
@@ -68,11 +68,11 @@ type collectionReplicaImpl struct {
 	collections []*Collection
 	segments    map[UniqueID]*Segment

-	tSafe *tSafe
+	tSafe tSafe
 }

 //----------------------------------------------------------------------------------------------------- tSafe
-func (colReplica *collectionReplicaImpl) getTSafe() *tSafe {
+func (colReplica *collectionReplicaImpl) getTSafe() tSafe {
 	return colReplica.tSafe
 }

@@ -111,6 +111,7 @@ func (colReplica *collectionReplicaImpl) removeCollection(collectionID UniqueID)
 		if col.ID() == collectionID {
 			for _, p := range *col.Partitions() {
 				for _, s := range *p.Segments() {
+					deleteSegment(colReplica.segments[s.ID()])
 					delete(colReplica.segments, s.ID())
 				}
 			}
@@ -202,6 +203,7 @@ func (colReplica *collectionReplicaImpl) removePartition(collectionID UniqueID,
 	for _, p := range *collection.Partitions() {
 		if p.Tag() == partitionTag {
 			for _, s := range *p.Segments() {
+				deleteSegment(colReplica.segments[s.ID()])
 				delete(colReplica.segments, s.ID())
 			}
 		} else {
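getTSafe now returns a tSafe interface value instead of a *tSafe pointer, removing the double dereference seen in serviceTimeNode below. A sketch of the interface shape this implies; the get/set method set is inferred from the call sites in this diff, not taken from the project's definition:

    package main

    import (
        "fmt"
        "sync"
    )

    type Timestamp = uint64

    // tSafe as an interface: only the operations this diff exercises.
    type tSafe interface {
        get() Timestamp
        set(t Timestamp)
    }

    type tSafeImpl struct {
        mu sync.Mutex
        t  Timestamp
    }

    func (ts *tSafeImpl) get() Timestamp {
        ts.mu.Lock()
        defer ts.mu.Unlock()
        return ts.t
    }

    func (ts *tSafeImpl) set(t Timestamp) {
        ts.mu.Lock()
        defer ts.mu.Unlock()
        ts.t = t
    }

    func main() {
        var ts tSafe = &tSafeImpl{}
        ts.set(42)
        fmt.Println(ts.get()) // 42
    }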
File diff suppressed because it is too large
@@ -1,179 +1,48 @@
 package querynode

 import (
-	"context"
 	"testing"

 	"github.com/golang/protobuf/proto"
 	"github.com/stretchr/testify/assert"
-	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
-	"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
-	"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
 )

 func TestCollection_Partitions(t *testing.T) {
-	ctx := context.Background()
-	node := NewQueryNode(ctx, 0)
-
+	node := newQueryNode()
 	collectionName := "collection0"
-	fieldVec := schemapb.FieldSchema{
-		Name: "vec",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_VECTOR_FLOAT,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "16",
-			},
-		},
-	}
-
-	fieldInt := schemapb.FieldSchema{
-		Name: "age",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_INT32,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "1",
-			},
-		},
-	}
-
-	schema := schemapb.CollectionSchema{
-		Name: collectionName,
-		AutoID: true,
-		Fields: []*schemapb.FieldSchema{
-			&fieldVec, &fieldInt,
-		},
-	}
-
-	collectionMeta := etcdpb.CollectionMeta{
-		ID: UniqueID(0),
-		Schema: &schema,
-		CreateTime: Timestamp(0),
-		SegmentIDs: []UniqueID{0},
-		PartitionTags: []string{"default"},
-	}
-
-	collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
-	assert.NotEqual(t, "", collectionMetaBlob)
-
-	var err = (*node.replica).addCollection(&collectionMeta, collectionMetaBlob)
-	assert.NoError(t, err)
-
-	collection, err := (*node.replica).getCollectionByName(collectionName)
+	collectionID := UniqueID(0)
+	initTestMeta(t, node, collectionName, collectionID, 0)
+
+	collection, err := node.replica.getCollectionByName(collectionName)
 	assert.NoError(t, err)
-
 	assert.Equal(t, collection.meta.Schema.Name, "collection0")
 	assert.Equal(t, collection.meta.ID, UniqueID(0))
-	assert.Equal(t, (*node.replica).getCollectionNum(), 1)
-
-	for _, tag := range collectionMeta.PartitionTags {
-		err := (*node.replica).addPartition(collection.ID(), tag)
-		assert.NoError(t, err)
-	}

 	partitions := collection.Partitions()
-	assert.Equal(t, len(collectionMeta.PartitionTags), len(*partitions))
+	assert.Equal(t, 1, len(*partitions))
 }

 func TestCollection_newCollection(t *testing.T) {
-	fieldVec := schemapb.FieldSchema{
-		Name: "vec",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_VECTOR_FLOAT,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "16",
-			},
-		},
-	}
+	collectionName := "collection0"
+	collectionID := UniqueID(0)
+	collectionMeta := genTestCollectionMeta(collectionName, collectionID)

-	fieldInt := schemapb.FieldSchema{
-		Name: "age",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_INT32,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "1",
-			},
-		},
-	}
-
-	schema := schemapb.CollectionSchema{
-		Name: "collection0",
-		AutoID: true,
-		Fields: []*schemapb.FieldSchema{
-			&fieldVec, &fieldInt,
-		},
-	}
-
-	collectionMeta := etcdpb.CollectionMeta{
-		ID: UniqueID(0),
-		Schema: &schema,
-		CreateTime: Timestamp(0),
-		SegmentIDs: []UniqueID{0},
-		PartitionTags: []string{"default"},
-	}
-
-	collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
+	collectionMetaBlob := proto.MarshalTextString(collectionMeta)
 	assert.NotEqual(t, "", collectionMetaBlob)

-	collection := newCollection(&collectionMeta, collectionMetaBlob)
-	assert.Equal(t, collection.meta.Schema.Name, "collection0")
-	assert.Equal(t, collection.meta.ID, UniqueID(0))
+	collection := newCollection(collectionMeta, collectionMetaBlob)
+	assert.Equal(t, collection.meta.Schema.Name, collectionName)
+	assert.Equal(t, collection.meta.ID, collectionID)
 }

 func TestCollection_deleteCollection(t *testing.T) {
-	fieldVec := schemapb.FieldSchema{
-		Name: "vec",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_VECTOR_FLOAT,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "16",
-			},
-		},
-	}
+	collectionName := "collection0"
+	collectionID := UniqueID(0)
+	collectionMeta := genTestCollectionMeta(collectionName, collectionID)

-	fieldInt := schemapb.FieldSchema{
-		Name: "age",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_INT32,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "1",
-			},
-		},
-	}
-
-	schema := schemapb.CollectionSchema{
-		Name: "collection0",
-		AutoID: true,
-		Fields: []*schemapb.FieldSchema{
-			&fieldVec, &fieldInt,
-		},
-	}
-
-	collectionMeta := etcdpb.CollectionMeta{
-		ID: UniqueID(0),
-		Schema: &schema,
-		CreateTime: Timestamp(0),
-		SegmentIDs: []UniqueID{0},
-		PartitionTags: []string{"default"},
-	}
-
-	collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
+	collectionMetaBlob := proto.MarshalTextString(collectionMeta)
 	assert.NotEqual(t, "", collectionMetaBlob)

-	collection := newCollection(&collectionMeta, collectionMetaBlob)
-	assert.Equal(t, collection.meta.Schema.Name, "collection0")
-	assert.Equal(t, collection.meta.ID, UniqueID(0))
-
+	collection := newCollection(collectionMeta, collectionMetaBlob)
+	assert.Equal(t, collection.meta.Schema.Name, collectionName)
+	assert.Equal(t, collection.meta.ID, collectionID)
 	deleteCollection(collection)
 }
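The schema-building boilerplate deleted above is factored into test helpers. Reconstructing from that deleted code, genTestCollectionMeta plausibly looks like the following (a sketch; the real helper lives elsewhere in the querynode package and may differ in detail — it reuses the same proto imports the old test dropped):

    package querynode

    import (
        "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
        "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
        "github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
    )

    // genTestCollectionMeta rebuilds the fixed two-field schema the old
    // inline test code constructed by hand.
    func genTestCollectionMeta(collectionName string, collectionID UniqueID) *etcdpb.CollectionMeta {
        fieldVec := schemapb.FieldSchema{
            Name:         "vec",
            IsPrimaryKey: false,
            DataType:     schemapb.DataType_VECTOR_FLOAT,
            TypeParams: []*commonpb.KeyValuePair{
                {Key: "dim", Value: "16"},
            },
        }
        fieldInt := schemapb.FieldSchema{
            Name:         "age",
            IsPrimaryKey: false,
            DataType:     schemapb.DataType_INT32,
            TypeParams: []*commonpb.KeyValuePair{
                {Key: "dim", Value: "1"},
            },
        }
        schema := schemapb.CollectionSchema{
            Name:   collectionName,
            AutoID: true,
            Fields: []*schemapb.FieldSchema{&fieldVec, &fieldInt},
        }
        return &etcdpb.CollectionMeta{
            ID:            collectionID,
            Schema:        &schema,
            CreateTime:    Timestamp(0),
            SegmentIDs:    []UniqueID{0},
            PartitionTags: []string{"default"},
        }
    }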
@@ -11,10 +11,10 @@ type dataSyncService struct {
 	ctx context.Context
 	fg  *flowgraph.TimeTickedFlowGraph

-	replica *collectionReplica
+	replica collectionReplica
 }

-func newDataSyncService(ctx context.Context, replica *collectionReplica) *dataSyncService {
+func newDataSyncService(ctx context.Context, replica collectionReplica) *dataSyncService {

 	return &dataSyncService{
 		ctx: ctx,
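dataSyncService now takes collectionReplica as a plain interface value rather than *collectionReplica, a pointer to an interface; the same substitution repeats throughout the query node in this commit. A tiny illustration of why the interface value is the idiomatic shape:

    package main

    import "fmt"

    type replica interface{ name() string }

    type replicaImpl struct{}

    func (replicaImpl) name() string { return "impl" }

    // Idiomatic: an interface value already carries a reference to its
    // implementation, so callers can invoke methods directly.
    func useValue(r replica) { fmt.Println(r.name()) }

    // The old, awkward shape: a pointer to an interface forces (*r).name().
    func usePointer(r *replica) { fmt.Println((*r).name()) }

    func main() {
        var r replica = replicaImpl{}
        useValue(r)
        usePointer(&r)
    }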
@@ -1,101 +1,22 @@
 package querynode

 import (
-	"context"
 	"encoding/binary"
 	"math"
 	"testing"
-	"time"

-	"github.com/golang/protobuf/proto"
 	"github.com/stretchr/testify/assert"

 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
-	"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
 	internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
-	"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
 )

 // NOTE: start pulsar before test
 func TestDataSyncService_Start(t *testing.T) {
-	Params.Init()
-	var ctx context.Context
-
-	if closeWithDeadline {
-		var cancel context.CancelFunc
-		d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
-		ctx, cancel = context.WithDeadline(context.Background(), d)
-		defer cancel()
-	} else {
-		ctx = context.Background()
-	}
-
-	// init query node
-	pulsarURL, _ := Params.pulsarAddress()
-	node := NewQueryNode(ctx, 0)
-
-	// init meta
-	collectionName := "collection0"
-	fieldVec := schemapb.FieldSchema{
-		Name: "vec",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_VECTOR_FLOAT,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "16",
-			},
-		},
-	}
-
-	fieldInt := schemapb.FieldSchema{
-		Name: "age",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_INT32,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "1",
-			},
-		},
-	}
-
-	schema := schemapb.CollectionSchema{
-		Name: collectionName,
-		AutoID: true,
-		Fields: []*schemapb.FieldSchema{
-			&fieldVec, &fieldInt,
-		},
-	}
-
-	collectionMeta := etcdpb.CollectionMeta{
-		ID: UniqueID(0),
-		Schema: &schema,
-		CreateTime: Timestamp(0),
-		SegmentIDs: []UniqueID{0},
-		PartitionTags: []string{"default"},
-	}
-
-	collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
-	assert.NotEqual(t, "", collectionMetaBlob)
-
-	var err = (*node.replica).addCollection(&collectionMeta, collectionMetaBlob)
-	assert.NoError(t, err)
-
-	collection, err := (*node.replica).getCollectionByName(collectionName)
-	assert.NoError(t, err)
-	assert.Equal(t, collection.meta.Schema.Name, "collection0")
-	assert.Equal(t, collection.meta.ID, UniqueID(0))
-	assert.Equal(t, (*node.replica).getCollectionNum(), 1)
-
-	err = (*node.replica).addPartition(collection.ID(), collectionMeta.PartitionTags[0])
-	assert.NoError(t, err)
-
-	segmentID := UniqueID(0)
-	err = (*node.replica).addSegment(segmentID, collectionMeta.PartitionTags[0], UniqueID(0))
-	assert.NoError(t, err)
-
+	node := newQueryNode()
+	initTestMeta(t, node, "collection0", 0, 0)
 	// test data generate
 	const msgLength = 10
 	const DIM = 16
@@ -179,25 +100,25 @@ func TestDataSyncService_Start(t *testing.T) {
 	// pulsar produce
 	const receiveBufSize = 1024
 	producerChannels := Params.insertChannelNames()
+	pulsarURL, _ := Params.pulsarAddress()

-	insertStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
+	insertStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
 	insertStream.SetPulsarClient(pulsarURL)
 	insertStream.CreatePulsarProducers(producerChannels)

 	var insertMsgStream msgstream.MsgStream = insertStream
 	insertMsgStream.Start()

-	err = insertMsgStream.Produce(&msgPack)
+	err := insertMsgStream.Produce(&msgPack)
 	assert.NoError(t, err)

 	err = insertMsgStream.Broadcast(&timeTickMsgPack)
 	assert.NoError(t, err)

 	// dataSync
-	node.dataSyncService = newDataSyncService(node.ctx, node.replica)
+	node.dataSyncService = newDataSyncService(node.queryNodeLoopCtx, node.replica)
 	go node.dataSyncService.start()

-	<-ctx.Done()
+	node.Close()
 }
@@ -10,7 +10,7 @@ import (

 type insertNode struct {
 	BaseNode
-	replica *collectionReplica
+	replica collectionReplica
 }

 type InsertData struct {
@@ -58,13 +58,13 @@ func (iNode *insertNode) Operate(in []*Msg) []*Msg {
 			insertData.insertRecords[task.SegmentID] = append(insertData.insertRecords[task.SegmentID], task.RowData...)

 			// check if segment exists, if not, create this segment
-			if !(*iNode.replica).hasSegment(task.SegmentID) {
-				collection, err := (*iNode.replica).getCollectionByName(task.CollectionName)
+			if !iNode.replica.hasSegment(task.SegmentID) {
+				collection, err := iNode.replica.getCollectionByName(task.CollectionName)
 				if err != nil {
 					log.Println(err)
 					continue
 				}
-				err = (*iNode.replica).addSegment(task.SegmentID, task.PartitionTag, collection.ID())
+				err = iNode.replica.addSegment(task.SegmentID, task.PartitionTag, collection.ID())
 				if err != nil {
 					log.Println(err)
 					continue
@@ -74,7 +74,7 @@ func (iNode *insertNode) Operate(in []*Msg) []*Msg {

 	// 2. do preInsert
 	for segmentID := range insertData.insertRecords {
-		var targetSegment, err = (*iNode.replica).getSegmentByID(segmentID)
+		var targetSegment, err = iNode.replica.getSegmentByID(segmentID)
 		if err != nil {
 			log.Println("preInsert failed")
 			// TODO: add error handling
@@ -102,7 +102,7 @@ func (iNode *insertNode) Operate(in []*Msg) []*Msg {
 }

 func (iNode *insertNode) insert(insertData *InsertData, segmentID int64, wg *sync.WaitGroup) {
-	var targetSegment, err = (*iNode.replica).getSegmentByID(segmentID)
+	var targetSegment, err = iNode.replica.getSegmentByID(segmentID)
 	if err != nil {
 		log.Println("cannot find segment:", segmentID)
 		// TODO: add error handling
@@ -127,7 +127,7 @@ func (iNode *insertNode) insert(insertData *InsertData, segmentID int64, wg *syn
 	wg.Done()
 }

-func newInsertNode(replica *collectionReplica) *insertNode {
+func newInsertNode(replica collectionReplica) *insertNode {
 	maxQueueLength := Params.flowGraphMaxQueueLength()
 	maxParallelism := Params.flowGraphMaxParallelism()

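As the wg parameter in the hunks above suggests, insert is run once per segment and joined with a sync.WaitGroup after preInsert. The fan-out/join pattern in isolation:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        segments := []int64{1, 2, 3}
        var wg sync.WaitGroup
        for _, segID := range segments {
            wg.Add(1)
            go func(id int64) {
                defer wg.Done()
                fmt.Println("inserting into segment", id)
            }(segID)
        }
        wg.Wait() // block until every per-segment insert returns
    }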
@@ -6,7 +6,7 @@ import (

 type serviceTimeNode struct {
 	BaseNode
-	replica *collectionReplica
+	replica collectionReplica
 }

 func (stNode *serviceTimeNode) Name() string {
@@ -28,12 +28,12 @@ func (stNode *serviceTimeNode) Operate(in []*Msg) []*Msg {
 	}

 	// update service time
-	(*(*stNode.replica).getTSafe()).set(serviceTimeMsg.timeRange.timestampMax)
+	stNode.replica.getTSafe().set(serviceTimeMsg.timeRange.timestampMax)
 	//fmt.Println("update tSafe to:", getPhysicalTime(serviceTimeMsg.timeRange.timestampMax))
 	return nil
 }

-func newServiceTimeNode(replica *collectionReplica) *serviceTimeNode {
+func newServiceTimeNode(replica collectionReplica) *serviceTimeNode {
 	maxQueueLength := Params.flowGraphMaxQueueLength()
 	maxParallelism := Params.flowGraphMaxParallelism()

@@ -12,7 +12,7 @@ import (

 	"github.com/golang/protobuf/proto"

-	etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
+	"github.com/zilliztech/milvus-distributed/internal/kv"
 	"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
 	"go.etcd.io/etcd/clientv3"
 	"go.etcd.io/etcd/mvcc/mvccpb"
@@ -25,11 +25,11 @@ const (

 type metaService struct {
 	ctx     context.Context
-	kvBase  *etcdkv.EtcdKV
-	replica *collectionReplica
+	kvBase  *kv.EtcdKV
+	replica collectionReplica
 }

-func newMetaService(ctx context.Context, replica *collectionReplica) *metaService {
+func newMetaService(ctx context.Context, replica collectionReplica) *metaService {
 	ETCDAddr := Params.etcdAddress()
 	MetaRootPath := Params.metaRootPath()

@@ -40,7 +40,7 @@ func newMetaService(ctx context.Context, replica collectionReplica) *metaService {

 	return &metaService{
 		ctx:     ctx,
-		kvBase:  etcdkv.NewEtcdKV(cli, MetaRootPath),
+		kvBase:  kv.NewEtcdKV(cli, MetaRootPath),
 		replica: replica,
 	}
 }
@@ -149,12 +149,12 @@ func (mService *metaService) processCollectionCreate(id string, value string) {

 	col := mService.collectionUnmarshal(value)
 	if col != nil {
-		err := (*mService.replica).addCollection(col, value)
+		err := mService.replica.addCollection(col, value)
 		if err != nil {
 			log.Println(err)
 		}
 		for _, partitionTag := range col.PartitionTags {
-			err = (*mService.replica).addPartition(col.ID, partitionTag)
+			err = mService.replica.addPartition(col.ID, partitionTag)
 			if err != nil {
 				log.Println(err)
 			}
@@ -173,7 +173,7 @@ func (mService *metaService) processSegmentCreate(id string, value string) {

 	// TODO: what if seg == nil? We need to notify master and return rpc request failed
 	if seg != nil {
-		err := (*mService.replica).addSegment(seg.SegmentID, seg.PartitionTag, seg.CollectionID)
+		err := mService.replica.addSegment(seg.SegmentID, seg.PartitionTag, seg.CollectionID)
 		if err != nil {
 			log.Println(err)
 			return
@@ -202,7 +202,7 @@ func (mService *metaService) processSegmentModify(id string, value string) {
 	}

 	if seg != nil {
-		targetSegment, err := (*mService.replica).getSegmentByID(seg.SegmentID)
+		targetSegment, err := mService.replica.getSegmentByID(seg.SegmentID)
 		if err != nil {
 			log.Println(err)
 			return
@@ -218,11 +218,11 @@ func (mService *metaService) processCollectionModify(id string, value string) {

 	col := mService.collectionUnmarshal(value)
 	if col != nil {
-		err := (*mService.replica).addPartitionsByCollectionMeta(col)
+		err := mService.replica.addPartitionsByCollectionMeta(col)
 		if err != nil {
 			log.Println(err)
 		}
-		err = (*mService.replica).removePartitionsByCollectionMeta(col)
+		err = mService.replica.removePartitionsByCollectionMeta(col)
 		if err != nil {
 			log.Println(err)
 		}
@@ -249,7 +249,7 @@ func (mService *metaService) processSegmentDelete(id string) {
 		log.Println("Cannot parse segment id:" + id)
 	}

-	err = (*mService.replica).removeSegment(segmentID)
+	err = mService.replica.removeSegment(segmentID)
 	if err != nil {
 		log.Println(err)
 		return
@@ -264,7 +264,7 @@ func (mService *metaService) processCollectionDelete(id string) {
 		log.Println("Cannot parse collection id:" + id)
 	}

-	err = (*mService.replica).removeCollection(collectionID)
+	err = mService.replica.removeCollection(collectionID)
 	if err != nil {
 		log.Println(err)
 		return
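metaService routes each etcd event by its key prefix; the tests below exercise exactly the by-dev/meta/collection/… and by-dev/meta/segment/… forms. A minimal dispatch sketch under that assumption:

    package main

    import (
        "fmt"
        "strings"
    )

    // process routes a key/value event to the matching handler, as the
    // processCreate/processModify/processDelete entry points presumably do.
    func process(key, value string) {
        switch {
        case strings.Contains(key, "/meta/collection/"):
            fmt.Println("collection event:", key)
        case strings.Contains(key, "/meta/segment/"):
            fmt.Println("segment event:", key)
        default:
            fmt.Println("ignored key:", key)
        }
        _ = value // the textproto payload would be unmarshaled here
    }

    func main() {
        process("by-dev/meta/collection/0", "...")
        process("by-dev/meta/segment/0", "...")
    }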
|
@ -3,23 +3,13 @@ package querynode
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
Params.Init()
|
||||
exitCode := m.Run()
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
|
||||
func TestMetaService_start(t *testing.T) {
|
||||
var ctx context.Context
|
||||
|
||||
@ -37,6 +27,7 @@ func TestMetaService_start(t *testing.T) {
|
||||
node.metaService = newMetaService(ctx, node.replica)
|
||||
|
||||
(*node.metaService).start()
|
||||
node.Close()
|
||||
}
|
||||
|
||||
func TestMetaService_getCollectionObjId(t *testing.T) {
|
||||
@@ -119,47 +110,9 @@ func TestMetaService_isSegmentChannelRangeInQueryNodeChannelRange(t *testing.T)

 func TestMetaService_printCollectionStruct(t *testing.T) {
 	collectionName := "collection0"
-	fieldVec := schemapb.FieldSchema{
-		Name: "vec",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_VECTOR_FLOAT,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "16",
-			},
-		},
-	}
-
-	fieldInt := schemapb.FieldSchema{
-		Name: "age",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_INT32,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "1",
-			},
-		},
-	}
-
-	schema := schemapb.CollectionSchema{
-		Name: collectionName,
-		AutoID: true,
-		Fields: []*schemapb.FieldSchema{
-			&fieldVec, &fieldInt,
-		},
-	}
-
-	collectionMeta := etcdpb.CollectionMeta{
-		ID: UniqueID(0),
-		Schema: &schema,
-		CreateTime: Timestamp(0),
-		SegmentIDs: []UniqueID{0},
-		PartitionTags: []string{"default"},
-	}
-
-	printCollectionStruct(&collectionMeta)
+	collectionID := UniqueID(0)
+	collectionMeta := genTestCollectionMeta(collectionName, collectionID)
+	printCollectionStruct(collectionMeta)
 }

 func TestMetaService_printSegmentStruct(t *testing.T) {
@@ -178,13 +131,8 @@ func TestMetaService_printSegmentStruct(t *testing.T) {
 }

 func TestMetaService_processCollectionCreate(t *testing.T) {
-	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
-	ctx, cancel := context.WithDeadline(context.Background(), d)
-	defer cancel()
-
-	// init metaService
-	node := NewQueryNode(ctx, 0)
-	node.metaService = newMetaService(ctx, node.replica)
+	node := newQueryNode()
+	node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)

 	id := "0"
 	value := `schema: <
@@ -196,6 +144,10 @@ func TestMetaService_processCollectionCreate(t *testing.T) {
       key: "dim"
      value: "16"
     >
+    index_params: <
+      key: "metric_type"
+      value: "L2"
+    >
   >
   fields: <
     name: "age"
@@ -212,71 +164,21 @@ func TestMetaService_processCollectionCreate(t *testing.T) {

 	node.metaService.processCollectionCreate(id, value)

-	collectionNum := (*node.replica).getCollectionNum()
+	collectionNum := node.replica.getCollectionNum()
 	assert.Equal(t, collectionNum, 1)

-	collection, err := (*node.replica).getCollectionByName("test")
+	collection, err := node.replica.getCollectionByName("test")
 	assert.NoError(t, err)
 	assert.Equal(t, collection.ID(), UniqueID(0))
+	node.Close()
 }

 func TestMetaService_processSegmentCreate(t *testing.T) {
-	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
-	ctx, cancel := context.WithDeadline(context.Background(), d)
-	defer cancel()
-
-	// init metaService
-	node := NewQueryNode(ctx, 0)
-	node.metaService = newMetaService(ctx, node.replica)
-
+	node := newQueryNode()
 	collectionName := "collection0"
-	fieldVec := schemapb.FieldSchema{
-		Name: "vec",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_VECTOR_FLOAT,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "16",
-			},
-		},
-	}
-
-	fieldInt := schemapb.FieldSchema{
-		Name: "age",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_INT32,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "1",
-			},
-		},
-	}
-
-	schema := schemapb.CollectionSchema{
-		Name: collectionName,
-		AutoID: true,
-		Fields: []*schemapb.FieldSchema{
-			&fieldVec, &fieldInt,
-		},
-	}
-
-	collectionMeta := etcdpb.CollectionMeta{
-		ID: UniqueID(0),
-		Schema: &schema,
-		CreateTime: Timestamp(0),
-		SegmentIDs: []UniqueID{0},
-		PartitionTags: []string{"default"},
-	}
-
-	colMetaBlob := proto.MarshalTextString(&collectionMeta)
-
-	err := (*node.replica).addCollection(&collectionMeta, string(colMetaBlob))
-	assert.NoError(t, err)
-
-	err = (*node.replica).addPartition(UniqueID(0), "default")
-	assert.NoError(t, err)
+	collectionID := UniqueID(0)
+	initTestMeta(t, node, collectionName, collectionID, 0)
+	node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)

 	id := "0"
 	value := `partition_tag: "default"
@@ -287,19 +189,15 @@ func TestMetaService_processSegmentCreate(t *testing.T) {

 	(*node.metaService).processSegmentCreate(id, value)

-	s, err := (*node.replica).getSegmentByID(UniqueID(0))
+	s, err := node.replica.getSegmentByID(UniqueID(0))
 	assert.NoError(t, err)
 	assert.Equal(t, s.segmentID, UniqueID(0))
+	node.Close()
 }

 func TestMetaService_processCreate(t *testing.T) {
-	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
-	ctx, cancel := context.WithDeadline(context.Background(), d)
-	defer cancel()
-
-	// init metaService
-	node := NewQueryNode(ctx, 0)
-	node.metaService = newMetaService(ctx, node.replica)
+	node := newQueryNode()
+	node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)

 	key1 := "by-dev/meta/collection/0"
 	msg1 := `schema: <
@@ -311,6 +209,10 @@ func TestMetaService_processCreate(t *testing.T) {
      key: "dim"
      value: "16"
     >
+    index_params: <
+      key: "metric_type"
+      value: "L2"
+    >
   >
   fields: <
     name: "age"
@@ -326,10 +228,10 @@ func TestMetaService_processCreate(t *testing.T) {
 `

 	(*node.metaService).processCreate(key1, msg1)
-	collectionNum := (*node.replica).getCollectionNum()
+	collectionNum := node.replica.getCollectionNum()
 	assert.Equal(t, collectionNum, 1)

-	collection, err := (*node.replica).getCollectionByName("test")
+	collection, err := node.replica.getCollectionByName("test")
 	assert.NoError(t, err)
 	assert.Equal(t, collection.ID(), UniqueID(0))

@@ -341,68 +243,19 @@ func TestMetaService_processCreate(t *testing.T) {
 `

 	(*node.metaService).processCreate(key2, msg2)
-	s, err := (*node.replica).getSegmentByID(UniqueID(0))
+	s, err := node.replica.getSegmentByID(UniqueID(0))
 	assert.NoError(t, err)
 	assert.Equal(t, s.segmentID, UniqueID(0))
+	node.Close()
 }

 func TestMetaService_processSegmentModify(t *testing.T) {
-	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
-	ctx, cancel := context.WithDeadline(context.Background(), d)
-	defer cancel()
-
-	// init metaService
-	node := NewQueryNode(ctx, 0)
-	node.metaService = newMetaService(ctx, node.replica)
-
+	node := newQueryNode()
 	collectionName := "collection0"
-	fieldVec := schemapb.FieldSchema{
-		Name: "vec",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_VECTOR_FLOAT,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "16",
-			},
-		},
-	}
-
-	fieldInt := schemapb.FieldSchema{
-		Name: "age",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_INT32,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "1",
-			},
-		},
-	}
-
-	schema := schemapb.CollectionSchema{
-		Name: collectionName,
-		AutoID: true,
-		Fields: []*schemapb.FieldSchema{
-			&fieldVec, &fieldInt,
-		},
-	}
-
-	collectionMeta := etcdpb.CollectionMeta{
-		ID: UniqueID(0),
-		Schema: &schema,
-		CreateTime: Timestamp(0),
-		SegmentIDs: []UniqueID{0},
-		PartitionTags: []string{"default"},
-	}
-
-	colMetaBlob := proto.MarshalTextString(&collectionMeta)
-
-	err := (*node.replica).addCollection(&collectionMeta, string(colMetaBlob))
-	assert.NoError(t, err)
-
-	err = (*node.replica).addPartition(UniqueID(0), "default")
-	assert.NoError(t, err)
+	collectionID := UniqueID(0)
+	segmentID := UniqueID(0)
+	initTestMeta(t, node, collectionName, collectionID, segmentID)
+	node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)

 	id := "0"
 	value := `partition_tag: "default"
@@ -412,9 +265,9 @@ func TestMetaService_processSegmentModify(t *testing.T) {
 `

 	(*node.metaService).processSegmentCreate(id, value)
-	s, err := (*node.replica).getSegmentByID(UniqueID(0))
+	s, err := node.replica.getSegmentByID(segmentID)
 	assert.NoError(t, err)
-	assert.Equal(t, s.segmentID, UniqueID(0))
+	assert.Equal(t, s.segmentID, segmentID)

 	newValue := `partition_tag: "default"
 channel_start: 0
@@ -424,19 +277,15 @@ func TestMetaService_processSegmentModify(t *testing.T) {

 	// TODO: modify segment for testing processCollectionModify
 	(*node.metaService).processSegmentModify(id, newValue)
-	seg, err := (*node.replica).getSegmentByID(UniqueID(0))
+	seg, err := node.replica.getSegmentByID(segmentID)
 	assert.NoError(t, err)
-	assert.Equal(t, seg.segmentID, UniqueID(0))
+	assert.Equal(t, seg.segmentID, segmentID)
+	node.Close()
 }

 func TestMetaService_processCollectionModify(t *testing.T) {
-	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
-	ctx, cancel := context.WithDeadline(context.Background(), d)
-	defer cancel()
-
-	// init metaService
-	node := NewQueryNode(ctx, 0)
-	node.metaService = newMetaService(ctx, node.replica)
+	node := newQueryNode()
+	node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)

 	id := "0"
 	value := `schema: <
@@ -448,6 +297,10 @@ func TestMetaService_processCollectionModify(t *testing.T) {
      key: "dim"
      value: "16"
     >
+    index_params: <
+      key: "metric_type"
+      value: "L2"
+    >
   >
   fields: <
     name: "age"
@@ -465,24 +318,24 @@ func TestMetaService_processCollectionModify(t *testing.T) {
 `

 	(*node.metaService).processCollectionCreate(id, value)
-	collectionNum := (*node.replica).getCollectionNum()
+	collectionNum := node.replica.getCollectionNum()
 	assert.Equal(t, collectionNum, 1)

-	collection, err := (*node.replica).getCollectionByName("test")
+	collection, err := node.replica.getCollectionByName("test")
 	assert.NoError(t, err)
 	assert.Equal(t, collection.ID(), UniqueID(0))

-	partitionNum, err := (*node.replica).getPartitionNum(UniqueID(0))
+	partitionNum, err := node.replica.getPartitionNum(UniqueID(0))
 	assert.NoError(t, err)
 	assert.Equal(t, partitionNum, 3)

-	hasPartition := (*node.replica).hasPartition(UniqueID(0), "p0")
+	hasPartition := node.replica.hasPartition(UniqueID(0), "p0")
 	assert.Equal(t, hasPartition, true)
-	hasPartition = (*node.replica).hasPartition(UniqueID(0), "p1")
+	hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
 	assert.Equal(t, hasPartition, true)
-	hasPartition = (*node.replica).hasPartition(UniqueID(0), "p2")
+	hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
 	assert.Equal(t, hasPartition, true)
-	hasPartition = (*node.replica).hasPartition(UniqueID(0), "p3")
+	hasPartition = node.replica.hasPartition(UniqueID(0), "p3")
 	assert.Equal(t, hasPartition, false)

 	newValue := `schema: <
@@ -494,6 +347,10 @@ func TestMetaService_processCollectionModify(t *testing.T) {
      key: "dim"
      value: "16"
     >
+    index_params: <
+      key: "metric_type"
+      value: "L2"
+    >
   >
   fields: <
     name: "age"
@@ -511,32 +368,28 @@ func TestMetaService_processCollectionModify(t *testing.T) {
 `

 	(*node.metaService).processCollectionModify(id, newValue)
-	collection, err = (*node.replica).getCollectionByName("test")
+	collection, err = node.replica.getCollectionByName("test")
 	assert.NoError(t, err)
 	assert.Equal(t, collection.ID(), UniqueID(0))

-	partitionNum, err = (*node.replica).getPartitionNum(UniqueID(0))
+	partitionNum, err = node.replica.getPartitionNum(UniqueID(0))
 	assert.NoError(t, err)
 	assert.Equal(t, partitionNum, 3)

-	hasPartition = (*node.replica).hasPartition(UniqueID(0), "p0")
+	hasPartition = node.replica.hasPartition(UniqueID(0), "p0")
 	assert.Equal(t, hasPartition, false)
-	hasPartition = (*node.replica).hasPartition(UniqueID(0), "p1")
+	hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
 	assert.Equal(t, hasPartition, true)
-	hasPartition = (*node.replica).hasPartition(UniqueID(0), "p2")
+	hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
 	assert.Equal(t, hasPartition, true)
-	hasPartition = (*node.replica).hasPartition(UniqueID(0), "p3")
+	hasPartition = node.replica.hasPartition(UniqueID(0), "p3")
 	assert.Equal(t, hasPartition, true)
+	node.Close()
 }

 func TestMetaService_processModify(t *testing.T) {
-	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
-	ctx, cancel := context.WithDeadline(context.Background(), d)
-	defer cancel()
-
-	// init metaService
-	node := NewQueryNode(ctx, 0)
-	node.metaService = newMetaService(ctx, node.replica)
+	node := newQueryNode()
+	node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)

 	key1 := "by-dev/meta/collection/0"
 	msg1 := `schema: <
@@ -548,6 +401,10 @@ func TestMetaService_processModify(t *testing.T) {
      key: "dim"
      value: "16"
     >
+    index_params: <
+      key: "metric_type"
+      value: "L2"
+    >
   >
   fields: <
     name: "age"
@@ -565,24 +422,24 @@ func TestMetaService_processModify(t *testing.T) {
 `

 	(*node.metaService).processCreate(key1, msg1)
-	collectionNum := (*node.replica).getCollectionNum()
+	collectionNum := node.replica.getCollectionNum()
 	assert.Equal(t, collectionNum, 1)

-	collection, err := (*node.replica).getCollectionByName("test")
+	collection, err := node.replica.getCollectionByName("test")
 	assert.NoError(t, err)
 	assert.Equal(t, collection.ID(), UniqueID(0))

-	partitionNum, err := (*node.replica).getPartitionNum(UniqueID(0))
+	partitionNum, err := node.replica.getPartitionNum(UniqueID(0))
 	assert.NoError(t, err)
 	assert.Equal(t, partitionNum, 3)

-	hasPartition := (*node.replica).hasPartition(UniqueID(0), "p0")
+	hasPartition := node.replica.hasPartition(UniqueID(0), "p0")
 	assert.Equal(t, hasPartition, true)
-	hasPartition = (*node.replica).hasPartition(UniqueID(0), "p1")
+	hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
 	assert.Equal(t, hasPartition, true)
-	hasPartition = (*node.replica).hasPartition(UniqueID(0), "p2")
+	hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
 	assert.Equal(t, hasPartition, true)
-	hasPartition = (*node.replica).hasPartition(UniqueID(0), "p3")
+	hasPartition = node.replica.hasPartition(UniqueID(0), "p3")
 	assert.Equal(t, hasPartition, false)

 	key2 := "by-dev/meta/segment/0"
@@ -593,7 +450,7 @@ func TestMetaService_processModify(t *testing.T) {
 `

 	(*node.metaService).processCreate(key2, msg2)
-	s, err := (*node.replica).getSegmentByID(UniqueID(0))
+	s, err := node.replica.getSegmentByID(UniqueID(0))
 	assert.NoError(t, err)
 	assert.Equal(t, s.segmentID, UniqueID(0))

@@ -608,6 +465,10 @@ func TestMetaService_processModify(t *testing.T) {
      key: "dim"
      value: "16"
     >
+    index_params: <
+      key: "metric_type"
+      value: "L2"
+    >
   >
   fields: <
     name: "age"
@@ -625,21 +486,21 @@ func TestMetaService_processModify(t *testing.T) {
 `

 	(*node.metaService).processModify(key1, msg3)
-	collection, err = (*node.replica).getCollectionByName("test")
+	collection, err = node.replica.getCollectionByName("test")
 	assert.NoError(t, err)
 	assert.Equal(t, collection.ID(), UniqueID(0))

-	partitionNum, err = (*node.replica).getPartitionNum(UniqueID(0))
+	partitionNum, err = node.replica.getPartitionNum(UniqueID(0))
 	assert.NoError(t, err)
 	assert.Equal(t, partitionNum, 3)

-	hasPartition = (*node.replica).hasPartition(UniqueID(0), "p0")
+	hasPartition = node.replica.hasPartition(UniqueID(0), "p0")
 	assert.Equal(t, hasPartition, false)
-	hasPartition = (*node.replica).hasPartition(UniqueID(0), "p1")
+	hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
 	assert.Equal(t, hasPartition, true)
-	hasPartition = (*node.replica).hasPartition(UniqueID(0), "p2")
+	hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
 	assert.Equal(t, hasPartition, true)
-	hasPartition = (*node.replica).hasPartition(UniqueID(0), "p3")
+	hasPartition = node.replica.hasPartition(UniqueID(0), "p3")
 	assert.Equal(t, hasPartition, true)

 	msg4 := `partition_tag: "p1"
@@ -649,68 +510,18 @@ func TestMetaService_processModify(t *testing.T) {
 `

 	(*node.metaService).processModify(key2, msg4)
-	seg, err := (*node.replica).getSegmentByID(UniqueID(0))
+	seg, err := node.replica.getSegmentByID(UniqueID(0))
 	assert.NoError(t, err)
 	assert.Equal(t, seg.segmentID, UniqueID(0))
+	node.Close()
 }

 func TestMetaService_processSegmentDelete(t *testing.T) {
-	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
-	ctx, cancel := context.WithDeadline(context.Background(), d)
-	defer cancel()
-
-	// init metaService
-	node := NewQueryNode(ctx, 0)
-	node.metaService = newMetaService(ctx, node.replica)
-
+	node := newQueryNode()
 	collectionName := "collection0"
-	fieldVec := schemapb.FieldSchema{
-		Name: "vec",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_VECTOR_FLOAT,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "16",
-			},
-		},
-	}
-
-	fieldInt := schemapb.FieldSchema{
-		Name: "age",
-		IsPrimaryKey: false,
-		DataType: schemapb.DataType_INT32,
-		TypeParams: []*commonpb.KeyValuePair{
-			{
-				Key: "dim",
-				Value: "1",
-			},
-		},
-	}
-
-	schema := schemapb.CollectionSchema{
-		Name: collectionName,
-		AutoID: true,
-		Fields: []*schemapb.FieldSchema{
-			&fieldVec, &fieldInt,
-		},
-	}
-
-	collectionMeta := etcdpb.CollectionMeta{
-		ID: UniqueID(0),
-		Schema: &schema,
-		CreateTime: Timestamp(0),
-		SegmentIDs: []UniqueID{0},
-		PartitionTags: []string{"default"},
-	}
-
-	colMetaBlob := proto.MarshalTextString(&collectionMeta)
-
-	err := (*node.replica).addCollection(&collectionMeta, string(colMetaBlob))
-	assert.NoError(t, err)
-
-	err = (*node.replica).addPartition(UniqueID(0), "default")
-	assert.NoError(t, err)
+	collectionID := UniqueID(0)
+	initTestMeta(t, node, collectionName, collectionID, 0)
+	node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)

 	id := "0"
 	value := `partition_tag: "default"
@@ -720,23 +531,19 @@ func TestMetaService_processSegmentDelete(t *testing.T) {
 `

 	(*node.metaService).processSegmentCreate(id, value)
-	seg, err := (*node.replica).getSegmentByID(UniqueID(0))
+	seg, err := node.replica.getSegmentByID(UniqueID(0))
 	assert.NoError(t, err)
 	assert.Equal(t, seg.segmentID, UniqueID(0))

 	(*node.metaService).processSegmentDelete("0")
-	mapSize := (*node.replica).getSegmentNum()
+	mapSize := node.replica.getSegmentNum()
 	assert.Equal(t, mapSize, 0)
+	node.Close()
 }

 func TestMetaService_processCollectionDelete(t *testing.T) {
-	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
-	ctx, cancel := context.WithDeadline(context.Background(), d)
-	defer cancel()
-
-	// init metaService
-	node := NewQueryNode(ctx, 0)
-	node.metaService = newMetaService(ctx, node.replica)
+	node := newQueryNode()
+	node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)

 	id := "0"
 	value := `schema: <
@@ -748,6 +555,10 @@ func TestMetaService_processCollectionDelete(t *testing.T) {
      key: "dim"
      value: "16"
     >
+    index_params: <
+      key: "metric_type"
+      value: "L2"
+    >
   >
   fields: <
     name: "age"
@@ -763,26 +574,22 @@ func TestMetaService_processCollectionDelete(t *testing.T) {
 `

 	(*node.metaService).processCollectionCreate(id, value)
-	collectionNum := (*node.replica).getCollectionNum()
+	collectionNum := node.replica.getCollectionNum()
 	assert.Equal(t, collectionNum, 1)

-	collection, err := (*node.replica).getCollectionByName("test")
+	collection, err := node.replica.getCollectionByName("test")
 	assert.NoError(t, err)
 	assert.Equal(t, collection.ID(), UniqueID(0))

 	(*node.metaService).processCollectionDelete(id)
-	collectionNum = (*node.replica).getCollectionNum()
+	collectionNum = node.replica.getCollectionNum()
 	assert.Equal(t, collectionNum, 0)
+	node.Close()
 }

 func TestMetaService_processDelete(t *testing.T) {
-	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
-	ctx, cancel := context.WithDeadline(context.Background(), d)
-	defer cancel()
-
-	// init metaService
-	node := NewQueryNode(ctx, 0)
-	node.metaService = newMetaService(ctx, node.replica)
+	node := newQueryNode()
+	node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)

 	key1 := "by-dev/meta/collection/0"
 	msg1 := `schema: <
@@ -794,6 +601,10 @@ func TestMetaService_processDelete(t *testing.T) {
      key: "dim"
      value: "16"
     >
+    index_params: <
+      key: "metric_type"
+      value: "L2"
+    >
   >
   fields: <
     name: "age"
@@ -809,10 +620,10 @@ func TestMetaService_processDelete(t *testing.T) {
 `

 	(*node.metaService).processCreate(key1, msg1)
-	collectionNum := (*node.replica).getCollectionNum()
+	collectionNum := node.replica.getCollectionNum()
 	assert.Equal(t, collectionNum, 1)

-	collection, err := (*node.replica).getCollectionByName("test")
|
||||
collection, err := node.replica.getCollectionByName("test")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, collection.ID(), UniqueID(0))
|
||||
|
||||
@ -824,77 +635,48 @@ func TestMetaService_processDelete(t *testing.T) {
|
||||
`
|
||||
|
||||
(*node.metaService).processCreate(key2, msg2)
|
||||
seg, err := (*node.replica).getSegmentByID(UniqueID(0))
|
||||
seg, err := node.replica.getSegmentByID(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, seg.segmentID, UniqueID(0))
|
||||
|
||||
(*node.metaService).processDelete(key1)
|
||||
collectionsSize := (*node.replica).getCollectionNum()
|
||||
collectionsSize := node.replica.getCollectionNum()
|
||||
assert.Equal(t, collectionsSize, 0)
|
||||
|
||||
mapSize := (*node.replica).getSegmentNum()
|
||||
mapSize := node.replica.getSegmentNum()
|
||||
assert.Equal(t, mapSize, 0)
|
||||
node.Close()
|
||||
}
|
||||
|
||||
func TestMetaService_processResp(t *testing.T) {
|
||||
var ctx context.Context
|
||||
if closeWithDeadline {
|
||||
var cancel context.CancelFunc
|
||||
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
|
||||
ctx, cancel = context.WithDeadline(context.Background(), d)
|
||||
defer cancel()
|
||||
} else {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
// init metaService
|
||||
node := NewQueryNode(ctx, 0)
|
||||
node.metaService = newMetaService(ctx, node.replica)
|
||||
node := newQueryNode()
|
||||
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
|
||||
|
||||
metaChan := (*node.metaService).kvBase.WatchWithPrefix("")
|
||||
|
||||
select {
|
||||
case <-node.ctx.Done():
|
||||
case <-node.queryNodeLoopCtx.Done():
|
||||
return
|
||||
case resp := <-metaChan:
|
||||
_ = (*node.metaService).processResp(resp)
|
||||
}
|
||||
node.Close()
|
||||
}
|
||||
|
||||
func TestMetaService_loadCollections(t *testing.T) {
|
||||
var ctx context.Context
|
||||
if closeWithDeadline {
|
||||
var cancel context.CancelFunc
|
||||
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
|
||||
ctx, cancel = context.WithDeadline(context.Background(), d)
|
||||
defer cancel()
|
||||
} else {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
// init metaService
|
||||
node := NewQueryNode(ctx, 0)
|
||||
node.metaService = newMetaService(ctx, node.replica)
|
||||
node := newQueryNode()
|
||||
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
|
||||
|
||||
err2 := (*node.metaService).loadCollections()
|
||||
assert.Nil(t, err2)
|
||||
node.Close()
|
||||
}
|
||||
|
||||
func TestMetaService_loadSegments(t *testing.T) {
|
||||
var ctx context.Context
|
||||
if closeWithDeadline {
|
||||
var cancel context.CancelFunc
|
||||
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
|
||||
ctx, cancel = context.WithDeadline(context.Background(), d)
|
||||
defer cancel()
|
||||
} else {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
// init metaService
|
||||
node := NewQueryNode(ctx, 0)
|
||||
node.metaService = newMetaService(ctx, node.replica)
|
||||
node := newQueryNode()
|
||||
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
|
||||
|
||||
err2 := (*node.metaService).loadSegments()
|
||||
assert.Nil(t, err2)
|
||||
node.Close()
|
||||
}
|
||||
|
@ -2,6 +2,7 @@ package querynode
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@ -21,15 +22,16 @@ func (p *ParamTable) Init() {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = p.LoadYaml("milvus.yaml")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = p.LoadYaml("advanced/channel.yaml")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
queryNodeIDStr := os.Getenv("QUERY_NODE_ID")
|
||||
if queryNodeIDStr == "" {
|
||||
queryNodeIDList := p.queryNodeIDList()
|
||||
if len(queryNodeIDList) <= 0 {
|
||||
queryNodeIDStr = "0"
|
||||
} else {
|
||||
queryNodeIDStr = strconv.Itoa(int(queryNodeIDList[0]))
|
||||
}
|
||||
}
|
||||
p.Save("_queryNodeID", queryNodeIDStr)
|
||||
}
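
The Init change above resolves the node's ID from the QUERY_NODE_ID environment variable (which the Jenkins deploy scripts now pass per container), falling back to the first entry of the configured ID list, or "0" when the list is empty. A minimal standalone sketch of that resolution order; resolveQueryNodeID is a hypothetical helper name, not part of the commit:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// resolveQueryNodeID mirrors the fallback order used in ParamTable.Init:
// environment variable first, then the configured ID list, then "0".
func resolveQueryNodeID(configuredIDs []int64) string {
	if idStr := os.Getenv("QUERY_NODE_ID"); idStr != "" {
		return idStr
	}
	if len(configuredIDs) > 0 {
		return strconv.Itoa(int(configuredIDs[0]))
	}
	return "0"
}

func main() {
	// With QUERY_NODE_ID unset, the first configured ID wins.
	fmt.Println(resolveQueryNodeID([]int64{1, 2})) // "1"
}
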

func (p *ParamTable) pulsarAddress() (string, error) {
@ -40,8 +42,25 @@ func (p *ParamTable) pulsarAddress() (string, error) {
return url, nil
}

func (p *ParamTable) queryNodeID() int {
queryNodeID, err := p.Load("reader.clientid")
func (p *ParamTable) queryNodeIDList() []UniqueID {
queryNodeIDStr, err := p.Load("nodeID.queryNodeIDList")
if err != nil {
panic(err)
}
var ret []UniqueID
queryNodeIDs := strings.Split(queryNodeIDStr, ",")
for _, i := range queryNodeIDs {
v, err := strconv.Atoi(i)
if err != nil {
log.Panicf("load query node id list error, %s", err.Error())
}
ret = append(ret, UniqueID(v))
}
return ret
}

func (p *ParamTable) QueryNodeID() UniqueID {
queryNodeID, err := p.Load("_queryNodeID")
if err != nil {
panic(err)
}
@ -49,7 +68,7 @@ func (p *ParamTable) queryNodeID() int {
if err != nil {
panic(err)
}
return id
return UniqueID(id)
}

func (p *ParamTable) insertChannelRange() []int {
@ -57,138 +76,47 @@ func (p *ParamTable) insertChannelRange() []int {
if err != nil {
panic(err)
}

channelRange := strings.Split(insertChannelRange, ",")
if len(channelRange) != 2 {
panic("Illegal channel range num")
}
channelBegin, err := strconv.Atoi(channelRange[0])
if err != nil {
panic(err)
}
channelEnd, err := strconv.Atoi(channelRange[1])
if err != nil {
panic(err)
}
if channelBegin < 0 || channelEnd < 0 {
panic("Illegal channel range value")
}
if channelBegin > channelEnd {
panic("Illegal channel range value")
}
return []int{channelBegin, channelEnd}
return paramtable.ConvertRangeToIntRange(insertChannelRange, ",")
}
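
insertChannelRange now delegates to paramtable.ConvertRangeToIntRange the parsing and validation that used to live inline. The real helper lives in the shared paramtable package and is not shown in this diff; a sketch of what it presumably does, reconstructed from the inline code it replaces:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// convertRangeToIntRange is a hypothetical reconstruction of
// paramtable.ConvertRangeToIntRange, based on the deleted inline checks.
func convertRangeToIntRange(rangeStr, sep string) []int {
	items := strings.Split(rangeStr, sep)
	if len(items) != 2 {
		panic("Illegal channel range num")
	}
	begin, err := strconv.Atoi(items[0])
	if err != nil {
		panic(err)
	}
	end, err := strconv.Atoi(items[1])
	if err != nil {
		panic(err)
	}
	if begin < 0 || end < 0 || begin > end {
		panic("Illegal channel range value")
	}
	return []int{begin, end}
}

func main() {
	fmt.Println(convertRangeToIntRange("0,2", ",")) // [0 2]
}
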

// advanced params
// stats
func (p *ParamTable) statsPublishInterval() int {
timeInterval, err := p.Load("queryNode.stats.publishInterval")
if err != nil {
panic(err)
}
interval, err := strconv.Atoi(timeInterval)
if err != nil {
panic(err)
}
return interval
return p.ParseInt("queryNode.stats.publishInterval")
}

// dataSync:
func (p *ParamTable) flowGraphMaxQueueLength() int32 {
queueLength, err := p.Load("queryNode.dataSync.flowGraph.maxQueueLength")
if err != nil {
panic(err)
}
length, err := strconv.Atoi(queueLength)
if err != nil {
panic(err)
}
return int32(length)
return p.ParseInt32("queryNode.dataSync.flowGraph.maxQueueLength")
}

func (p *ParamTable) flowGraphMaxParallelism() int32 {
maxParallelism, err := p.Load("queryNode.dataSync.flowGraph.maxParallelism")
if err != nil {
panic(err)
}
maxPara, err := strconv.Atoi(maxParallelism)
if err != nil {
panic(err)
}
return int32(maxPara)
return p.ParseInt32("queryNode.dataSync.flowGraph.maxParallelism")
}

// msgStream
func (p *ParamTable) insertReceiveBufSize() int64 {
revBufSize, err := p.Load("queryNode.msgStream.insert.recvBufSize")
if err != nil {
panic(err)
}
bufSize, err := strconv.Atoi(revBufSize)
if err != nil {
panic(err)
}
return int64(bufSize)
return p.ParseInt64("queryNode.msgStream.insert.recvBufSize")
}

func (p *ParamTable) insertPulsarBufSize() int64 {
pulsarBufSize, err := p.Load("queryNode.msgStream.insert.pulsarBufSize")
if err != nil {
panic(err)
}
bufSize, err := strconv.Atoi(pulsarBufSize)
if err != nil {
panic(err)
}
return int64(bufSize)
return p.ParseInt64("queryNode.msgStream.insert.pulsarBufSize")
}

func (p *ParamTable) searchReceiveBufSize() int64 {
revBufSize, err := p.Load("queryNode.msgStream.search.recvBufSize")
if err != nil {
panic(err)
}
bufSize, err := strconv.Atoi(revBufSize)
if err != nil {
panic(err)
}
return int64(bufSize)
return p.ParseInt64("queryNode.msgStream.search.recvBufSize")
}

func (p *ParamTable) searchPulsarBufSize() int64 {
pulsarBufSize, err := p.Load("queryNode.msgStream.search.pulsarBufSize")
if err != nil {
panic(err)
}
bufSize, err := strconv.Atoi(pulsarBufSize)
if err != nil {
panic(err)
}
return int64(bufSize)
return p.ParseInt64("queryNode.msgStream.search.pulsarBufSize")
}

func (p *ParamTable) searchResultReceiveBufSize() int64 {
revBufSize, err := p.Load("queryNode.msgStream.searchResult.recvBufSize")
if err != nil {
panic(err)
}
bufSize, err := strconv.Atoi(revBufSize)
if err != nil {
panic(err)
}
return int64(bufSize)
return p.ParseInt64("queryNode.msgStream.searchResult.recvBufSize")
}

func (p *ParamTable) statsReceiveBufSize() int64 {
revBufSize, err := p.Load("queryNode.msgStream.stats.recvBufSize")
if err != nil {
panic(err)
}
bufSize, err := strconv.Atoi(revBufSize)
if err != nil {
panic(err)
}
return int64(bufSize)
return p.ParseInt64("queryNode.msgStream.stats.recvBufSize")
}
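
Each of the accessors above collapses the same Load-then-strconv.Atoi-then-panic boilerplate into ParseInt/ParseInt32/ParseInt64 helpers on the base table. Those helpers are defined outside this diff; a sketch of how one is presumably layered over Load, with baseTable standing in for the real yaml-backed table:

package main

import (
	"fmt"
	"strconv"
)

// baseTable is a stand-in for the real ParamTable base; Load would
// normally read from the yaml-backed kv store.
type baseTable struct{ kv map[string]string }

func (b *baseTable) Load(key string) (string, error) {
	v, ok := b.kv[key]
	if !ok {
		return "", fmt.Errorf("key %s not found", key)
	}
	return v, nil
}

// ParseInt64 folds the Load + strconv.Atoi + panic pattern that each
// accessor above used to repeat inline.
func (b *baseTable) ParseInt64(key string) int64 {
	s, err := b.Load(key)
	if err != nil {
		panic(err)
	}
	v, err := strconv.Atoi(s)
	if err != nil {
		panic(err)
	}
	return int64(v)
}

func main() {
	b := &baseTable{kv: map[string]string{"queryNode.msgStream.stats.recvBufSize": "64"}}
	fmt.Println(b.ParseInt64("queryNode.msgStream.stats.recvBufSize")) // 64
}
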

func (p *ParamTable) etcdAddress() string {
@ -212,123 +140,73 @@ func (p *ParamTable) metaRootPath() string {
}

func (p *ParamTable) gracefulTime() int64 {
gracefulTime, err := p.Load("queryNode.gracefulTime")
if err != nil {
panic(err)
}
time, err := strconv.Atoi(gracefulTime)
if err != nil {
panic(err)
}
return int64(time)
return p.ParseInt64("queryNode.gracefulTime")
}

func (p *ParamTable) insertChannelNames() []string {
ch, err := p.Load("msgChannel.chanNamePrefix.insert")

prefix, err := p.Load("msgChannel.chanNamePrefix.insert")
if err != nil {
log.Fatal(err)
}
prefix += "-"
channelRange, err := p.Load("msgChannel.channelRange.insert")
if err != nil {
panic(err)
}
channelIDs := paramtable.ConvertRangeToIntSlice(channelRange, ",")

chanRange := strings.Split(channelRange, ",")
if len(chanRange) != 2 {
panic("Illegal channel range num")
var ret []string
for _, ID := range channelIDs {
ret = append(ret, prefix+strconv.Itoa(ID))
}
channelBegin, err := strconv.Atoi(chanRange[0])
if err != nil {
panic(err)
sep := len(channelIDs) / p.queryNodeNum()
index := p.sliceIndex()
if index == -1 {
panic("queryNodeID not Match with Config")
}
channelEnd, err := strconv.Atoi(chanRange[1])
if err != nil {
panic(err)
}
if channelBegin < 0 || channelEnd < 0 {
panic("Illegal channel range value")
}
if channelBegin > channelEnd {
panic("Illegal channel range value")
}

channels := make([]string, channelEnd-channelBegin)
for i := 0; i < channelEnd-channelBegin; i++ {
channels[i] = ch + "-" + strconv.Itoa(channelBegin+i)
}
return channels
start := index * sep
return ret[start : start+sep]
}
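
The rewritten insertChannelNames shards the full channel list across the configured query nodes: each node takes a contiguous slice of size len(channelIDs)/queryNodeNum() starting at sliceIndex()*sep, so two nodes split two insert channels one apiece. A worked sketch of that arithmetic, assuming ConvertRangeToIntSlice expands the range "0,2" into the IDs [0 1]:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Assumed expansion of channelRange "0,2" into individual channel IDs.
	channelIDs := []int{0, 1}
	prefix := "insert-"

	var names []string
	for _, id := range channelIDs {
		names = append(names, prefix+strconv.Itoa(id))
	}

	// Two query nodes in the ID list, so each consumes one channel.
	queryNodeNum := 2
	sep := len(channelIDs) / queryNodeNum

	for index := 0; index < queryNodeNum; index++ {
		start := index * sep
		fmt.Println("node", index, "consumes", names[start:start+sep])
	}
	// node 0 consumes [insert-0]
	// node 1 consumes [insert-1]
}
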

func (p *ParamTable) searchChannelNames() []string {
ch, err := p.Load("msgChannel.chanNamePrefix.search")
prefix, err := p.Load("msgChannel.chanNamePrefix.search")
if err != nil {
log.Fatal(err)
}
prefix += "-"
channelRange, err := p.Load("msgChannel.channelRange.search")
if err != nil {
panic(err)
}

chanRange := strings.Split(channelRange, ",")
if len(chanRange) != 2 {
panic("Illegal channel range num")
}
channelBegin, err := strconv.Atoi(chanRange[0])
if err != nil {
panic(err)
}
channelEnd, err := strconv.Atoi(chanRange[1])
if err != nil {
panic(err)
}
if channelBegin < 0 || channelEnd < 0 {
panic("Illegal channel range value")
}
if channelBegin > channelEnd {
panic("Illegal channel range value")
}
channelIDs := paramtable.ConvertRangeToIntSlice(channelRange, ",")

channels := make([]string, channelEnd-channelBegin)
for i := 0; i < channelEnd-channelBegin; i++ {
channels[i] = ch + "-" + strconv.Itoa(channelBegin+i)
var ret []string
for _, ID := range channelIDs {
ret = append(ret, prefix+strconv.Itoa(ID))
}
return channels
return ret
}

func (p *ParamTable) searchResultChannelNames() []string {
ch, err := p.Load("msgChannel.chanNamePrefix.searchResult")
prefix, err := p.Load("msgChannel.chanNamePrefix.searchResult")
if err != nil {
log.Fatal(err)
}
prefix += "-"
channelRange, err := p.Load("msgChannel.channelRange.searchResult")
if err != nil {
panic(err)
}

chanRange := strings.Split(channelRange, ",")
if len(chanRange) != 2 {
panic("Illegal channel range num")
}
channelBegin, err := strconv.Atoi(chanRange[0])
if err != nil {
panic(err)
}
channelEnd, err := strconv.Atoi(chanRange[1])
if err != nil {
panic(err)
}
if channelBegin < 0 || channelEnd < 0 {
panic("Illegal channel range value")
}
if channelBegin > channelEnd {
panic("Illegal channel range value")
}
channelIDs := paramtable.ConvertRangeToIntSlice(channelRange, ",")

channels := make([]string, channelEnd-channelBegin)
for i := 0; i < channelEnd-channelBegin; i++ {
channels[i] = ch + "-" + strconv.Itoa(channelBegin+i)
var ret []string
for _, ID := range channelIDs {
ret = append(ret, prefix+strconv.Itoa(ID))
}
return channels
return ret
}

func (p *ParamTable) msgChannelSubName() string {
@ -337,7 +215,11 @@ func (p *ParamTable) msgChannelSubName() string {
if err != nil {
log.Panic(err)
}
return name
queryNodeIDStr, err := p.Load("_QueryNodeID")
if err != nil {
panic(err)
}
return name + "-" + queryNodeIDStr
}

func (p *ParamTable) statsChannelName() string {
@ -347,3 +229,18 @@ func (p *ParamTable) statsChannelName() string {
}
return channels
}

func (p *ParamTable) sliceIndex() int {
queryNodeID := p.QueryNodeID()
queryNodeIDList := p.queryNodeIDList()
for i := 0; i < len(queryNodeIDList); i++ {
if queryNodeID == queryNodeIDList[i] {
return i
}
}
return -1
}

func (p *ParamTable) queryNodeNum() int {
return len(p.queryNodeIDList())
}
@ -1,128 +1,113 @@
package querynode

import (
"fmt"
"strings"
"testing"

"github.com/stretchr/testify/assert"
)

func TestParamTable_Init(t *testing.T) {
Params.Init()
}

func TestParamTable_PulsarAddress(t *testing.T) {
Params.Init()
address, err := Params.pulsarAddress()
assert.NoError(t, err)
split := strings.Split(address, ":")
assert.Equal(t, split[0], "pulsar")
assert.Equal(t, split[len(split)-1], "6650")
assert.Equal(t, "pulsar", split[0])
assert.Equal(t, "6650", split[len(split)-1])
}

func TestParamTable_QueryNodeID(t *testing.T) {
Params.Init()
id := Params.queryNodeID()
assert.Equal(t, id, 0)
id := Params.QueryNodeID()
assert.Contains(t, Params.queryNodeIDList(), id)
}

func TestParamTable_insertChannelRange(t *testing.T) {
Params.Init()
channelRange := Params.insertChannelRange()
assert.Equal(t, len(channelRange), 2)
assert.Equal(t, channelRange[0], 0)
assert.Equal(t, channelRange[1], 1)
assert.Equal(t, 2, len(channelRange))
}

func TestParamTable_statsServiceTimeInterval(t *testing.T) {
Params.Init()
interval := Params.statsPublishInterval()
assert.Equal(t, interval, 1000)
assert.Equal(t, 1000, interval)
}

func TestParamTable_statsMsgStreamReceiveBufSize(t *testing.T) {
Params.Init()
bufSize := Params.statsReceiveBufSize()
assert.Equal(t, bufSize, int64(64))
assert.Equal(t, int64(64), bufSize)
}

func TestParamTable_insertMsgStreamReceiveBufSize(t *testing.T) {
Params.Init()
bufSize := Params.insertReceiveBufSize()
assert.Equal(t, bufSize, int64(1024))
assert.Equal(t, int64(1024), bufSize)
}

func TestParamTable_searchMsgStreamReceiveBufSize(t *testing.T) {
Params.Init()
bufSize := Params.searchReceiveBufSize()
assert.Equal(t, bufSize, int64(512))
assert.Equal(t, int64(512), bufSize)
}

func TestParamTable_searchResultMsgStreamReceiveBufSize(t *testing.T) {
Params.Init()
bufSize := Params.searchResultReceiveBufSize()
assert.Equal(t, bufSize, int64(64))
assert.Equal(t, int64(64), bufSize)
}

func TestParamTable_searchPulsarBufSize(t *testing.T) {
Params.Init()
bufSize := Params.searchPulsarBufSize()
assert.Equal(t, bufSize, int64(512))
assert.Equal(t, int64(512), bufSize)
}

func TestParamTable_insertPulsarBufSize(t *testing.T) {
Params.Init()
bufSize := Params.insertPulsarBufSize()
assert.Equal(t, bufSize, int64(1024))
assert.Equal(t, int64(1024), bufSize)
}

func TestParamTable_flowGraphMaxQueueLength(t *testing.T) {
Params.Init()
length := Params.flowGraphMaxQueueLength()
assert.Equal(t, length, int32(1024))
assert.Equal(t, int32(1024), length)
}

func TestParamTable_flowGraphMaxParallelism(t *testing.T) {
Params.Init()
maxParallelism := Params.flowGraphMaxParallelism()
assert.Equal(t, maxParallelism, int32(1024))
assert.Equal(t, int32(1024), maxParallelism)
}

func TestParamTable_insertChannelNames(t *testing.T) {
Params.Init()
names := Params.insertChannelNames()
assert.Equal(t, len(names), 1)
assert.Equal(t, names[0], "insert-0")
channelRange := Params.insertChannelRange()
num := channelRange[1] - channelRange[0]
num = num / Params.queryNodeNum()
assert.Equal(t, num, len(names))
start := num * Params.sliceIndex()
assert.Equal(t, fmt.Sprintf("insert-%d", channelRange[start]), names[0])
}

func TestParamTable_searchChannelNames(t *testing.T) {
Params.Init()
names := Params.searchChannelNames()
assert.Equal(t, len(names), 1)
assert.Equal(t, names[0], "search-0")
assert.Equal(t, "search-0", names[0])
}

func TestParamTable_searchResultChannelNames(t *testing.T) {
Params.Init()
names := Params.searchResultChannelNames()
assert.Equal(t, len(names), 1)
assert.Equal(t, names[0], "searchResult-0")
num := Params.queryNodeNum()
assert.Equal(t, num, len(names))
for i := 0; i < num; i++ {
assert.Equal(t, fmt.Sprintf("searchResult-%d", i), names[i])
}
}

func TestParamTable_msgChannelSubName(t *testing.T) {
Params.Init()
name := Params.msgChannelSubName()
assert.Equal(t, name, "queryNode")
expectName := fmt.Sprintf("queryNode-%d", Params.QueryNodeID())
assert.Equal(t, expectName, name)
}

func TestParamTable_statsChannelName(t *testing.T) {
Params.Init()
name := Params.statsChannelName()
assert.Equal(t, name, "query-node-stats")
assert.Equal(t, "query-node-stats", name)
}

func TestParamTable_metaRootPath(t *testing.T) {
Params.Init()
path := Params.metaRootPath()
assert.Equal(t, path, "by-dev/meta")
assert.Equal(t, "by-dev/meta", path)
}
@ -1,77 +1,20 @@
package querynode

import (
"context"
"testing"

"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
)

func TestPartition_Segments(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0)

node := newQueryNode()
collectionName := "collection0"
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}
collectionID := UniqueID(0)
initTestMeta(t, node, collectionName, collectionID, 0)

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: collectionName,
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

var err = (*node.replica).addCollection(&collectionMeta, collectionMetaBlob)
collection, err := node.replica.getCollectionByName(collectionName)
assert.NoError(t, err)

collection, err := (*node.replica).getCollectionByName(collectionName)
assert.NoError(t, err)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, (*node.replica).getCollectionNum(), 1)

for _, tag := range collectionMeta.PartitionTags {
err := (*node.replica).addPartition(collection.ID(), tag)
assert.NoError(t, err)
}
collectionMeta := collection.meta

partitions := collection.Partitions()
assert.Equal(t, len(collectionMeta.PartitionTags), len(*partitions))
@ -80,12 +23,12 @@ func TestPartition_Segments(t *testing.T) {

const segmentNum = 3
for i := 0; i < segmentNum; i++ {
err := (*node.replica).addSegment(UniqueID(i), targetPartition.partitionTag, collection.ID())
err := node.replica.addSegment(UniqueID(i), targetPartition.partitionTag, collection.ID())
assert.NoError(t, err)
}

segments := targetPartition.Segments()
assert.Equal(t, segmentNum, len(*segments))
assert.Equal(t, segmentNum+1, len(*segments))
}

func TestPartition_newPartition(t *testing.T) {
@ -8,59 +8,17 @@ import (
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"

"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
)

func TestPlan_Plan(t *testing.T) {
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: "collection0",
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

collection := newCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))
collection := newCollection(collectionMeta, collectionMetaBlob)

dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"L2\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"

@ -74,52 +32,13 @@ func TestPlan_Plan(t *testing.T) {
}

func TestPlan_PlaceholderGroup(t *testing.T) {
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: "collection0",
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

collection := newCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))
collection := newCollection(collectionMeta, collectionMetaBlob)

dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"L2\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"

@ -17,11 +17,12 @@ import (
)

type QueryNode struct {
ctx context.Context
queryNodeLoopCtx context.Context
queryNodeLoopCancel func()

QueryNodeID uint64

replica *collectionReplica
replica collectionReplica

dataSyncService *dataSyncService
metaService *metaService
@ -29,7 +30,14 @@ type QueryNode struct {
statsService *statsService
}
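
The replica field changes from *collectionReplica to collectionReplica throughout this commit. If collectionReplica is an interface, as the dropped (*node.replica) dereferences all over the tests suggest, storing the interface value directly removes a needless level of indirection and the awkward explicit dereference at every call site. A minimal sketch of the difference, with hypothetical type names:

package main

import "fmt"

type replica interface{ getCollectionNum() int }

type replicaImpl struct{ num int }

func (r *replicaImpl) getCollectionNum() int { return r.num }

func main() {
	var r replica = &replicaImpl{num: 1}

	// Pointer to interface: the method set of *replica is empty, so
	// every call needs an explicit dereference, as the old code did.
	pr := &r
	fmt.Println((*pr).getCollectionNum())

	// Interface value held directly: calls read naturally, which is
	// what the node.replica.getCollectionNum() call sites rely on.
	fmt.Println(r.getCollectionNum())
}
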

func Init() {
Params.Init()
}

func NewQueryNode(ctx context.Context, queryNodeID uint64) *QueryNode {

ctx1, cancel := context.WithCancel(ctx)

segmentsMap := make(map[int64]*Segment)
collections := make([]*Collection, 0)

@ -43,11 +51,11 @@ func NewQueryNode(ctx context.Context, queryNodeID uint64) *QueryNode {
}

return &QueryNode{
ctx: ctx,
queryNodeLoopCtx: ctx1,
queryNodeLoopCancel: cancel,
QueryNodeID: queryNodeID,

QueryNodeID: queryNodeID,

replica: &replica,
replica: replica,

dataSyncService: nil,
metaService: nil,
@ -56,31 +64,34 @@ func NewQueryNode(ctx context.Context, queryNodeID uint64) *QueryNode {
}
}

func (node *QueryNode) Start() {
node.dataSyncService = newDataSyncService(node.ctx, node.replica)
node.searchService = newSearchService(node.ctx, node.replica)
node.metaService = newMetaService(node.ctx, node.replica)
node.statsService = newStatsService(node.ctx, node.replica)
func (node *QueryNode) Start() error {
// todo add connectMaster logic
node.dataSyncService = newDataSyncService(node.queryNodeLoopCtx, node.replica)
node.searchService = newSearchService(node.queryNodeLoopCtx, node.replica)
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
node.statsService = newStatsService(node.queryNodeLoopCtx, node.replica)

go node.dataSyncService.start()
go node.searchService.start()
go node.metaService.start()
node.statsService.start()
go node.statsService.start()
return nil
}

func (node *QueryNode) Close() {
<-node.ctx.Done()
node.queryNodeLoopCancel()

// free collectionReplica
(*node.replica).freeAll()
node.replica.freeAll()

// close services
if node.dataSyncService != nil {
(*node.dataSyncService).close()
node.dataSyncService.close()
}
if node.searchService != nil {
(*node.searchService).close()
node.searchService.close()
}
if node.statsService != nil {
(*node.statsService).close()
node.statsService.close()
}
}
@ -2,18 +2,93 @@ package querynode

import (
"context"
"os"
"testing"
"time"

"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
)

const ctxTimeInMillisecond = 200
const closeWithDeadline = true

// NOTE: start pulsar and etcd before test
func TestQueryNode_start(t *testing.T) {
func setup() {
Params.Init()
}

func genTestCollectionMeta(collectionName string, collectionID UniqueID) *etcdpb.CollectionMeta {
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "metric_type",
Value: "L2",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
}

schema := schemapb.CollectionSchema{
Name: collectionName,
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: collectionID,
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
return &collectionMeta
}

func initTestMeta(t *testing.T, node *QueryNode, collectionName string, collectionID UniqueID, segmentID UniqueID) {
collectionMeta := genTestCollectionMeta(collectionName, collectionID)

collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

var err = node.replica.addCollection(collectionMeta, collectionMetaBlob)
assert.NoError(t, err)

collection, err := node.replica.getCollectionByName(collectionName)
assert.NoError(t, err)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)
assert.Equal(t, node.replica.getCollectionNum(), 1)

err = node.replica.addPartition(collection.ID(), collectionMeta.PartitionTags[0])
assert.NoError(t, err)

err = node.replica.addSegment(segmentID, collectionMeta.PartitionTags[0], collectionID)
assert.NoError(t, err)
}

func newQueryNode() *QueryNode {

var ctx context.Context

if closeWithDeadline {
var cancel context.CancelFunc
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
@ -23,7 +98,21 @@ func TestQueryNode_start(t *testing.T) {
ctx = context.Background()
}

node := NewQueryNode(ctx, 0)
node.Start()
node.Close()
svr := NewQueryNode(ctx, 0)
return svr

}

func TestMain(m *testing.M) {
setup()
exitCode := m.Run()
os.Exit(exitCode)
}
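
Package-level fixtures now run through Go's standard TestMain hook: setup() initializes Params once before m.Run() executes every test in the package. If package-wide teardown were ever needed, the conventional extension is to run it between m.Run() and os.Exit, since os.Exit skips deferred calls; a sketch with hypothetical helper names:

package querynode_example

import (
	"os"
	"testing"
)

func setupExample()    { /* e.g. Params.Init() */ }
func teardownExample() { /* release global fixtures */ }

func TestMain(m *testing.M) {
	setupExample()
	exitCode := m.Run()
	// Teardown must happen before os.Exit: os.Exit does not run defers.
	teardownExample()
	os.Exit(exitCode)
}
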

// NOTE: start pulsar and etcd before test
func TestQueryNode_Start(t *testing.T) {
localNode := newQueryNode()
err := localNode.Start()
assert.Nil(t, err)
localNode.Close()
}
@ -1,15 +0,0 @@
package querynode

import (
"context"
)

func Init() {
Params.Init()
}

func StartQueryNode(ctx context.Context) {
node := NewQueryNode(ctx, 0)

node.Start()
}
@ -9,63 +9,19 @@ import (
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"

"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
)

func TestReduce_AllFunc(t *testing.T) {
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: "collection0",
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
collectionName := "collection0"
collectionID := UniqueID(0)
segmentID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

collection := newCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))

segmentID := UniqueID(0)
collection := newCollection(collectionMeta, collectionMetaBlob)
segment := newSegment(collection, segmentID)
assert.Equal(t, segmentID, segment.segmentID)

const DIM = 16
var vec = [DIM]float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
@ -4,10 +4,12 @@ import "C"
import (
"context"
"errors"
"github.com/golang/protobuf/proto"
"fmt"
"log"
"sync"

"github.com/golang/protobuf/proto"

"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
@ -19,7 +21,7 @@ type searchService struct {
wait sync.WaitGroup
cancel context.CancelFunc

replica *collectionReplica
replica collectionReplica
tSafeWatcher *tSafeWatcher

serviceableTime Timestamp
@ -27,13 +29,14 @@ type searchService struct {

msgBuffer chan msgstream.TsMsg
unsolvedMsg []msgstream.TsMsg
searchMsgStream *msgstream.MsgStream
searchResultMsgStream *msgstream.MsgStream
searchMsgStream msgstream.MsgStream
searchResultMsgStream msgstream.MsgStream
queryNodeID UniqueID
}

type ResultEntityIds []UniqueID

func newSearchService(ctx context.Context, replica *collectionReplica) *searchService {
func newSearchService(ctx context.Context, replica collectionReplica) *searchService {
receiveBufSize := Params.searchReceiveBufSize()
pulsarBufSize := Params.searchPulsarBufSize()

@ -69,14 +72,15 @@ func newSearchService(ctx context.Context, replica *collectionReplica) *searchSe
replica: replica,
tSafeWatcher: newTSafeWatcher(),

searchMsgStream: &inputStream,
searchResultMsgStream: &outputStream,
searchMsgStream: inputStream,
searchResultMsgStream: outputStream,
queryNodeID: Params.QueryNodeID(),
}
}

func (ss *searchService) start() {
(*ss.searchMsgStream).Start()
(*ss.searchResultMsgStream).Start()
ss.searchMsgStream.Start()
ss.searchResultMsgStream.Start()
ss.register()
ss.wait.Add(2)
go ss.receiveSearchMsg()
@ -85,20 +89,24 @@ func (ss *searchService) start() {
}

func (ss *searchService) close() {
(*ss.searchMsgStream).Close()
(*ss.searchResultMsgStream).Close()
if ss.searchMsgStream != nil {
ss.searchMsgStream.Close()
}
if ss.searchResultMsgStream != nil {
ss.searchResultMsgStream.Close()
}
ss.cancel()
}

func (ss *searchService) register() {
tSafe := (*(ss.replica)).getTSafe()
(*tSafe).registerTSafeWatcher(ss.tSafeWatcher)
tSafe := ss.replica.getTSafe()
tSafe.registerTSafeWatcher(ss.tSafeWatcher)
}

func (ss *searchService) waitNewTSafe() Timestamp {
// block until dataSyncService updating tSafe
ss.tSafeWatcher.hasUpdate()
timestamp := (*(*ss.replica).getTSafe()).get()
timestamp := ss.replica.getTSafe().get()
return timestamp
}
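
waitNewTSafe blocks on tSafeWatcher.hasUpdate() until the dataSyncService publishes a newer tSafe timestamp. The watcher type itself is not shown in this diff; a plausible minimal shape, assuming a one-slot channel that coalesces notifications (field and method names beyond hasUpdate are hypothetical):

package main

import "fmt"

// tSafeWatcherSketch is a hypothetical reconstruction: a one-slot
// channel is enough to coalesce notifications from the writer side.
type tSafeWatcherSketch struct {
	notifyChan chan bool
}

func newTSafeWatcherSketch() *tSafeWatcherSketch {
	return &tSafeWatcherSketch{notifyChan: make(chan bool, 1)}
}

// notify is what the dataSyncService side would call after advancing tSafe.
func (w *tSafeWatcherSketch) notify() {
	select {
	case w.notifyChan <- true: // queue one pending update
	default: // an update is already pending; coalesce
	}
}

// hasUpdate blocks until at least one notify has arrived.
func (w *tSafeWatcherSketch) hasUpdate() {
	<-w.notifyChan
}

func main() {
	w := newTSafeWatcherSketch()
	go w.notify()
	w.hasUpdate()
	fmt.Println("tSafe advanced")
}
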

@ -122,7 +130,7 @@ func (ss *searchService) receiveSearchMsg() {
case <-ss.ctx.Done():
return
default:
msgPack := (*ss.searchMsgStream).Consume()
msgPack := ss.searchMsgStream.Consume()
if msgPack == nil || len(msgPack.Msgs) <= 0 {
continue
}
@ -219,7 +227,7 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
}
collectionName := query.CollectionName
partitionTags := query.PartitionTags
collection, err := (*ss.replica).getCollectionByName(collectionName)
collection, err := ss.replica.getCollectionByName(collectionName)
if err != nil {
return err
}
@ -241,14 +249,14 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
matchedSegments := make([]*Segment, 0)

for _, partitionTag := range partitionTags {
hasPartition := (*ss.replica).hasPartition(collectionID, partitionTag)
hasPartition := ss.replica.hasPartition(collectionID, partitionTag)
if !hasPartition {
return errors.New("search Failed, invalid partitionTag")
}
}

for _, partitionTag := range partitionTags {
partition, _ := (*ss.replica).getPartitionByTag(collectionID, partitionTag)
partition, _ := ss.replica.getPartitionByTag(collectionID, partitionTag)
for _, segment := range partition.segments {
//fmt.Println("dsl = ", dsl)

@ -268,13 +276,13 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS},
ReqID: searchMsg.ReqID,
ProxyID: searchMsg.ProxyID,
QueryNodeID: searchMsg.ProxyID,
QueryNodeID: ss.queryNodeID,
Timestamp: searchTimestamp,
ResultChannelID: searchMsg.ResultChannelID,
Hits: nil,
}
searchResultMsg := &msgstream.SearchResultMsg{
BaseMsg: msgstream.BaseMsg{HashValues: []uint32{0}},
BaseMsg: msgstream.BaseMsg{HashValues: []uint32{uint32(searchMsg.ResultChannelID)}},
SearchResult: results,
}
err = ss.publishSearchResult(searchResultMsg)
@ -333,7 +341,7 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
Hits: hits,
}
searchResultMsg := &msgstream.SearchResultMsg{
BaseMsg: msgstream.BaseMsg{HashValues: []uint32{0}},
BaseMsg: msgstream.BaseMsg{HashValues: []uint32{uint32(searchMsg.ResultChannelID)}},
SearchResult: results,
}
err = ss.publishSearchResult(searchResultMsg)
@ -350,9 +358,10 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
}

func (ss *searchService) publishSearchResult(msg msgstream.TsMsg) error {
fmt.Println("Publish SearchResult", msg.HashKeys())
msgPack := msgstream.MsgPack{}
msgPack.Msgs = append(msgPack.Msgs, msg)
err := (*ss.searchResultMsgStream).Produce(&msgPack)
err := ss.searchResultMsgStream.Produce(&msgPack)
if err != nil {
return err
}
@ -377,11 +386,11 @@ func (ss *searchService) publishFailedSearchResult(msg msgstream.TsMsg, errMsg s
}

tsMsg := &msgstream.SearchResultMsg{
BaseMsg: msgstream.BaseMsg{HashValues: []uint32{0}},
BaseMsg: msgstream.BaseMsg{HashValues: []uint32{uint32(searchMsg.ResultChannelID)}},
SearchResult: results,
}
msgPack.Msgs = append(msgPack.Msgs, tsMsg)
err := (*ss.searchResultMsgStream).Produce(&msgPack)
err := ss.searchResultMsgStream.Produce(&msgPack)
if err != nil {
return err
}
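
Every SearchResultMsg above now sets HashValues to uint32(searchMsg.ResultChannelID) instead of a constant 0, and reports the node's own queryNodeID rather than echoing the proxy's. Hashing by the result channel lets the message stream route each result onto the channel the requesting proxy actually consumes; with a constant hash, every result from every node collapsed onto one output channel. A sketch of hash-based producer routing, assuming modulo-style bucketing inside the stream:

package main

import "fmt"

// routeByHash mimics a msgstream producer picking an output channel
// from a message's hash value (assumed modulo-style bucketing).
func routeByHash(hashValue uint32, channels []string) string {
	return channels[int(hashValue)%len(channels)]
}

func main() {
	channels := []string{"searchResult-0", "searchResult-1"}

	// With a constant hash of 0, both results land on the same channel.
	fmt.Println(routeByHash(0, channels), routeByHash(0, channels))

	// Hashing by ResultChannelID spreads results to the right consumers.
	for _, resultChannelID := range []int64{0, 1} {
		fmt.Println(routeByHash(uint32(resultChannelID), channels))
	}
}
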
|
||||
|
@ -1,7 +1,6 @@
|
||||
package querynode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"log"
|
||||
"math"
|
||||
@ -13,80 +12,18 @@ import (
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
|
||||
//"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
//"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
|
||||
)
|
||||
|
||||
func TestSearch_Search(t *testing.T) {
|
||||
Params.Init()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
node := newQueryNode()
|
||||
initTestMeta(t, node, "collection0", 0, 0)
|
||||
|
||||
// init query node
|
||||
pulsarURL, _ := Params.pulsarAddress()
|
||||
node := NewQueryNode(ctx, 0)
|
||||
|
||||
// init meta
|
||||
collectionName := "collection0"
|
||||
fieldVec := schemapb.FieldSchema{
|
||||
Name: "vec",
|
||||
IsPrimaryKey: false,
|
||||
DataType: schemapb.DataType_VECTOR_FLOAT,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "dim",
|
||||
Value: "16",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
fieldInt := schemapb.FieldSchema{
|
||||
Name: "age",
|
||||
IsPrimaryKey: false,
|
||||
DataType: schemapb.DataType_INT32,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "dim",
|
||||
Value: "1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
schema := schemapb.CollectionSchema{
|
||||
Name: collectionName,
|
||||
AutoID: true,
|
||||
Fields: []*schemapb.FieldSchema{
|
||||
&fieldVec, &fieldInt,
|
||||
},
|
||||
}
|
||||
|
||||
collectionMeta := etcdpb.CollectionMeta{
|
||||
ID: UniqueID(0),
|
||||
Schema: &schema,
|
||||
CreateTime: Timestamp(0),
|
||||
SegmentIDs: []UniqueID{0},
|
||||
PartitionTags: []string{"default"},
|
||||
}
|
||||
|
||||
collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
|
||||
assert.NotEqual(t, "", collectionMetaBlob)
|
||||
|
||||
var err = (*node.replica).addCollection(&collectionMeta, collectionMetaBlob)
|
||||
assert.NoError(t, err)
|
||||
|
||||
collection, err := (*node.replica).getCollectionByName(collectionName)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, collection.meta.Schema.Name, "collection0")
|
||||
assert.Equal(t, collection.meta.ID, UniqueID(0))
|
||||
assert.Equal(t, (*node.replica).getCollectionNum(), 1)
|
||||
|
||||
err = (*node.replica).addPartition(collection.ID(), collectionMeta.PartitionTags[0])
|
||||
assert.NoError(t, err)
|
||||
|
||||
segmentID := UniqueID(0)
|
||||
err = (*node.replica).addSegment(segmentID, collectionMeta.PartitionTags[0], UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
|
||||
// test data generate
|
||||
const msgLength = 10
|
||||
@ -158,14 +95,14 @@ func TestSearch_Search(t *testing.T) {
|
||||
msgPackSearch := msgstream.MsgPack{}
|
||||
msgPackSearch.Msgs = append(msgPackSearch.Msgs, searchMsg)
|
||||
|
||||
searchStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
|
||||
searchStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
|
||||
searchStream.SetPulsarClient(pulsarURL)
|
||||
searchStream.CreatePulsarProducers(searchProducerChannels)
|
||||
searchStream.Start()
|
||||
err = searchStream.Produce(&msgPackSearch)
|
||||
assert.NoError(t, err)
|
||||
|
||||
node.searchService = newSearchService(node.ctx, node.replica)
|
||||
node.searchService = newSearchService(node.queryNodeLoopCtx, node.replica)
|
||||
go node.searchService.start()
|
||||
|
||||
// start insert
|
||||
@ -235,7 +172,7 @@ func TestSearch_Search(t *testing.T) {
|
||||
timeTickMsgPack.Msgs = append(timeTickMsgPack.Msgs, timeTickMsg)
|
||||
|
||||
// pulsar produce
|
||||
insertStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
|
||||
insertStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
|
||||
insertStream.SetPulsarClient(pulsarURL)
|
||||
insertStream.CreatePulsarProducers(insertProducerChannels)
|
||||
insertStream.Start()
|
||||
@ -245,83 +182,19 @@ func TestSearch_Search(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
// dataSync
|
||||
node.dataSyncService = newDataSyncService(node.ctx, node.replica)
|
||||
node.dataSyncService = newDataSyncService(node.queryNodeLoopCtx, node.replica)
|
||||
go node.dataSyncService.start()
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
cancel()
|
||||
node.Close()
|
||||
}
|
||||
|
||||
func TestSearch_SearchMultiSegments(t *testing.T) {
|
||||
Params.Init()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
node := newQueryNode()
|
||||
initTestMeta(t, node, "collection0", 0, 0)
|
||||
|
||||
// init query node
|
||||
pulsarURL, _ := Params.pulsarAddress()
|
||||
node := NewQueryNode(ctx, 0)
|
||||
|
||||
// init meta
|
||||
collectionName := "collection0"
|
||||
fieldVec := schemapb.FieldSchema{
|
||||
Name: "vec",
|
||||
IsPrimaryKey: false,
|
||||
DataType: schemapb.DataType_VECTOR_FLOAT,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "dim",
|
||||
Value: "16",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
fieldInt := schemapb.FieldSchema{
|
||||
Name: "age",
|
||||
IsPrimaryKey: false,
|
||||
DataType: schemapb.DataType_INT32,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "dim",
|
||||
Value: "1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
schema := schemapb.CollectionSchema{
|
||||
Name: collectionName,
|
||||
AutoID: true,
|
||||
Fields: []*schemapb.FieldSchema{
|
||||
&fieldVec, &fieldInt,
|
||||
},
|
||||
}
|
||||
|
||||
collectionMeta := etcdpb.CollectionMeta{
|
||||
ID: UniqueID(0),
|
||||
Schema: &schema,
|
||||
CreateTime: Timestamp(0),
|
||||
SegmentIDs: []UniqueID{0},
|
||||
PartitionTags: []string{"default"},
|
||||
}
|
||||
|
||||
collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
|
||||
assert.NotEqual(t, "", collectionMetaBlob)
|
||||
|
||||
var err = (*node.replica).addCollection(&collectionMeta, collectionMetaBlob)
|
||||
assert.NoError(t, err)
|
||||
|
||||
collection, err := (*node.replica).getCollectionByName(collectionName)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, collection.meta.Schema.Name, "collection0")
|
||||
assert.Equal(t, collection.meta.ID, UniqueID(0))
|
||||
assert.Equal(t, (*node.replica).getCollectionNum(), 1)
|
||||
|
||||
err = (*node.replica).addPartition(collection.ID(), collectionMeta.PartitionTags[0])
|
||||
assert.NoError(t, err)
|
||||
|
||||
segmentID := UniqueID(0)
|
||||
err = (*node.replica).addSegment(segmentID, collectionMeta.PartitionTags[0], UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
|
||||
// test data generate
|
||||
const msgLength = 1024
|
||||
@ -393,14 +266,14 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
|
||||
msgPackSearch := msgstream.MsgPack{}
|
||||
msgPackSearch.Msgs = append(msgPackSearch.Msgs, searchMsg)
|
||||
|
||||
searchStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
|
||||
searchStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
|
||||
searchStream.SetPulsarClient(pulsarURL)
|
||||
searchStream.CreatePulsarProducers(searchProducerChannels)
|
||||
searchStream.Start()
|
||||
err = searchStream.Produce(&msgPackSearch)
|
||||
assert.NoError(t, err)
|
||||
|
||||
node.searchService = newSearchService(node.ctx, node.replica)
|
||||
node.searchService = newSearchService(node.queryNodeLoopCtx, node.replica)
|
||||
go node.searchService.start()
|
||||
|
||||
// start insert
|
||||
@ -474,7 +347,7 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
|
||||
timeTickMsgPack.Msgs = append(timeTickMsgPack.Msgs, timeTickMsg)
|
||||
|
||||
// pulsar produce
|
||||
insertStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
|
||||
insertStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
|
||||
insertStream.SetPulsarClient(pulsarURL)
|
||||
insertStream.CreatePulsarProducers(insertProducerChannels)
|
||||
insertStream.Start()
|
||||
@ -484,11 +357,10 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
// dataSync
|
||||
node.dataSyncService = newDataSyncService(node.ctx, node.replica)
|
||||
node.dataSyncService = newDataSyncService(node.queryNodeLoopCtx, node.replica)
|
||||
go node.dataSyncService.start()
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
cancel()
|
||||
node.Close()
|
||||
}
|
||||
|
@ -1,7 +1,6 @@
package querynode

import (
"context"
"encoding/binary"
"log"
"math"
@ -10,61 +9,21 @@ import (
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"

"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
)

//-------------------------------------------------------------------------------------- constructor and destructor
func TestSegment_newSegment(t *testing.T) {
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: "collection0",
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

collection := newCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -74,52 +33,15 @@ func TestSegment_newSegment(t *testing.T) {
}

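Every test in this file replaces the same inline fieldVec/fieldInt/schema/collectionMeta setup with a call to a shared genTestCollectionMeta helper. A plausible reconstruction of that helper, inferred from the removed lines (field names, dims, and the "default" partition all come from the diff; the helper body itself is a sketch, not the committed code):

package querynode

import (
	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
)

// genTestCollectionMeta builds the two-field test schema (a dim-16 float
// vector plus an int32 "age" column) that every segment test used to
// construct inline. UniqueID and Timestamp are the querynode aliases.
func genTestCollectionMeta(collectionName string, collectionID UniqueID) *etcdpb.CollectionMeta {
	fieldVec := schemapb.FieldSchema{
		Name:         "vec",
		IsPrimaryKey: false,
		DataType:     schemapb.DataType_VECTOR_FLOAT,
		TypeParams: []*commonpb.KeyValuePair{
			{Key: "dim", Value: "16"},
		},
	}

	fieldInt := schemapb.FieldSchema{
		Name:         "age",
		IsPrimaryKey: false,
		DataType:     schemapb.DataType_INT32,
		TypeParams: []*commonpb.KeyValuePair{
			{Key: "dim", Value: "1"},
		},
	}

	schema := schemapb.CollectionSchema{
		Name:   collectionName,
		AutoID: true,
		Fields: []*schemapb.FieldSchema{&fieldVec, &fieldInt},
	}

	return &etcdpb.CollectionMeta{
		ID:            collectionID,
		Schema:        &schema,
		CreateTime:    Timestamp(0),
		SegmentIDs:    []UniqueID{0},
		PartitionTags: []string{"default"},
	}
}
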
func TestSegment_deleteSegment(t *testing.T) {
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: "collection0",
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

collection := newCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -131,52 +53,15 @@ func TestSegment_deleteSegment(t *testing.T) {

//-------------------------------------------------------------------------------------- stats functions
func TestSegment_getRowCount(t *testing.T) {
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: "collection0",
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

collection := newCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -219,52 +104,15 @@ func TestSegment_getRowCount(t *testing.T) {
}

func TestSegment_getDeletedCount(t *testing.T) {
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: "collection0",
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

collection := newCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -313,52 +161,15 @@ func TestSegment_getDeletedCount(t *testing.T) {
}

func TestSegment_getMemSize(t *testing.T) {
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: "collection0",
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

collection := newCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -402,53 +213,15 @@ func TestSegment_getMemSize(t *testing.T) {

//-------------------------------------------------------------------------------------- dm & search functions
func TestSegment_segmentInsert(t *testing.T) {
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: "collection0",
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

collection := newCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))

collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)
segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
assert.Equal(t, segmentID, segment.segmentID)
@ -486,52 +259,15 @@ func TestSegment_segmentInsert(t *testing.T) {
}

func TestSegment_segmentDelete(t *testing.T) {
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: "collection0",
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

collection := newCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -576,55 +312,15 @@ func TestSegment_segmentDelete(t *testing.T) {
}

func TestSegment_segmentSearch(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: "collection0",
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

collection := newCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -661,13 +357,6 @@ func TestSegment_segmentSearch(t *testing.T) {

dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"L2\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"

pulsarURL, _ := Params.pulsarAddress()
const receiveBufSize = 1024
searchProducerChannels := Params.searchChannelNames()
searchStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
searchStream.SetPulsarClient(pulsarURL)
searchStream.CreatePulsarProducers(searchProducerChannels)

var searchRawData []byte
for _, ele := range vec {
buf := make([]byte, 4)
@ -708,52 +397,15 @@ func TestSegment_segmentSearch(t *testing.T) {

//-------------------------------------------------------------------------------------- preDm functions
func TestSegment_segmentPreInsert(t *testing.T) {
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: "collection0",
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

collection := newCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -787,52 +439,15 @@ func TestSegment_segmentPreInsert(t *testing.T) {
}

func TestSegment_segmentPreDelete(t *testing.T) {
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: "collection0",
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

collection := newCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)

@ -14,11 +14,11 @@ import (

type statsService struct {
ctx context.Context
statsStream *msgstream.MsgStream
replica *collectionReplica
statsStream msgstream.MsgStream
replica collectionReplica
}

func newStatsService(ctx context.Context, replica *collectionReplica) *statsService {
func newStatsService(ctx context.Context, replica collectionReplica) *statsService {

return &statsService{
ctx: ctx,
@ -44,8 +44,8 @@ func (sService *statsService) start() {

var statsMsgStream msgstream.MsgStream = statsStream

sService.statsStream = &statsMsgStream
(*sService.statsStream).Start()
sService.statsStream = statsMsgStream
sService.statsStream.Start()

// start service
fmt.Println("do segments statistic in ", strconv.Itoa(sleepTimeInterval), "ms")
@ -60,11 +60,13 @@ func (sService *statsService) start() {
}

func (sService *statsService) close() {
(*sService.statsStream).Close()
if sService.statsStream != nil {
sService.statsStream.Close()
}
}

func (sService *statsService) sendSegmentStatistic() {
statisticData := (*sService.replica).getSegmentStatistics()
statisticData := sService.replica.getSegmentStatistics()

// fmt.Println("Publish segment statistic")
// fmt.Println(statisticData)
@ -82,7 +84,7 @@ func (sService *statsService) publicStatistic(statistic *internalpb.QueryNodeSeg
var msgPack = msgstream.MsgPack{
Msgs: []msgstream.TsMsg{msg},
}
err := (*sService.statsStream).Produce(&msgPack)
err := sService.statsStream.Produce(&msgPack)
if err != nil {
log.Println(err)
}

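This hunk is the heart of the refactor: statsService previously stored *msgstream.MsgStream and *collectionReplica, so every call site had to dereference, as in (*sService.statsStream).Produce(&msgPack). Since both types are interfaces, holding the interface value directly is the idiomatic form and the nil guard in close() still works. A self-contained sketch of the before/after (Producer and the services here are illustrative stand-ins, not Milvus types):

package main

import "fmt"

type Producer interface{ Produce(msg string) }

type stdoutProducer struct{}

func (stdoutProducer) Produce(msg string) { fmt.Println(msg) }

// Before: a pointer to an interface forces a dereference at every call site.
type serviceBefore struct{ stream *Producer }

// After: holding the interface value keeps call sites direct, and an unset
// field is simply a nil interface, so guarded Close/Produce still works.
type serviceAfter struct{ stream Producer }

func main() {
	var p Producer = stdoutProducer{}

	b := serviceBefore{stream: &p}
	(*b.stream).Produce("before: explicit dereference")

	a := serviceAfter{stream: p}
	a.stream.Produce("after: direct call")
	if a.stream != nil {
		a.stream.Produce("nil check mirrors the guarded close() in the hunk")
	}
}
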
@ -1,193 +1,42 @@
package querynode

import (
"context"
"testing"
"time"

"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
)

// NOTE: start pulsar before test
func TestStatsService_start(t *testing.T) {
Params.Init()
var ctx context.Context

if closeWithDeadline {
var cancel context.CancelFunc
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel = context.WithDeadline(context.Background(), d)
defer cancel()
} else {
ctx = context.Background()
}

// init query node
node := NewQueryNode(ctx, 0)

// init meta
collectionName := "collection0"
fieldVec := schemapb.FieldSchema{
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: collectionName,
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

var err = (*node.replica).addCollection(&collectionMeta, collectionMetaBlob)
assert.NoError(t, err)

collection, err := (*node.replica).getCollectionByName(collectionName)
assert.NoError(t, err)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, (*node.replica).getCollectionNum(), 1)

err = (*node.replica).addPartition(collection.ID(), collectionMeta.PartitionTags[0])
assert.NoError(t, err)

segmentID := UniqueID(0)
err = (*node.replica).addSegment(segmentID, collectionMeta.PartitionTags[0], UniqueID(0))
assert.NoError(t, err)

// start stats service
node.statsService = newStatsService(node.ctx, node.replica)
node := newQueryNode()
initTestMeta(t, node, "collection0", 0, 0)
node.statsService = newStatsService(node.queryNodeLoopCtx, node.replica)
node.statsService.start()
node.Close()
}

// NOTE: start pulsar before test
//NOTE: start pulsar before test
func TestSegmentManagement_SegmentStatisticService(t *testing.T) {
Params.Init()
var ctx context.Context

if closeWithDeadline {
var cancel context.CancelFunc
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel = context.WithDeadline(context.Background(), d)
defer cancel()
} else {
ctx = context.Background()
}

// init query node
pulsarURL, _ := Params.pulsarAddress()
node := NewQueryNode(ctx, 0)

// init meta
collectionName := "collection0"
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}

fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}

schema := schemapb.CollectionSchema{
Name: collectionName,
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}

collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}

collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)

var err = (*node.replica).addCollection(&collectionMeta, collectionMetaBlob)
assert.NoError(t, err)

collection, err := (*node.replica).getCollectionByName(collectionName)
assert.NoError(t, err)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, (*node.replica).getCollectionNum(), 1)

err = (*node.replica).addPartition(collection.ID(), collectionMeta.PartitionTags[0])
assert.NoError(t, err)

segmentID := UniqueID(0)
err = (*node.replica).addSegment(segmentID, collectionMeta.PartitionTags[0], UniqueID(0))
assert.NoError(t, err)
node := newQueryNode()
initTestMeta(t, node, "collection0", 0, 0)

const receiveBufSize = 1024
// start pulsar
producerChannels := []string{Params.statsChannelName()}

statsStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
pulsarURL, _ := Params.pulsarAddress()

statsStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
statsStream.SetPulsarClient(pulsarURL)
statsStream.CreatePulsarProducers(producerChannels)

var statsMsgStream msgstream.MsgStream = statsStream

node.statsService = newStatsService(node.ctx, node.replica)
node.statsService.statsStream = &statsMsgStream
(*node.statsService.statsStream).Start()
node.statsService = newStatsService(node.queryNodeLoopCtx, node.replica)
node.statsService.statsStream = statsMsgStream
node.statsService.statsStream.Start()

// send stats
node.statsService.sendSegmentStatistic()
node.Close()
}

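Both stats tests now delegate their collection/partition/segment setup to initTestMeta. Reconstructed from the removed assertions, the helper presumably looks roughly like this; treat it as a sketch of what this commit's shared test utility does, not the committed body:

package querynode

import (
	"testing"

	"github.com/golang/protobuf/proto"
	"github.com/stretchr/testify/assert"
)

// initTestMeta registers a fresh test collection on the node's replica and
// adds the default partition plus one segment, asserting at each step the
// same conditions the inlined setup used to assert.
func initTestMeta(t *testing.T, node *QueryNode, collectionName string, collectionID UniqueID, segmentID UniqueID) {
	collectionMeta := genTestCollectionMeta(collectionName, collectionID)
	collectionMetaBlob := proto.MarshalTextString(collectionMeta)
	assert.NotEqual(t, "", collectionMetaBlob)

	err := node.replica.addCollection(collectionMeta, collectionMetaBlob)
	assert.NoError(t, err)

	collection, err := node.replica.getCollectionByName(collectionName)
	assert.NoError(t, err)
	assert.Equal(t, collection.meta.ID, collectionID)
	assert.Equal(t, node.replica.getCollectionNum(), 1)

	err = node.replica.addPartition(collection.ID(), collectionMeta.PartitionTags[0])
	assert.NoError(t, err)

	err = node.replica.addSegment(segmentID, collectionMeta.PartitionTags[0], collectionID)
	assert.NoError(t, err)
}
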
@ -36,11 +36,11 @@ type tSafeImpl struct {
watcherList []*tSafeWatcher
}

func newTSafe() *tSafe {
func newTSafe() tSafe {
var t tSafe = &tSafeImpl{
watcherList: make([]*tSafeWatcher, 0),
}
return &t
return t
}

func (ts *tSafeImpl) registerTSafeWatcher(t *tSafeWatcher) {

@ -9,13 +9,13 @@ import (
func TestTSafe_GetAndSet(t *testing.T) {
tSafe := newTSafe()
watcher := newTSafeWatcher()
(*tSafe).registerTSafeWatcher(watcher)
tSafe.registerTSafeWatcher(watcher)

go func() {
watcher.hasUpdate()
timestamp := (*tSafe).get()
timestamp := tSafe.get()
assert.Equal(t, timestamp, Timestamp(1000))
}()

(*tSafe).set(Timestamp(1000))
tSafe.set(Timestamp(1000))
}

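The same pointer-to-interface cleanup reaches tSafe: newTSafe now returns the tSafe interface value, so the test calls tSafe.registerTSafeWatcher(...) without the (*tSafe) dereference. A compact, runnable sketch of the pattern (interface and impl names come from the diff; the method set is trimmed to get/set, and the real tSafeImpl also keeps a watcherList and locking):

package main

type Timestamp = uint64

type tSafe interface {
	get() Timestamp
	set(t Timestamp)
}

type tSafeImpl struct {
	tSafeValue Timestamp
}

func (ts *tSafeImpl) get() Timestamp  { return ts.tSafeValue }
func (ts *tSafeImpl) set(t Timestamp) { ts.tSafeValue = t }

// Returning the interface value (not *tSafe) keeps call sites direct:
// the concrete *tSafeImpl satisfies tSafe, and callers never dereference.
func newTSafe() tSafe {
	return &tSafeImpl{}
}

func main() {
	ts := newTSafe()
	ts.set(Timestamp(1000))
	_ = ts.get()
}
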
3
internal/storage/cwrapper/.gitignore
vendored
3
internal/storage/cwrapper/.gitignore
vendored
@ -1,3 +0,0 @@
cmake-build-debug
.idea
cmake_build
@ -1,42 +0,0 @@
cmake_minimum_required(VERSION 3.14...3.17 FATAL_ERROR)
project(wrapper)

set(CMAKE_CXX_STANDARD 17)

###################################################################################################
# - cmake modules ---------------------------------------------------------------------------------

set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/" ${CMAKE_MODULE_PATH})

###################################################################################################
# - build arrow ------------------------------------------------------------------------------------

message(STATUS "BUILDING ARROW")
include(ConfigureArrow)

if(ARROW_FOUND)
message(STATUS "Apache Arrow found in ${ARROW_INCLUDE_DIR}")
else()
message(FATAL_ERROR "Apache Arrow not found, please check your settings.")
endif(ARROW_FOUND)

add_library(arrow STATIC IMPORTED ${ARROW_LIB})
add_library(parquet STATIC IMPORTED ${PARQUET_LIB})
add_library(thrift STATIC IMPORTED ${THRIFT_LIB})
add_library(utf8proc STATIC IMPORTED ${UTF8PROC_LIB})

if(ARROW_FOUND)
set_target_properties(arrow PROPERTIES IMPORTED_LOCATION ${ARROW_LIB})
set_target_properties(parquet PROPERTIES IMPORTED_LOCATION ${PARQUET_LIB})
set_target_properties(thrift PROPERTIES IMPORTED_LOCATION ${THRIFT_LIB})
set_target_properties(utf8proc PROPERTIES IMPORTED_LOCATION ${UTF8PROC_LIB})
endif(ARROW_FOUND)

###################################################################################################

include_directories(${ARROW_INCLUDE_DIR})
include_directories(${PROJECT_SOURCE_DIR})

add_library(wrapper ParquetWrapper.cpp ParquetWrapper.h ColumnType.h PayloadStream.h PayloadStream.cpp)

add_subdirectory(test)
@ -1,42 +0,0 @@
#pragma once
enum ColumnType : int {
NONE = 0,
BOOL = 1,
INT8 = 2,
INT16 = 3,
INT32 = 4,
INT64 = 5,
FLOAT = 10,
DOUBLE = 11,
STRING = 20,
VECTOR_BINARY = 100,
VECTOR_FLOAT = 101
};

enum ErrorCode : int {
SUCCESS = 0,
UNEXPECTED_ERROR = 1,
CONNECT_FAILED = 2,
PERMISSION_DENIED = 3,
COLLECTION_NOT_EXISTS = 4,
ILLEGAL_ARGUMENT = 5,
ILLEGAL_DIMENSION = 7,
ILLEGAL_INDEX_TYPE = 8,
ILLEGAL_COLLECTION_NAME = 9,
ILLEGAL_TOPK = 10,
ILLEGAL_ROWRECORD = 11,
ILLEGAL_VECTOR_ID = 12,
ILLEGAL_SEARCH_RESULT = 13,
FILE_NOT_FOUND = 14,
META_FAILED = 15,
CACHE_FAILED = 16,
CANNOT_CREATE_FOLDER = 17,
CANNOT_CREATE_FILE = 18,
CANNOT_DELETE_FOLDER = 19,
CANNOT_DELETE_FILE = 20,
BUILD_INDEX_ERROR = 21,
ILLEGAL_NLIST = 22,
ILLEGAL_METRIC_TYPE = 23,
OUT_OF_MEMORY = 24,
DD_REQUEST_RACE = 1000
};
@ -1,497 +0,0 @@
#include "ParquetWrapper.h"
#include "PayloadStream.h"

static const char *ErrorMsg(const std::string &msg) {
if (msg.empty()) return nullptr;
auto ret = (char *) malloc(msg.size() + 1);
std::memcpy(ret, msg.c_str(), msg.size());
ret[msg.size()] = '\0';
return ret;
}

extern "C" CPayloadWriter NewPayloadWriter(int columnType) {
auto p = new wrapper::PayloadWriter;
p->builder = nullptr;
p->schema = nullptr;
p->output = nullptr;
p->dimension = wrapper::EMPTY_DIMENSION;
p->rows = 0;
switch (static_cast<ColumnType>(columnType)) {
case ColumnType::BOOL : {
p->columnType = ColumnType::BOOL;
p->builder = std::make_shared<arrow::BooleanBuilder>();
p->schema = arrow::schema({arrow::field("val", arrow::boolean())});
break;
}
case ColumnType::INT8 : {
p->columnType = ColumnType::INT8;
p->builder = std::make_shared<arrow::Int8Builder>();
p->schema = arrow::schema({arrow::field("val", arrow::int8())});
break;
}
case ColumnType::INT16 : {
p->columnType = ColumnType::INT16;
p->builder = std::make_shared<arrow::Int16Builder>();
p->schema = arrow::schema({arrow::field("val", arrow::int16())});
break;
}
case ColumnType::INT32 : {
p->columnType = ColumnType::INT32;
p->builder = std::make_shared<arrow::Int32Builder>();
p->schema = arrow::schema({arrow::field("val", arrow::int32())});
break;
}
case ColumnType::INT64 : {
p->columnType = ColumnType::INT64;
p->builder = std::make_shared<arrow::Int64Builder>();
p->schema = arrow::schema({arrow::field("val", arrow::int64())});
break;
}
case ColumnType::FLOAT : {
p->columnType = ColumnType::FLOAT;
p->builder = std::make_shared<arrow::FloatBuilder>();
p->schema = arrow::schema({arrow::field("val", arrow::float32())});
break;
}
case ColumnType::DOUBLE : {
p->columnType = ColumnType::DOUBLE;
p->builder = std::make_shared<arrow::DoubleBuilder>();
p->schema = arrow::schema({arrow::field("val", arrow::float64())});
break;
}
case ColumnType::STRING : {
p->columnType = ColumnType::STRING;
p->builder = std::make_shared<arrow::StringBuilder>();
p->schema = arrow::schema({arrow::field("val", arrow::utf8())});
break;
}
case ColumnType::VECTOR_BINARY : {
p->columnType = ColumnType::VECTOR_BINARY;
p->dimension = wrapper::EMPTY_DIMENSION;
break;
}
case ColumnType::VECTOR_FLOAT : {
p->columnType = ColumnType::VECTOR_FLOAT;
p->dimension = wrapper::EMPTY_DIMENSION;
break;
}
default: {
delete p;
return nullptr;
}
}
return reinterpret_cast<CPayloadWriter>(p);
}

template<typename DT, typename BT>
CStatus AddValuesToPayload(CPayloadWriter payloadWriter, DT *values, int length) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
if (length <= 0) return st;

auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
auto builder = std::dynamic_pointer_cast<BT>(p->builder);
if (builder == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}

if (p->output != nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("payload has finished");
return st;
}

auto ast = builder->AppendValues(values, values + length);
if (!ast.ok()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg(ast.message());
return st;
}
p->rows += length;
return st;
}

extern "C" CStatus AddBooleanToPayload(CPayloadWriter payloadWriter, bool *values, int length) {
return AddValuesToPayload<bool, arrow::BooleanBuilder>(payloadWriter, values, length);
}

extern "C" CStatus AddInt8ToPayload(CPayloadWriter payloadWriter, int8_t *values, int length) {
return AddValuesToPayload<int8_t, arrow::Int8Builder>(payloadWriter, values, length);
}
extern "C" CStatus AddInt16ToPayload(CPayloadWriter payloadWriter, int16_t *values, int length) {
return AddValuesToPayload<int16_t, arrow::Int16Builder>(payloadWriter, values, length);
}
extern "C" CStatus AddInt32ToPayload(CPayloadWriter payloadWriter, int32_t *values, int length) {
return AddValuesToPayload<int32_t, arrow::Int32Builder>(payloadWriter, values, length);
}
extern "C" CStatus AddInt64ToPayload(CPayloadWriter payloadWriter, int64_t *values, int length) {
return AddValuesToPayload<int64_t, arrow::Int64Builder>(payloadWriter, values, length);
}
extern "C" CStatus AddFloatToPayload(CPayloadWriter payloadWriter, float *values, int length) {
return AddValuesToPayload<float, arrow::FloatBuilder>(payloadWriter, values, length);
}
extern "C" CStatus AddDoubleToPayload(CPayloadWriter payloadWriter, double *values, int length) {
return AddValuesToPayload<double, arrow::DoubleBuilder>(payloadWriter, values, length);
}

extern "C" CStatus AddOneStringToPayload(CPayloadWriter payloadWriter, char *cstr, int str_size) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;

auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
auto builder = std::dynamic_pointer_cast<arrow::StringBuilder>(p->builder);
if (builder == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}
if (p->output != nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("payload has finished");
return st;
}
arrow::Status ast;
if (cstr == nullptr || str_size < 0) {
ast = builder->AppendNull();
} else {
ast = builder->Append(cstr, str_size);
}
if (!ast.ok()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg(ast.message());
return st;
}
p->rows++;
return st;
}

extern "C" CStatus AddBinaryVectorToPayload(CPayloadWriter payloadWriter, uint8_t *values, int dimension, int length) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
if (length <= 0) return st;

auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
if (p->dimension == wrapper::EMPTY_DIMENSION) {
if ((dimension % 8) || (dimension <= 0)) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect dimension value");
return st;
}
if (p->builder != nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}
p->builder = std::make_shared<arrow::FixedSizeBinaryBuilder>(arrow::fixed_size_binary(dimension / 8));
p->schema = arrow::schema({arrow::field("val", arrow::fixed_size_binary(dimension / 8))});
p->dimension = dimension;
} else if (p->dimension != dimension) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("dimension changed");
return st;
}
auto builder = std::dynamic_pointer_cast<arrow::FixedSizeBinaryBuilder>(p->builder);
if (builder == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}
if (p->output != nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("payload has finished");
return st;
}
auto ast = builder->AppendValues(values, (dimension / 8) * length);
if (!ast.ok()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg(ast.message());
return st;
}
p->rows += length;
return st;
}

extern "C" CStatus AddFloatVectorToPayload(CPayloadWriter payloadWriter, float *values, int dimension, int length) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
if (length <= 0) return st;

auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
if (p->dimension == wrapper::EMPTY_DIMENSION) {
if (p->builder != nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}
p->builder = std::make_shared<arrow::FixedSizeBinaryBuilder>(
arrow::fixed_size_binary(dimension * sizeof(float)));
p->schema = arrow::schema({arrow::field("val", arrow::fixed_size_binary(dimension * sizeof(float)))});
p->dimension = dimension;
} else if (p->dimension != dimension) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("dimension changed");
return st;
}
auto builder = std::dynamic_pointer_cast<arrow::FixedSizeBinaryBuilder>(p->builder);
if (builder == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}
if (p->output != nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("payload has finished");
return st;
}
auto ast = builder->AppendValues(reinterpret_cast<const uint8_t *>(values), dimension * length * sizeof(float));
if (!ast.ok()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg(ast.message());
return st;
}
p->rows += length;
return st;
}

extern "C" CStatus FinishPayloadWriter(CPayloadWriter payloadWriter) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
if (p->builder == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("arrow builder is nullptr");
return st;
}
if (p->output == nullptr) {
std::shared_ptr<arrow::Array> array;
auto ast = p->builder->Finish(&array);
if (!ast.ok()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg(ast.message());
return st;
}
auto table = arrow::Table::Make(p->schema, {array});
p->output = std::make_shared<wrapper::PayloadOutputStream>();
ast = parquet::arrow::WriteTable(*table, arrow::default_memory_pool(), p->output, 1024 * 1024 * 1024);
if (!ast.ok()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg(ast.message());
return st;
}
}
return st;
}

CBuffer GetPayloadBufferFromWriter(CPayloadWriter payloadWriter) {
CBuffer buf;

auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
if (p->output == nullptr) {
buf.length = 0;
buf.data = nullptr;
}
auto &output = p->output->Buffer();
buf.length = static_cast<int>(output.size());
buf.data = (char *) (output.data());
return buf;
}

int GetPayloadLengthFromWriter(CPayloadWriter payloadWriter) {
auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
return p->rows;
}

extern "C" CStatus ReleasePayloadWriter(CPayloadWriter handler) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadWriter *>(handler);
if (p != nullptr) delete p;
return st;
}

extern "C" CPayloadReader NewPayloadReader(int columnType, uint8_t *buffer, int64_t buf_size) {
auto p = new wrapper::PayloadReader;
p->bValues = nullptr;
p->input = std::make_shared<wrapper::PayloadInputStream>(buffer, buf_size);
auto st = parquet::arrow::OpenFile(p->input, arrow::default_memory_pool(), &p->reader);
if (!st.ok()) {
delete p;
return nullptr;
}
st = p->reader->ReadTable(&p->table);
if (!st.ok()) {
delete p;
return nullptr;
}
p->column = p->table->column(0);
assert(p->column != nullptr);
assert(p->column->chunks().size() == 1);
p->array = p->column->chunk(0);

switch (columnType) {
case ColumnType::BOOL :
case ColumnType::INT8 :
case ColumnType::INT16 :
case ColumnType::INT32 :
case ColumnType::INT64 :
case ColumnType::FLOAT :
case ColumnType::DOUBLE :
case ColumnType::STRING :
case ColumnType::VECTOR_BINARY :
case ColumnType::VECTOR_FLOAT : {
break;
}
default: {
delete p;
return nullptr;
}
}
return reinterpret_cast<CPayloadReader>(p);
}

extern "C" CStatus GetBoolFromPayload(CPayloadReader payloadReader, bool **values, int *length) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadReader *>(payloadReader);
if (p->bValues == nullptr) {
auto array = std::dynamic_pointer_cast<arrow::BooleanArray>(p->array);
if (array == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}
int len = array->length();
p->bValues = new bool[len];
for (int i = 0; i < len; i++) {
p->bValues[i] = array->Value(i);
}
}
*values = p->bValues;
*length = p->array->length();
return st;
}

template<typename DT, typename AT>
CStatus GetValuesFromPayload(CPayloadReader payloadReader, DT **values, int *length) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadReader *>(payloadReader);
auto array = std::dynamic_pointer_cast<AT>(p->array);
if (array == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}
*length = array->length();
*values = (DT *) array->raw_values();
return st;
}

extern "C" CStatus GetInt8FromPayload(CPayloadReader payloadReader, int8_t **values, int *length) {
return GetValuesFromPayload<int8_t, arrow::Int8Array>(payloadReader, values, length);
}
extern "C" CStatus GetInt16FromPayload(CPayloadReader payloadReader, int16_t **values, int *length) {
return GetValuesFromPayload<int16_t, arrow::Int16Array>(payloadReader, values, length);
}
extern "C" CStatus GetInt32FromPayload(CPayloadReader payloadReader, int32_t **values, int *length) {
return GetValuesFromPayload<int32_t, arrow::Int32Array>(payloadReader, values, length);
}
extern "C" CStatus GetInt64FromPayload(CPayloadReader payloadReader, int64_t **values, int *length) {
return GetValuesFromPayload<int64_t, arrow::Int64Array>(payloadReader, values, length);
}
extern "C" CStatus GetFloatFromPayload(CPayloadReader payloadReader, float **values, int *length) {
return GetValuesFromPayload<float, arrow::FloatArray>(payloadReader, values, length);
}
extern "C" CStatus GetDoubleFromPayload(CPayloadReader payloadReader, double **values, int *length) {
return GetValuesFromPayload<double, arrow::DoubleArray>(payloadReader, values, length);
}
extern "C" CStatus GetOneStringFromPayload(CPayloadReader payloadReader, int idx, char **cstr, int *str_size) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadReader *>(payloadReader);
auto array = std::dynamic_pointer_cast<arrow::StringArray>(p->array);
if (array == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("Incorrect data type");
return st;
}
if (idx >= array->length()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("memory overflow");
return st;
}
arrow::StringArray::offset_type length;
*cstr = (char *) array->GetValue(idx, &length);
*str_size = length;
return st;
}
extern "C" CStatus GetBinaryVectorFromPayload(CPayloadReader payloadReader,
uint8_t **values,
int *dimension,
int *length) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadReader *>(payloadReader);
auto array = std::dynamic_pointer_cast<arrow::FixedSizeBinaryArray>(p->array);
if (array == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("Incorrect data type");
return st;
}
*dimension = array->byte_width() * 8;
*length = array->length() / array->byte_width();
*values = (uint8_t *) array->raw_values();
return st;
}
extern "C" CStatus GetFloatVectorFromPayload(CPayloadReader payloadReader,
float **values,
int *dimension,
int *length) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadReader *>(payloadReader);
auto array = std::dynamic_pointer_cast<arrow::FixedSizeBinaryArray>(p->array);
if (array == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("Incorrect data type");
return st;
}
*dimension = array->byte_width() / sizeof(float);
*length = array->length() / array->byte_width();
*values = (float *) array->raw_values();
return st;
}

extern "C" int GetPayloadLengthFromReader(CPayloadReader payloadReader) {
auto p = reinterpret_cast<wrapper::PayloadReader *>(payloadReader);
if (p->array == nullptr) return 0;
auto ba = std::dynamic_pointer_cast<arrow::FixedSizeBinaryArray>(p->array);
if (ba == nullptr) {
return p->array->length();
} else {
return ba->length() / ba->byte_width();
}
}

extern "C" CStatus ReleasePayloadReader(CPayloadReader payloadReader) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadReader *>(payloadReader);
delete[] p->bValues;
delete p;
return st;
}
@ -1,58 +0,0 @@
#pragma once

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>

typedef void *CPayloadWriter;

typedef struct CBuffer {
char *data;
int length;
} CBuffer;

typedef struct CStatus {
int error_code;
const char *error_msg;
} CStatus;

CPayloadWriter NewPayloadWriter(int columnType);
CStatus AddBooleanToPayload(CPayloadWriter payloadWriter, bool *values, int length);
CStatus AddInt8ToPayload(CPayloadWriter payloadWriter, int8_t *values, int length);
CStatus AddInt16ToPayload(CPayloadWriter payloadWriter, int16_t *values, int length);
CStatus AddInt32ToPayload(CPayloadWriter payloadWriter, int32_t *values, int length);
CStatus AddInt64ToPayload(CPayloadWriter payloadWriter, int64_t *values, int length);
CStatus AddFloatToPayload(CPayloadWriter payloadWriter, float *values, int length);
CStatus AddDoubleToPayload(CPayloadWriter payloadWriter, double *values, int length);
CStatus AddOneStringToPayload(CPayloadWriter payloadWriter, char *cstr, int str_size);
CStatus AddBinaryVectorToPayload(CPayloadWriter payloadWriter, uint8_t *values, int dimension, int length);
CStatus AddFloatVectorToPayload(CPayloadWriter payloadWriter, float *values, int dimension, int length);

CStatus FinishPayloadWriter(CPayloadWriter payloadWriter);
CBuffer GetPayloadBufferFromWriter(CPayloadWriter payloadWriter);
int GetPayloadLengthFromWriter(CPayloadWriter payloadWriter);
CStatus ReleasePayloadWriter(CPayloadWriter handler);

//============= payload reader ======================

typedef void *CPayloadReader;
CPayloadReader NewPayloadReader(int columnType, uint8_t *buffer, int64_t buf_size);
CStatus GetBoolFromPayload(CPayloadReader payloadReader, bool **values, int *length);
CStatus GetInt8FromPayload(CPayloadReader payloadReader, int8_t **values, int *length);
CStatus GetInt16FromPayload(CPayloadReader payloadReader, int16_t **values, int *length);
CStatus GetInt32FromPayload(CPayloadReader payloadReader, int32_t **values, int *length);
CStatus GetInt64FromPayload(CPayloadReader payloadReader, int64_t **values, int *length);
CStatus GetFloatFromPayload(CPayloadReader payloadReader, float **values, int *length);
CStatus GetDoubleFromPayload(CPayloadReader payloadReader, double **values, int *length);
CStatus GetOneStringFromPayload(CPayloadReader payloadReader, int idx, char **cstr, int *str_size);
CStatus GetBinaryVectorFromPayload(CPayloadReader payloadReader, uint8_t **values, int *dimension, int *length);
CStatus GetFloatVectorFromPayload(CPayloadReader payloadReader, float **values, int *dimension, int *length);

int GetPayloadLengthFromReader(CPayloadReader payloadReader);
CStatus ReleasePayloadReader(CPayloadReader payloadReader);

#ifdef __cplusplus
}
#endif
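The removed ParquetWrapper.h above is the C API the Go storage layer drives through cgo. A minimal write-then-read round trip against that header, as a sketch only: the #cgo linker flag and header search path are assumptions about how the wrapper library is built, and the INT32 = 4 constant comes from ColumnType.h above.

package main

/*
#cgo LDFLAGS: -lwrapper
#include <stdbool.h>
#include <stdint.h>
#include "ParquetWrapper.h"
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func main() {
	// ColumnType::INT32 == 4 in ColumnType.h.
	w := C.NewPayloadWriter(C.int(4))
	defer C.ReleasePayloadWriter(w)

	vals := []int32{1, 2, 3, 4}
	st := C.AddInt32ToPayload(w, (*C.int32_t)(unsafe.Pointer(&vals[0])), C.int(len(vals)))
	if st.error_code != 0 {
		panic("AddInt32ToPayload failed")
	}
	if st := C.FinishPayloadWriter(w); st.error_code != 0 {
		panic("FinishPayloadWriter failed")
	}

	// The writer's buffer is a parquet file image; feed it straight back
	// into a reader to recover the row count.
	buf := C.GetPayloadBufferFromWriter(w)
	r := C.NewPayloadReader(C.int(4), (*C.uint8_t)(unsafe.Pointer(buf.data)), C.int64_t(buf.length))
	defer C.ReleasePayloadReader(r)

	fmt.Println("rows:", int(C.GetPayloadLengthFromReader(r)))
}
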
@ -1,91 +0,0 @@
#include "PayloadStream.h"

#include <cstring>  // std::memcpy (was missing in the original file)

namespace wrapper {

PayloadOutputStream::PayloadOutputStream() {
  buffer_.reserve(1024 * 1024);
  closed_ = false;
}

PayloadOutputStream::~PayloadOutputStream() noexcept {
}

arrow::Status PayloadOutputStream::Close() {
  closed_ = true;
  return arrow::Status::OK();
}

arrow::Result<int64_t> PayloadOutputStream::Tell() const {
  return arrow::Result<int64_t>(buffer_.size());
}

bool PayloadOutputStream::closed() const {
  return closed_;
}

arrow::Status PayloadOutputStream::Write(const void *data, int64_t nbytes) {
  if (nbytes <= 0) return arrow::Status::OK();
  auto size = buffer_.size();
  buffer_.resize(size + nbytes);
  std::memcpy(buffer_.data() + size, data, nbytes);
  return arrow::Status::OK();
}

arrow::Status PayloadOutputStream::Flush() {
  return arrow::Status::OK();
}

const std::vector<uint8_t> &PayloadOutputStream::Buffer() const {
  return buffer_;
}

PayloadInputStream::PayloadInputStream(const uint8_t *data, int64_t size) :
    data_(data), size_(size), tell_(0), closed_(false) {
}

PayloadInputStream::~PayloadInputStream() noexcept {
}

arrow::Status PayloadInputStream::Close() {
  closed_ = true;
  return arrow::Status::OK();
}

bool PayloadInputStream::closed() const {
  return closed_;
}

arrow::Result<int64_t> PayloadInputStream::Tell() const {
  return arrow::Result<int64_t>(tell_);
}

arrow::Status PayloadInputStream::Seek(int64_t position) {
  if (position < 0 || position >= size_) return arrow::Status::IOError("invalid position");
  tell_ = position;
  return arrow::Status::OK();
}

arrow::Result<int64_t> PayloadInputStream::Read(int64_t nbytes, void *out) {
  auto remain = size_ - tell_;
  if (nbytes > remain) nbytes = remain;
  std::memcpy(out, data_ + tell_, nbytes);
  tell_ += nbytes;
  return arrow::Result<int64_t>(nbytes);
}

arrow::Result<std::shared_ptr<arrow::Buffer>> PayloadInputStream::Read(int64_t nbytes) {
  auto remain = size_ - tell_;
  if (nbytes > remain) nbytes = remain;
  auto buf = std::make_shared<arrow::Buffer>(data_ + tell_, nbytes);
  tell_ += nbytes;
  return arrow::Result<std::shared_ptr<arrow::Buffer>>(buf);
}

arrow::Result<int64_t> PayloadInputStream::GetSize() {
  return arrow::Result<int64_t>(size_);
}

}  // namespace wrapper
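These two streams are the only glue between Arrow's parquet reader/writer and a caller-owned byte buffer. Note in particular that Seek is bounds-checked while Read clamps to the bytes remaining instead of erroring at EOF. A small sketch of those semantics (assuming PayloadStream.h from this diff is on the include path):

    #include "PayloadStream.h"
    #include <cassert>

    int main() {
        uint8_t bytes[8] = {0, 1, 2, 3, 4, 5, 6, 7};
        wrapper::PayloadInputStream is(bytes, sizeof(bytes));

        assert(is.Seek(6).ok());    // within [0, size): accepted
        assert(!is.Seek(8).ok());   // position == size: IOError

        uint8_t out[4];
        auto n = is.Read(4, out);   // only 2 bytes remain after Seek(6)
        assert(n.ok() && *n == 2);  // Read reports the clamped count
        return 0;
    }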
@ -1,75 +0,0 @@
#pragma once

#include <arrow/api.h>
#include <arrow/io/interfaces.h>
#include <parquet/arrow/writer.h>
#include <parquet/arrow/reader.h>
#include "ColumnType.h"

namespace wrapper {

class PayloadOutputStream;
class PayloadInputStream;

constexpr int EMPTY_DIMENSION = -1;

struct PayloadWriter {
  ColumnType columnType;
  int dimension;  // binary vector, float vector
  std::shared_ptr<arrow::ArrayBuilder> builder;
  std::shared_ptr<arrow::Schema> schema;
  std::shared_ptr<PayloadOutputStream> output;
  int rows;
};

struct PayloadReader {
  ColumnType column_type;
  std::shared_ptr<PayloadInputStream> input;
  std::unique_ptr<parquet::arrow::FileReader> reader;
  std::shared_ptr<arrow::Table> table;
  std::shared_ptr<arrow::ChunkedArray> column;
  std::shared_ptr<arrow::Array> array;
  bool *bValues;
};

class PayloadOutputStream : public arrow::io::OutputStream {
 public:
  PayloadOutputStream();
  ~PayloadOutputStream();

  arrow::Status Close() override;
  arrow::Result<int64_t> Tell() const override;
  bool closed() const override;
  arrow::Status Write(const void *data, int64_t nbytes) override;
  arrow::Status Flush() override;

 public:
  const std::vector<uint8_t> &Buffer() const;

 private:
  std::vector<uint8_t> buffer_;
  bool closed_;
};

class PayloadInputStream : public arrow::io::RandomAccessFile {
 public:
  PayloadInputStream(const uint8_t *data, int64_t size);
  ~PayloadInputStream();

  arrow::Status Close() override;
  arrow::Result<int64_t> Tell() const override;
  bool closed() const override;
  arrow::Status Seek(int64_t position) override;
  arrow::Result<int64_t> Read(int64_t nbytes, void *out) override;
  arrow::Result<std::shared_ptr<arrow::Buffer>> Read(int64_t nbytes) override;
  arrow::Result<int64_t> GetSize() override;

 private:
  const uint8_t *data_;
  const int64_t size_;
  int64_t tell_;
  bool closed_;
};

}  // namespace wrapper
@ -1,95 +0,0 @@
set(ARROW_ROOT ${CMAKE_BINARY_DIR}/arrow)

set(ARROW_CMAKE_ARGS " -DARROW_WITH_LZ4=OFF"
                     " -DARROW_WITH_ZSTD=OFF"
                     " -DARROW_WITH_BROTLI=OFF"
                     " -DARROW_WITH_SNAPPY=OFF"
                     " -DARROW_WITH_ZLIB=OFF"
                     " -DARROW_BUILD_STATIC=ON"
                     " -DARROW_BUILD_SHARED=OFF"
                     " -DARROW_BOOST_USE_SHARED=OFF"
                     " -DARROW_BUILD_TESTS=OFF"
                     " -DARROW_TEST_MEMCHECK=OFF"
                     " -DARROW_BUILD_BENCHMARKS=OFF"
                     " -DARROW_CUDA=OFF"
                     " -DARROW_JEMALLOC=OFF"
                     " -DARROW_PYTHON=OFF"
                     " -DARROW_BUILD_UTILITIES=OFF"
                     " -DARROW_PARQUET=ON"
                     " -DPARQUET_BUILD_SHARED=OFF"
                     " -DARROW_S3=OFF"
                     " -DCMAKE_VERBOSE_MAKEFILE=ON")

configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/Templates/Arrow.CMakeLists.txt.cmake"
               "${ARROW_ROOT}/CMakeLists.txt")

file(MAKE_DIRECTORY "${ARROW_ROOT}/build")
file(MAKE_DIRECTORY "${ARROW_ROOT}/install")

execute_process(
    COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" .
    RESULT_VARIABLE ARROW_CONFIG
    WORKING_DIRECTORY ${ARROW_ROOT})

if(ARROW_CONFIG)
    message(FATAL_ERROR "Configuring Arrow failed: " ${ARROW_CONFIG})
endif(ARROW_CONFIG)

set(PARALLEL_BUILD -j)
if($ENV{PARALLEL_LEVEL})
    set(NUM_JOBS $ENV{PARALLEL_LEVEL})
    set(PARALLEL_BUILD "${PARALLEL_BUILD}${NUM_JOBS}")
endif($ENV{PARALLEL_LEVEL})

if(${NUM_JOBS})
    if(${NUM_JOBS} EQUAL 1)
        message(STATUS "ARROW BUILD: Enabling Sequential CMake build")
    elseif(${NUM_JOBS} GREATER 1)
        message(STATUS "ARROW BUILD: Enabling Parallel CMake build with ${NUM_JOBS} jobs")
    endif(${NUM_JOBS} EQUAL 1)
else()
    message(STATUS "ARROW BUILD: Enabling Parallel CMake build with all threads")
endif(${NUM_JOBS})

execute_process(
    COMMAND ${CMAKE_COMMAND} --build .. -- ${PARALLEL_BUILD}
    RESULT_VARIABLE ARROW_BUILD
    WORKING_DIRECTORY ${ARROW_ROOT}/build)

if(ARROW_BUILD)
    message(FATAL_ERROR "Building Arrow failed: " ${ARROW_BUILD})
endif(ARROW_BUILD)

message(STATUS "Arrow installed here: " ${ARROW_ROOT}/install)
set(ARROW_LIBRARY_DIR "${ARROW_ROOT}/install/lib")
set(ARROW_INCLUDE_DIR "${ARROW_ROOT}/install/include")

find_library(ARROW_LIB arrow
    NO_DEFAULT_PATH
    HINTS "${ARROW_LIBRARY_DIR}")
message(STATUS "Arrow library: " ${ARROW_LIB})

find_library(PARQUET_LIB parquet
    NO_DEFAULT_PATH
    HINTS "${ARROW_LIBRARY_DIR}")
message(STATUS "Parquet library: " ${PARQUET_LIB})

find_library(THRIFT_LIB thrift
    NO_DEFAULT_PATH
    HINTS "${ARROW_ROOT}/build/thrift_ep-install/lib")
message(STATUS "Thrift library: " ${THRIFT_LIB})

find_library(UTF8PROC_LIB utf8proc
    NO_DEFAULT_PATH
    HINTS "${ARROW_ROOT}/build/utf8proc_ep-install/lib")
message(STATUS "utf8proc library: " ${UTF8PROC_LIB})

if(ARROW_LIB AND PARQUET_LIB AND THRIFT_LIB AND UTF8PROC_LIB)
    set(ARROW_FOUND TRUE)
endif(ARROW_LIB AND PARQUET_LIB AND THRIFT_LIB AND UTF8PROC_LIB)

message(STATUS "FlatBuffers installed here: " ${FLATBUFFERS_ROOT})
set(FLATBUFFERS_INCLUDE_DIR "${FLATBUFFERS_ROOT}/include")
set(FLATBUFFERS_LIBRARY_DIR "${FLATBUFFERS_ROOT}/lib")

add_definitions(-DARROW_METADATA_V4)
@ -1,30 +0,0 @@
#=============================================================================
# Copyright (c) 2018-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
cmake_minimum_required(VERSION 3.14...3.17 FATAL_ERROR)

project(wrapper-Arrow)

include(ExternalProject)

ExternalProject_Add(Arrow
    GIT_REPOSITORY https://github.com/apache/arrow.git
    GIT_TAG apache-arrow-2.0.0
    GIT_SHALLOW true
    SOURCE_DIR "${ARROW_ROOT}/arrow"
    SOURCE_SUBDIR "cpp"
    BINARY_DIR "${ARROW_ROOT}/build"
    INSTALL_DIR "${ARROW_ROOT}/install"
    CMAKE_ARGS ${ARROW_CMAKE_ARGS} -DCMAKE_INSTALL_PREFIX=${ARROW_ROOT}/install)
@ -1,19 +0,0 @@
add_executable(wrapper_test
    ParquetWrapperTest.cpp)

include(FetchContent)
FetchContent_Declare(googletest
    URL "https://github.com/google/googletest/archive/release-1.10.0.tar.gz")
set(BUILD_GMOCK OFF CACHE BOOL "" FORCE)
set(INSTALL_GTEST OFF CACHE BOOL "" FORCE)
FetchContent_MakeAvailable(googletest)

target_link_libraries(wrapper_test
    gtest_main
    wrapper
    parquet arrow thrift utf8proc pthread
    )

# Defines `gtest_discover_tests()`.
#include(GoogleTest)
#gtest_discover_tests(milvusd_test)
@ -1,305 +0,0 @@
#include <gtest/gtest.h>
#include <fstream>
#include <arrow/api.h>
#include <arrow/io/api.h>
#include <parquet/arrow/reader.h>
#include <parquet/arrow/writer.h>
#include "ParquetWrapper.h"
#include "ColumnType.h"
#include "PayloadStream.h"

static void WriteToFile(CBuffer cb) {
  auto data_file = std::ofstream("/tmp/wrapper_test_data.dat", std::ios::binary);
  data_file.write(cb.data, cb.length);
  data_file.close();
}

static std::shared_ptr<arrow::Table> ReadFromFile() {
  std::shared_ptr<arrow::io::ReadableFile> infile;
  auto rst = arrow::io::ReadableFile::Open("/tmp/wrapper_test_data.dat");
  if (!rst.ok()) return nullptr;
  infile = *rst;

  std::shared_ptr<arrow::Table> table;
  std::unique_ptr<parquet::arrow::FileReader> reader;
  auto st = parquet::arrow::OpenFile(infile, arrow::default_memory_pool(), &reader);
  if (!st.ok()) return nullptr;
  st = reader->ReadTable(&table);
  if (!st.ok()) return nullptr;
  return table;
}

TEST(wrapper, inoutstream) {
  arrow::Int64Builder i64builder;
  arrow::Status st;
  st = i64builder.AppendValues({1, 2, 3, 4, 5});
  ASSERT_TRUE(st.ok());
  std::shared_ptr<arrow::Array> i64array;
  st = i64builder.Finish(&i64array);
  ASSERT_TRUE(st.ok());

  auto schema = arrow::schema({arrow::field("val", arrow::int64())});
  ASSERT_NE(schema, nullptr);
  auto table = arrow::Table::Make(schema, {i64array});
  ASSERT_NE(table, nullptr);

  auto os = std::make_shared<wrapper::PayloadOutputStream>();
  st = parquet::arrow::WriteTable(*table, arrow::default_memory_pool(), os, 1024);
  ASSERT_TRUE(st.ok());

  const uint8_t *buf = os->Buffer().data();
  int64_t buf_size = os->Buffer().size();
  auto is = std::make_shared<wrapper::PayloadInputStream>(buf, buf_size);

  std::shared_ptr<arrow::Table> intable;
  std::unique_ptr<parquet::arrow::FileReader> reader;
  st = parquet::arrow::OpenFile(is, arrow::default_memory_pool(), &reader);
  ASSERT_TRUE(st.ok());
  st = reader->ReadTable(&intable);
  ASSERT_TRUE(st.ok());

  auto chunks = intable->column(0)->chunks();
  ASSERT_EQ(chunks.size(), 1);

  auto inarray = std::dynamic_pointer_cast<arrow::Int64Array>(chunks[0]);
  ASSERT_NE(inarray, nullptr);
  ASSERT_EQ(inarray->Value(0), 1);
  ASSERT_EQ(inarray->Value(1), 2);
  ASSERT_EQ(inarray->Value(2), 3);
  ASSERT_EQ(inarray->Value(3), 4);
  ASSERT_EQ(inarray->Value(4), 5);
}

TEST(wrapper, boolean) {
  auto payload = NewPayloadWriter(ColumnType::BOOL);
  bool data[] = {true, false, true, false};

  auto st = AddBooleanToPayload(payload, data, 4);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  st = FinishPayloadWriter(payload);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  auto cb = GetPayloadBufferFromWriter(payload);
  ASSERT_GT(cb.length, 0);
  ASSERT_NE(cb.data, nullptr);
  auto nums = GetPayloadLengthFromWriter(payload);
  ASSERT_EQ(nums, 4);

  auto reader = NewPayloadReader(ColumnType::BOOL, (uint8_t *) cb.data, cb.length);
  bool *values;
  int length;
  st = GetBoolFromPayload(reader, &values, &length);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  ASSERT_NE(values, nullptr);
  ASSERT_EQ(length, 4);
  length = GetPayloadLengthFromReader(reader);
  ASSERT_EQ(length, 4);
  for (int i = 0; i < length; i++) {
    ASSERT_EQ(data[i], values[i]);
  }

  st = ReleasePayloadWriter(payload);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  st = ReleasePayloadReader(reader);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
}
#define NUMERIC_TEST(TEST_NAME, COLUMN_TYPE, DATA_TYPE, ADD_FUNC, GET_FUNC, ARRAY_TYPE) TEST(wrapper, TEST_NAME) { \
    auto payload = NewPayloadWriter(COLUMN_TYPE); \
    DATA_TYPE data[] = {-1, 1, -100, 100}; \
    \
    auto st = ADD_FUNC(payload, data, 4); \
    ASSERT_EQ(st.error_code, ErrorCode::SUCCESS); \
    st = FinishPayloadWriter(payload); \
    ASSERT_EQ(st.error_code, ErrorCode::SUCCESS); \
    auto cb = GetPayloadBufferFromWriter(payload); \
    ASSERT_GT(cb.length, 0); \
    ASSERT_NE(cb.data, nullptr); \
    auto nums = GetPayloadLengthFromWriter(payload); \
    ASSERT_EQ(nums, 4); \
    \
    auto reader = NewPayloadReader(COLUMN_TYPE, (uint8_t *) cb.data, cb.length); \
    DATA_TYPE *values; \
    int length; \
    st = GET_FUNC(reader, &values, &length); \
    ASSERT_EQ(st.error_code, ErrorCode::SUCCESS); \
    ASSERT_NE(values, nullptr); \
    ASSERT_EQ(length, 4); \
    length = GetPayloadLengthFromReader(reader); \
    ASSERT_EQ(length, 4); \
    \
    for (int i = 0; i < length; i++) { \
        ASSERT_EQ(data[i], values[i]); \
    } \
    \
    st = ReleasePayloadWriter(payload); \
    ASSERT_EQ(st.error_code, ErrorCode::SUCCESS); \
    st = ReleasePayloadReader(reader); \
    ASSERT_EQ(st.error_code, ErrorCode::SUCCESS); \
}

NUMERIC_TEST(int8, ColumnType::INT8, int8_t, AddInt8ToPayload, GetInt8FromPayload, arrow::Int8Array)
NUMERIC_TEST(int16, ColumnType::INT16, int16_t, AddInt16ToPayload, GetInt16FromPayload, arrow::Int16Array)
NUMERIC_TEST(int32, ColumnType::INT32, int32_t, AddInt32ToPayload, GetInt32FromPayload, arrow::Int32Array)
NUMERIC_TEST(int64, ColumnType::INT64, int64_t, AddInt64ToPayload, GetInt64FromPayload, arrow::Int64Array)
NUMERIC_TEST(float32, ColumnType::FLOAT, float, AddFloatToPayload, GetFloatFromPayload, arrow::FloatArray)
NUMERIC_TEST(float64, ColumnType::DOUBLE, double, AddDoubleToPayload, GetDoubleFromPayload, arrow::DoubleArray)
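Each NUMERIC_TEST line stamps out a complete gtest case per scalar type; NUMERIC_TEST(int16, ...) for instance expands to roughly the following (note the trailing ARRAY_TYPE argument is accepted but never referenced inside the macro body):

    TEST(wrapper, int16) {
        auto payload = NewPayloadWriter(ColumnType::INT16);
        int16_t data[] = {-1, 1, -100, 100};

        auto st = AddInt16ToPayload(payload, data, 4);
        ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
        // ... followed by the same finish / read-back / assert / release
        // sequence as the boolean test above, via GetInt16FromPayload.
    }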
TEST(wrapper, stringarray) {
  auto payload = NewPayloadWriter(ColumnType::STRING);
  auto st = AddOneStringToPayload(payload, (char *) "1234", 4);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  st = AddOneStringToPayload(payload, (char *) "12345", 5);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  char v[3] = {0};
  v[1] = 'a';
  st = AddOneStringToPayload(payload, v, 3);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);

  st = FinishPayloadWriter(payload);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  auto cb = GetPayloadBufferFromWriter(payload);
  ASSERT_GT(cb.length, 0);
  ASSERT_NE(cb.data, nullptr);
  auto nums = GetPayloadLengthFromWriter(payload);
  ASSERT_EQ(nums, 3);

  auto reader = NewPayloadReader(ColumnType::STRING, (uint8_t *) cb.data, cb.length);
  int length = GetPayloadLengthFromReader(reader);
  ASSERT_EQ(length, 3);
  char *v0, *v1, *v2;
  int s0, s1, s2;
  st = GetOneStringFromPayload(reader, 0, &v0, &s0);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  ASSERT_EQ(s0, 4);
  ASSERT_EQ(v0[0], '1');
  ASSERT_EQ(v0[1], '2');
  ASSERT_EQ(v0[2], '3');
  ASSERT_EQ(v0[3], '4');

  st = GetOneStringFromPayload(reader, 1, &v1, &s1);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  ASSERT_EQ(s1, 5);
  ASSERT_EQ(v1[0], '1');
  ASSERT_EQ(v1[1], '2');
  ASSERT_EQ(v1[2], '3');
  ASSERT_EQ(v1[3], '4');
  ASSERT_EQ(v1[4], '5');

  st = GetOneStringFromPayload(reader, 2, &v2, &s2);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  ASSERT_EQ(s2, 3);
  ASSERT_EQ(v2[0], 0);
  ASSERT_EQ(v2[1], 'a');
  ASSERT_EQ(v2[2], 0);

  st = ReleasePayloadWriter(payload);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  st = ReleasePayloadReader(reader);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
}
TEST(wrapper, binary_vector) {
  auto payload = NewPayloadWriter(ColumnType::VECTOR_BINARY);
  uint8_t data[] = {0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8};

  auto st = AddBinaryVectorToPayload(payload, data, 16, 4);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  st = FinishPayloadWriter(payload);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  auto cb = GetPayloadBufferFromWriter(payload);
  ASSERT_GT(cb.length, 0);
  ASSERT_NE(cb.data, nullptr);
  auto nums = GetPayloadLengthFromWriter(payload);
  ASSERT_EQ(nums, 4);

  auto reader = NewPayloadReader(ColumnType::VECTOR_BINARY, (uint8_t *) cb.data, cb.length);
  uint8_t *values;
  int length;
  int dim;

  st = GetBinaryVectorFromPayload(reader, &values, &dim, &length);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  ASSERT_NE(values, nullptr);
  ASSERT_EQ(dim, 16);
  ASSERT_EQ(length, 4);
  length = GetPayloadLengthFromReader(reader);
  ASSERT_EQ(length, 4);
  for (int i = 0; i < 8; i++) {
    ASSERT_EQ(values[i], data[i]);
  }

  st = ReleasePayloadWriter(payload);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  st = ReleasePayloadReader(reader);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
}
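One detail worth noting in the binary-vector test above: for ColumnType::VECTOR_BINARY the dimension is counted in bits, so the caller-supplied buffer holds dim/8 bytes per row. The arithmetic the test relies on, as a sketch:

    int rows = 4, dim_bits = 16;
    int bytes_per_row = dim_bits / 8;   // binary vectors: dim is in bits
    assert(rows * bytes_per_row == 8);  // exactly sizeof(data) above;
    // by contrast, the float-vector test below passes dim in elements.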
TEST(wrapper, float_vector) {
  auto payload = NewPayloadWriter(ColumnType::VECTOR_FLOAT);
  float data[] = {1, 2, 3, 4, 5, 6, 7, 8};

  auto st = AddFloatVectorToPayload(payload, data, 2, 4);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  st = FinishPayloadWriter(payload);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  auto cb = GetPayloadBufferFromWriter(payload);
  ASSERT_GT(cb.length, 0);
  ASSERT_NE(cb.data, nullptr);
  auto nums = GetPayloadLengthFromWriter(payload);
  ASSERT_EQ(nums, 4);

  auto reader = NewPayloadReader(ColumnType::VECTOR_FLOAT, (uint8_t *) cb.data, cb.length);
  float *values;
  int length;
  int dim;

  st = GetFloatVectorFromPayload(reader, &values, &dim, &length);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  ASSERT_NE(values, nullptr);
  ASSERT_EQ(dim, 2);
  ASSERT_EQ(length, 4);
  length = GetPayloadLengthFromReader(reader);
  ASSERT_EQ(length, 4);
  for (int i = 0; i < 8; i++) {
    ASSERT_EQ(values[i], data[i]);
  }

  st = ReleasePayloadWriter(payload);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  st = ReleasePayloadReader(reader);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
}
TEST(wrapper, int8_2) {
  auto payload = NewPayloadWriter(ColumnType::INT8);
  int8_t data[] = {-1, 1, -100, 100};

  auto st = AddInt8ToPayload(payload, data, 4);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  st = FinishPayloadWriter(payload);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
  auto cb = GetPayloadBufferFromWriter(payload);
  ASSERT_GT(cb.length, 0);
  ASSERT_NE(cb.data, nullptr);

  WriteToFile(cb);

  auto nums = GetPayloadLengthFromWriter(payload);
  ASSERT_EQ(nums, 4);
  st = ReleasePayloadWriter(payload);
  ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);

  auto table = ReadFromFile();
  ASSERT_NE(table, nullptr);

  auto chunks = table->column(0)->chunks();
  ASSERT_EQ(chunks.size(), 1);

  auto int8_array = std::dynamic_pointer_cast<arrow::Int8Array>(chunks[0]);
  ASSERT_NE(int8_array, nullptr);

  ASSERT_EQ(int8_array->Value(0), -1);
  ASSERT_EQ(int8_array->Value(1), 1);
  ASSERT_EQ(int8_array->Value(2), -100);
  ASSERT_EQ(int8_array->Value(3), 100);
}
@ -16,11 +16,12 @@ import (
	"os"
	"path"
	"runtime"
	"strconv"
	"strings"

	"github.com/spf13/cast"
	"github.com/spf13/viper"
	memkv "github.com/zilliztech/milvus-distributed/internal/kv/mem"
	"github.com/zilliztech/milvus-distributed/internal/kv"
)

type Base interface {
@ -33,21 +34,23 @@ type Base interface {
}

type BaseTable struct {
	params *memkv.MemoryKV
	params *kv.MemoryKV
}

func (gp *BaseTable) Init() {
	gp.params = memkv.NewMemoryKV()
	err := gp.LoadYaml("config.yaml")
	gp.params = kv.NewMemoryKV()

	err := gp.LoadYaml("milvus.yaml")
	if err != nil {
		panic(err)
	}

	minioAddress := os.Getenv("MINIO_ADDRESS")
	if minioAddress == "" {
		minioAddress = "localhost:9000"
	err = gp.LoadYaml("advanced/common.yaml")
	if err != nil {
		panic(err)
	}
	err = gp.Save("_MinioAddress", minioAddress)

	err = gp.LoadYaml("advanced/channel.yaml")
	if err != nil {
		panic(err)
	}
@ -146,3 +149,77 @@ func (gp *BaseTable) Remove(key string) error {
func (gp *BaseTable) Save(key, value string) error {
	return gp.params.Save(strings.ToLower(key), value)
}

func (gp *BaseTable) ParseInt64(key string) int64 {
	valueStr, err := gp.Load(key)
	if err != nil {
		panic(err)
	}
	value, err := strconv.Atoi(valueStr)
	if err != nil {
		panic(err)
	}
	return int64(value)
}

func (gp *BaseTable) ParseInt32(key string) int32 {
	valueStr, err := gp.Load(key)
	if err != nil {
		panic(err)
	}
	value, err := strconv.Atoi(valueStr)
	if err != nil {
		panic(err)
	}
	return int32(value)
}

func (gp *BaseTable) ParseInt(key string) int {
	valueStr, err := gp.Load(key)
	if err != nil {
		panic(err)
	}
	value, err := strconv.Atoi(valueStr)
	if err != nil {
		panic(err)
	}
	return value
}

// package methods

func ConvertRangeToIntRange(rangeStr, sep string) []int {
	items := strings.Split(rangeStr, sep)
	if len(items) != 2 {
		panic("Illegal range")
	}

	startStr := items[0]
	endStr := items[1]
	start, err := strconv.Atoi(startStr)
	if err != nil {
		panic(err)
	}
	end, err := strconv.Atoi(endStr)
	if err != nil {
		panic(err)
	}

	if start < 0 || end < 0 {
		panic("Illegal range value")
	}
	if start > end {
		panic("Illegal range value, start > end")
	}
	return []int{start, end}
}

func ConvertRangeToIntSlice(rangeStr, sep string) []int {
	rangeSlice := ConvertRangeToIntRange(rangeStr, sep)
	start, end := rangeSlice[0], rangeSlice[1]
	var ret []int
	for i := start; i < end; i++ {
		ret = append(ret, i)
	}
	return ret
}
@ -12,6 +12,7 @@
package paramtable

import (
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
@ -21,6 +22,8 @@ var Params = BaseTable{}
func TestMain(m *testing.M) {
	Params.Init()
	code := m.Run()
	os.Exit(code)
}

//func TestMain
@ -55,13 +58,13 @@ func TestGlobalParamsTable_SaveAndLoad(t *testing.T) {
}

func TestGlobalParamsTable_LoadRange(t *testing.T) {
	_ = Params.Save("abc", "10")
	_ = Params.Save("fghz", "20")
	_ = Params.Save("bcde", "1.1")
	_ = Params.Save("abcd", "testSaveAndLoad")
	_ = Params.Save("zhi", "12")
	_ = Params.Save("xxxaab", "10")
	_ = Params.Save("xxxfghz", "20")
	_ = Params.Save("xxxbcde", "1.1")
	_ = Params.Save("xxxabcd", "testSaveAndLoad")
	_ = Params.Save("xxxzhi", "12")

	keys, values, err := Params.LoadRange("a", "g", 10)
	keys, values, err := Params.LoadRange("xxxa", "xxxg", 10)
	assert.Nil(t, err)
	assert.Equal(t, 4, len(keys))
	assert.Equal(t, "10", values[0])
@ -97,24 +100,17 @@ func TestGlobalParamsTable_Remove(t *testing.T) {
}

func TestGlobalParamsTable_LoadYaml(t *testing.T) {
	err := Params.LoadYaml("config.yaml")
	err := Params.LoadYaml("milvus.yaml")
	assert.Nil(t, err)

	value1, err1 := Params.Load("etcd.address")
	value2, err2 := Params.Load("pulsar.port")
	value3, err3 := Params.Load("reader.topicend")
	value4, err4 := Params.Load("proxy.pulsarTopics.readerTopicPrefix")
	value5, err5 := Params.Load("proxy.network.address")
	err = Params.LoadYaml("advanced/channel.yaml")
	assert.Nil(t, err)

	assert.Equal(t, value1, "localhost")
	assert.Equal(t, value2, "6650")
	assert.Equal(t, value3, "128")
	assert.Equal(t, value4, "milvusReader")
	assert.Equal(t, value5, "0.0.0.0")
	_, err = Params.Load("etcd.address")
	assert.Nil(t, err)
	_, err = Params.Load("pulsar.port")
	assert.Nil(t, err)
	_, err = Params.Load("msgChannel.channelRange.insert")
	assert.Nil(t, err)

	assert.Nil(t, err1)
	assert.Nil(t, err2)
	assert.Nil(t, err3)
	assert.Nil(t, err4)
	assert.Nil(t, err5)
}
@ -4,7 +4,7 @@ import (
	"path"
	"time"

	etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
	"github.com/zilliztech/milvus-distributed/internal/kv"
	"go.etcd.io/etcd/clientv3"
)

@ -25,10 +25,10 @@ func ParseTS(ts uint64) (time.Time, uint64) {
	return physicalTime, logical
}

func NewTSOKVBase(etcdAddr []string, tsoRoot, subPath string) *etcdkv.EtcdKV {
func NewTSOKVBase(etcdAddr []string, tsoRoot, subPath string) *kv.EtcdKV {
	client, _ := clientv3.New(clientv3.Config{
		Endpoints:   etcdAddr,
		DialTimeout: 5 * time.Second,
	})
	return etcdkv.NewEtcdKV(client, path.Join(tsoRoot, subPath))
	return kv.NewEtcdKV(client, path.Join(tsoRoot, subPath))
}