Use MetaRootPath instead

Signed-off-by: neza2017 <yefu.chen@zilliz.com>
neza2017 2020-12-01 17:03:39 +08:00 committed by yefu.chen
parent 84b15c1f5c
commit 17cc7bc337
53 changed files with 3440 additions and 4825 deletions

View File

@@ -35,7 +35,7 @@ fmt:
@echo "Running $@ check"
@GO111MODULE=on env bash $(PWD)/scripts/gofmt.sh cmd/
@GO111MODULE=on env bash $(PWD)/scripts/gofmt.sh internal/
@GO111MODULE=on env bash $(PWD)/scripts/gofmt.sh tests/go/
@GO111MODULE=on env bash $(PWD)/scripts/gofmt.sh test/
#TODO: Check code specifications by golangci-lint
lint:
@@ -43,13 +43,13 @@ lint:
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=3m --config ./.golangci.yml ./internal/...
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=3m --config ./.golangci.yml ./cmd/...
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=3m --config ./.golangci.yml ./tests/go/...
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=3m --config ./.golangci.yml ./test/...
ruleguard:
@echo "Running $@ check"
@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go ./internal/...
@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go ./cmd/...
@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go ./tests/go/...
@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go ./test/...
verifiers: cppcheck fmt lint ruleguard

View File

@@ -34,6 +34,7 @@ msgChannel:
channelRange:
insert: [0, 1]
delete: [0, 1]
dataDefinition: [0,1]
k2s: [0, 1]
search: [0, 1]
searchResult: [0, 1]

View File

@@ -380,13 +380,18 @@ func (segMgr *SegmentManager) AssignSegmentID(segIDReq []*internalpb.SegIDReques
// "/msg_stream/insert"
message SysConfigRequest {
repeated string keys = 1;
repeated string key_prefixes = 2;
MsgType msg_type = 1;
int64 reqID = 2;
int64 proxyID = 3;
uint64 timestamp = 4;
repeated string keys = 5;
repeated string key_prefixes = 6;
}
message SysConfigResponse {
repeated string keys = 1;
repeated string values = 2;
common.Status status = 1;
repeated string keys = 2;
repeated string values = 3;
}
```
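For context, a minimal end-to-end sketch of the new RPC (editorial, not part of this diff). The field names, the `kGetSysConfigs` message type, and the master's 53100 gRPC port are taken from the tests added later in this commit; the standalone client around them is an assumption.

```go
package main

import (
	"context"
	"log"

	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
	"google.golang.org/grpc"
)

func main() {
	ctx := context.Background()
	// Dial the master (address and port as used in TestMaster below).
	conn, err := grpc.DialContext(ctx, "127.0.0.1:53100", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	cli := masterpb.NewMasterClient(conn)
	resp, err := cli.GetSysConfigs(ctx, &internalpb.SysConfigRequest{
		MsgType:     internalpb.MsgType_kGetSysConfigs,
		ReqID:       1,
		ProxyID:     1,
		Timestamp:   11,
		Keys:        []string{"/master/port"}, // explicit keys
		KeyPrefixes: []string{"/etcd"},        // plus everything under a prefix
	})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Status.ErrorCode == commonpb.ErrorCode_SUCCESS {
		// Keys and Values are parallel slices.
		for i := range resp.Keys {
			log.Printf("%s = %s", resp.Keys[i], resp.Values[i])
		}
	}
}
```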
@@ -394,12 +399,11 @@ message SysConfigResponse {
```go
type SysConfig struct {
etcdKV *etcd
etcdPathPrefix string
kv *kv.EtcdKV
}
func (conf *SysConfig) InitFromFile(filePath string) (error)
func (conf *SysConfig) GetByPrefix(keyPrefix string) ([]string, error)
func (conf *SysConfig) GetByPrefix(keyPrefix string) (keys []string, values []string, err error)
func (conf *SysConfig) Get(keys []string) ([]string, error)
```
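A hedged sketch of the `SysConfig` API above in use: `dumpMasterConfigs` is a hypothetical helper inside package master, and the `kv` and `log` imports are assumed to be in scope.

```go
// dumpMasterConfigs loads the yaml config tree into the etcd-backed KV and
// reads it back, mirroring how getSysConfigsTask uses SysConfig below.
func dumpMasterConfigs(kvBase *kv.EtcdKV) error {
	conf := &SysConfig{kv: kvBase}
	// Seed etcd from the config files under the given directory.
	if err := conf.InitFromFile("."); err != nil {
		return err
	}
	// Everything under /master, returned as parallel key/value slices.
	keys, values, err := conf.GetByPrefix("/master")
	if err != nil {
		return err
	}
	for i := range keys {
		log.Printf("%s = %s", keys[i], values[i])
	}
	// Or a handful of specific keys.
	vals, err := conf.Get([]string{"/etcd/address", "/pulsar/token"})
	if err != nil {
		return err
	}
	log.Println(vals)
	return nil
}
```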

View File

@@ -76,6 +76,10 @@ class PartitionDescriptionDefaultTypeInternal {
public:
::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<PartitionDescription> _instance;
} _PartitionDescription_default_instance_;
class SysConfigResponseDefaultTypeInternal {
public:
::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<SysConfigResponse> _instance;
} _SysConfigResponse_default_instance_;
class HitsDefaultTypeInternal {
public:
::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<Hits> _instance;
@@ -311,7 +315,22 @@ static void InitDefaultsscc_info_StringResponse_service_5fmsg_2eproto() {
{{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_StringResponse_service_5fmsg_2eproto}, {
&scc_info_Status_common_2eproto.base,}};
static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_service_5fmsg_2eproto[15];
static void InitDefaultsscc_info_SysConfigResponse_service_5fmsg_2eproto() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
{
void* ptr = &::milvus::proto::service::_SysConfigResponse_default_instance_;
new (ptr) ::milvus::proto::service::SysConfigResponse();
::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
}
::milvus::proto::service::SysConfigResponse::InitAsDefaultInstance();
}
::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_SysConfigResponse_service_5fmsg_2eproto =
{{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_SysConfigResponse_service_5fmsg_2eproto}, {
&scc_info_Status_common_2eproto.base,}};
static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_service_5fmsg_2eproto[16];
static const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* file_level_enum_descriptors_service_5fmsg_2eproto[1];
static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_service_5fmsg_2eproto = nullptr;
@@ -414,6 +433,14 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_service_5fmsg_2eproto::offsets
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::PartitionDescription, name_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::PartitionDescription, statistics_),
~0u, // no _has_bits_
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, _internal_metadata_),
~0u, // no _extensions_
~0u, // no _oneof_case_
~0u, // no _weak_field_map_
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, status_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, keys_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, values_),
~0u, // no _has_bits_
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::Hits, _internal_metadata_),
~0u, // no _extensions_
~0u, // no _oneof_case_
@@ -443,8 +470,9 @@ static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOB
{ 73, -1, sizeof(::milvus::proto::service::IntegerRangeResponse)},
{ 81, -1, sizeof(::milvus::proto::service::CollectionDescription)},
{ 89, -1, sizeof(::milvus::proto::service::PartitionDescription)},
{ 97, -1, sizeof(::milvus::proto::service::Hits)},
{ 105, -1, sizeof(::milvus::proto::service::QueryResult)},
{ 97, -1, sizeof(::milvus::proto::service::SysConfigResponse)},
{ 105, -1, sizeof(::milvus::proto::service::Hits)},
{ 113, -1, sizeof(::milvus::proto::service::QueryResult)},
};
static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = {
@@ -461,6 +489,7 @@ static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] =
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_IntegerRangeResponse_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_CollectionDescription_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_PartitionDescription_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_SysConfigResponse_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_Hits_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_QueryResult_default_instance_),
};
@@ -499,20 +528,22 @@ const char descriptor_table_protodef_service_5fmsg_2eproto[] PROTOBUF_SECTION_VA
"\006status\030\001 \001(\0132\033.milvus.proto.common.Stat"
"us\0221\n\004name\030\002 \001(\0132#.milvus.proto.service."
"PartitionName\0225\n\nstatistics\030\003 \003(\0132!.milv"
"us.proto.common.KeyValuePair\"5\n\004Hits\022\013\n\003"
"IDs\030\001 \003(\003\022\020\n\010row_data\030\002 \003(\014\022\016\n\006scores\030\003 "
"\003(\002\"H\n\013QueryResult\022+\n\006status\030\001 \001(\0132\033.mil"
"vus.proto.common.Status\022\014\n\004hits\030\002 \003(\014*@\n"
"\017PlaceholderType\022\010\n\004NONE\020\000\022\021\n\rVECTOR_BIN"
"ARY\020d\022\020\n\014VECTOR_FLOAT\020eBCZAgithub.com/zi"
"lliztech/milvus-distributed/internal/pro"
"to/servicepbb\006proto3"
"us.proto.common.KeyValuePair\"^\n\021SysConfi"
"gResponse\022+\n\006status\030\001 \001(\0132\033.milvus.proto"
".common.Status\022\014\n\004keys\030\002 \003(\t\022\016\n\006values\030\003"
" \003(\t\"5\n\004Hits\022\013\n\003IDs\030\001 \003(\003\022\020\n\010row_data\030\002 "
"\003(\014\022\016\n\006scores\030\003 \003(\002\"H\n\013QueryResult\022+\n\006st"
"atus\030\001 \001(\0132\033.milvus.proto.common.Status\022"
"\014\n\004hits\030\002 \003(\014*@\n\017PlaceholderType\022\010\n\004NONE"
"\020\000\022\021\n\rVECTOR_BINARY\020d\022\020\n\014VECTOR_FLOAT\020eB"
"CZAgithub.com/zilliztech/milvus-distribu"
"ted/internal/proto/servicepbb\006proto3"
;
static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_service_5fmsg_2eproto_deps[2] = {
&::descriptor_table_common_2eproto,
&::descriptor_table_schema_2eproto,
};
static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_service_5fmsg_2eproto_sccs[15] = {
static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_service_5fmsg_2eproto_sccs[16] = {
&scc_info_BoolResponse_service_5fmsg_2eproto.base,
&scc_info_CollectionDescription_service_5fmsg_2eproto.base,
&scc_info_CollectionName_service_5fmsg_2eproto.base,
@@ -528,14 +559,15 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_ser
&scc_info_RowBatch_service_5fmsg_2eproto.base,
&scc_info_StringListResponse_service_5fmsg_2eproto.base,
&scc_info_StringResponse_service_5fmsg_2eproto.base,
&scc_info_SysConfigResponse_service_5fmsg_2eproto.base,
};
static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_service_5fmsg_2eproto_once;
static bool descriptor_table_service_5fmsg_2eproto_initialized = false;
const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_service_5fmsg_2eproto = {
&descriptor_table_service_5fmsg_2eproto_initialized, descriptor_table_protodef_service_5fmsg_2eproto, "service_msg.proto", 1620,
&descriptor_table_service_5fmsg_2eproto_once, descriptor_table_service_5fmsg_2eproto_sccs, descriptor_table_service_5fmsg_2eproto_deps, 15, 2,
&descriptor_table_service_5fmsg_2eproto_initialized, descriptor_table_protodef_service_5fmsg_2eproto, "service_msg.proto", 1716,
&descriptor_table_service_5fmsg_2eproto_once, descriptor_table_service_5fmsg_2eproto_sccs, descriptor_table_service_5fmsg_2eproto_deps, 16, 2,
schemas, file_default_instances, TableStruct_service_5fmsg_2eproto::offsets,
file_level_metadata_service_5fmsg_2eproto, 15, file_level_enum_descriptors_service_5fmsg_2eproto, file_level_service_descriptors_service_5fmsg_2eproto,
file_level_metadata_service_5fmsg_2eproto, 16, file_level_enum_descriptors_service_5fmsg_2eproto, file_level_service_descriptors_service_5fmsg_2eproto,
};
// Force running AddDescriptors() at dynamic initialization time.
@@ -5159,6 +5191,398 @@ void PartitionDescription::InternalSwap(PartitionDescription* other) {
}
// ===================================================================
void SysConfigResponse::InitAsDefaultInstance() {
::milvus::proto::service::_SysConfigResponse_default_instance_._instance.get_mutable()->status_ = const_cast< ::milvus::proto::common::Status*>(
::milvus::proto::common::Status::internal_default_instance());
}
class SysConfigResponse::_Internal {
public:
static const ::milvus::proto::common::Status& status(const SysConfigResponse* msg);
};
const ::milvus::proto::common::Status&
SysConfigResponse::_Internal::status(const SysConfigResponse* msg) {
return *msg->status_;
}
void SysConfigResponse::clear_status() {
if (GetArenaNoVirtual() == nullptr && status_ != nullptr) {
delete status_;
}
status_ = nullptr;
}
SysConfigResponse::SysConfigResponse()
: ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
SharedCtor();
// @@protoc_insertion_point(constructor:milvus.proto.service.SysConfigResponse)
}
SysConfigResponse::SysConfigResponse(const SysConfigResponse& from)
: ::PROTOBUF_NAMESPACE_ID::Message(),
_internal_metadata_(nullptr),
keys_(from.keys_),
values_(from.values_) {
_internal_metadata_.MergeFrom(from._internal_metadata_);
if (from.has_status()) {
status_ = new ::milvus::proto::common::Status(*from.status_);
} else {
status_ = nullptr;
}
// @@protoc_insertion_point(copy_constructor:milvus.proto.service.SysConfigResponse)
}
void SysConfigResponse::SharedCtor() {
::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_SysConfigResponse_service_5fmsg_2eproto.base);
status_ = nullptr;
}
SysConfigResponse::~SysConfigResponse() {
// @@protoc_insertion_point(destructor:milvus.proto.service.SysConfigResponse)
SharedDtor();
}
void SysConfigResponse::SharedDtor() {
if (this != internal_default_instance()) delete status_;
}
void SysConfigResponse::SetCachedSize(int size) const {
_cached_size_.Set(size);
}
const SysConfigResponse& SysConfigResponse::default_instance() {
::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_SysConfigResponse_service_5fmsg_2eproto.base);
return *internal_default_instance();
}
void SysConfigResponse::Clear() {
// @@protoc_insertion_point(message_clear_start:milvus.proto.service.SysConfigResponse)
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
// Prevent compiler warnings about cached_has_bits being unused
(void) cached_has_bits;
keys_.Clear();
values_.Clear();
if (GetArenaNoVirtual() == nullptr && status_ != nullptr) {
delete status_;
}
status_ = nullptr;
_internal_metadata_.Clear();
}
#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
const char* SysConfigResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
while (!ctx->Done(&ptr)) {
::PROTOBUF_NAMESPACE_ID::uint32 tag;
ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
CHK_(ptr);
switch (tag >> 3) {
// .milvus.proto.common.Status status = 1;
case 1:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
ptr = ctx->ParseMessage(mutable_status(), ptr);
CHK_(ptr);
} else goto handle_unusual;
continue;
// repeated string keys = 2;
case 2:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
ptr -= 1;
do {
ptr += 1;
ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_keys(), ptr, ctx, "milvus.proto.service.SysConfigResponse.keys");
CHK_(ptr);
if (!ctx->DataAvailable(ptr)) break;
} while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 18);
} else goto handle_unusual;
continue;
// repeated string values = 3;
case 3:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
ptr -= 1;
do {
ptr += 1;
ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_values(), ptr, ctx, "milvus.proto.service.SysConfigResponse.values");
CHK_(ptr);
if (!ctx->DataAvailable(ptr)) break;
} while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 26);
} else goto handle_unusual;
continue;
default: {
handle_unusual:
if ((tag & 7) == 4 || tag == 0) {
ctx->SetLastTag(tag);
goto success;
}
ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
CHK_(ptr != nullptr);
continue;
}
} // switch
} // while
success:
return ptr;
failure:
ptr = nullptr;
goto success;
#undef CHK_
}
#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
bool SysConfigResponse::MergePartialFromCodedStream(
::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
::PROTOBUF_NAMESPACE_ID::uint32 tag;
// @@protoc_insertion_point(parse_start:milvus.proto.service.SysConfigResponse)
for (;;) {
::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
tag = p.first;
if (!p.second) goto handle_unusual;
switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
// .milvus.proto.common.Status status = 1;
case 1: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
input, mutable_status()));
} else {
goto handle_unusual;
}
break;
}
// repeated string keys = 2;
case 2: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
input, this->add_keys()));
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->keys(this->keys_size() - 1).data(),
static_cast<int>(this->keys(this->keys_size() - 1).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
"milvus.proto.service.SysConfigResponse.keys"));
} else {
goto handle_unusual;
}
break;
}
// repeated string values = 3;
case 3: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
input, this->add_values()));
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->values(this->values_size() - 1).data(),
static_cast<int>(this->values(this->values_size() - 1).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
"milvus.proto.service.SysConfigResponse.values"));
} else {
goto handle_unusual;
}
break;
}
default: {
handle_unusual:
if (tag == 0) {
goto success;
}
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
input, tag, _internal_metadata_.mutable_unknown_fields()));
break;
}
}
}
success:
// @@protoc_insertion_point(parse_success:milvus.proto.service.SysConfigResponse)
return true;
failure:
// @@protoc_insertion_point(parse_failure:milvus.proto.service.SysConfigResponse)
return false;
#undef DO_
}
#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
void SysConfigResponse::SerializeWithCachedSizes(
::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
// @@protoc_insertion_point(serialize_start:milvus.proto.service.SysConfigResponse)
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
(void) cached_has_bits;
// .milvus.proto.common.Status status = 1;
if (this->has_status()) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
1, _Internal::status(this), output);
}
// repeated string keys = 2;
for (int i = 0, n = this->keys_size(); i < n; i++) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->keys(i).data(), static_cast<int>(this->keys(i).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.service.SysConfigResponse.keys");
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString(
2, this->keys(i), output);
}
// repeated string values = 3;
for (int i = 0, n = this->values_size(); i < n; i++) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->values(i).data(), static_cast<int>(this->values(i).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.service.SysConfigResponse.values");
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString(
3, this->values(i), output);
}
if (_internal_metadata_.have_unknown_fields()) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
_internal_metadata_.unknown_fields(), output);
}
// @@protoc_insertion_point(serialize_end:milvus.proto.service.SysConfigResponse)
}
::PROTOBUF_NAMESPACE_ID::uint8* SysConfigResponse::InternalSerializeWithCachedSizesToArray(
::PROTOBUF_NAMESPACE_ID::uint8* target) const {
// @@protoc_insertion_point(serialize_to_array_start:milvus.proto.service.SysConfigResponse)
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
(void) cached_has_bits;
// .milvus.proto.common.Status status = 1;
if (this->has_status()) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
InternalWriteMessageToArray(
1, _Internal::status(this), target);
}
// repeated string keys = 2;
for (int i = 0, n = this->keys_size(); i < n; i++) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->keys(i).data(), static_cast<int>(this->keys(i).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.service.SysConfigResponse.keys");
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
WriteStringToArray(2, this->keys(i), target);
}
// repeated string values = 3;
for (int i = 0, n = this->values_size(); i < n; i++) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->values(i).data(), static_cast<int>(this->values(i).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.service.SysConfigResponse.values");
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
WriteStringToArray(3, this->values(i), target);
}
if (_internal_metadata_.have_unknown_fields()) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
_internal_metadata_.unknown_fields(), target);
}
// @@protoc_insertion_point(serialize_to_array_end:milvus.proto.service.SysConfigResponse)
return target;
}
size_t SysConfigResponse::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:milvus.proto.service.SysConfigResponse)
size_t total_size = 0;
if (_internal_metadata_.have_unknown_fields()) {
total_size +=
::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
_internal_metadata_.unknown_fields());
}
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
// Prevent compiler warnings about cached_has_bits being unused
(void) cached_has_bits;
// repeated string keys = 2;
total_size += 1 *
::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->keys_size());
for (int i = 0, n = this->keys_size(); i < n; i++) {
total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
this->keys(i));
}
// repeated string values = 3;
total_size += 1 *
::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->values_size());
for (int i = 0, n = this->values_size(); i < n; i++) {
total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
this->values(i));
}
// .milvus.proto.common.Status status = 1;
if (this->has_status()) {
total_size += 1 +
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
*status_);
}
int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
SetCachedSize(cached_size);
return total_size;
}
void SysConfigResponse::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:milvus.proto.service.SysConfigResponse)
GOOGLE_DCHECK_NE(&from, this);
const SysConfigResponse* source =
::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<SysConfigResponse>(
&from);
if (source == nullptr) {
// @@protoc_insertion_point(generalized_merge_from_cast_fail:milvus.proto.service.SysConfigResponse)
::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
} else {
// @@protoc_insertion_point(generalized_merge_from_cast_success:milvus.proto.service.SysConfigResponse)
MergeFrom(*source);
}
}
void SysConfigResponse::MergeFrom(const SysConfigResponse& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:milvus.proto.service.SysConfigResponse)
GOOGLE_DCHECK_NE(&from, this);
_internal_metadata_.MergeFrom(from._internal_metadata_);
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
(void) cached_has_bits;
keys_.MergeFrom(from.keys_);
values_.MergeFrom(from.values_);
if (from.has_status()) {
mutable_status()->::milvus::proto::common::Status::MergeFrom(from.status());
}
}
void SysConfigResponse::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:milvus.proto.service.SysConfigResponse)
if (&from == this) return;
Clear();
MergeFrom(from);
}
void SysConfigResponse::CopyFrom(const SysConfigResponse& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:milvus.proto.service.SysConfigResponse)
if (&from == this) return;
Clear();
MergeFrom(from);
}
bool SysConfigResponse::IsInitialized() const {
return true;
}
void SysConfigResponse::InternalSwap(SysConfigResponse* other) {
using std::swap;
_internal_metadata_.Swap(&other->_internal_metadata_);
keys_.InternalSwap(CastToBase(&other->keys_));
values_.InternalSwap(CastToBase(&other->values_));
swap(status_, other->status_);
}
::PROTOBUF_NAMESPACE_ID::Metadata SysConfigResponse::GetMetadata() const {
return GetMetadataStatic();
}
// ===================================================================
void Hits::InitAsDefaultInstance() {
@@ -5911,6 +6335,9 @@ template<> PROTOBUF_NOINLINE ::milvus::proto::service::CollectionDescription* Ar
template<> PROTOBUF_NOINLINE ::milvus::proto::service::PartitionDescription* Arena::CreateMaybeMessage< ::milvus::proto::service::PartitionDescription >(Arena* arena) {
return Arena::CreateInternal< ::milvus::proto::service::PartitionDescription >(arena);
}
template<> PROTOBUF_NOINLINE ::milvus::proto::service::SysConfigResponse* Arena::CreateMaybeMessage< ::milvus::proto::service::SysConfigResponse >(Arena* arena) {
return Arena::CreateInternal< ::milvus::proto::service::SysConfigResponse >(arena);
}
template<> PROTOBUF_NOINLINE ::milvus::proto::service::Hits* Arena::CreateMaybeMessage< ::milvus::proto::service::Hits >(Arena* arena) {
return Arena::CreateInternal< ::milvus::proto::service::Hits >(arena);
}

View File

@@ -50,7 +50,7 @@ struct TableStruct_service_5fmsg_2eproto {
PROTOBUF_SECTION_VARIABLE(protodesc_cold);
static const ::PROTOBUF_NAMESPACE_ID::internal::AuxillaryParseTableField aux[]
PROTOBUF_SECTION_VARIABLE(protodesc_cold);
static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[15]
static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[16]
PROTOBUF_SECTION_VARIABLE(protodesc_cold);
static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[];
static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[];
@@ -105,6 +105,9 @@ extern StringListResponseDefaultTypeInternal _StringListResponse_default_instanc
class StringResponse;
class StringResponseDefaultTypeInternal;
extern StringResponseDefaultTypeInternal _StringResponse_default_instance_;
class SysConfigResponse;
class SysConfigResponseDefaultTypeInternal;
extern SysConfigResponseDefaultTypeInternal _SysConfigResponse_default_instance_;
} // namespace service
} // namespace proto
} // namespace milvus
@@ -124,6 +127,7 @@ template<> ::milvus::proto::service::QueryResult* Arena::CreateMaybeMessage<::mi
template<> ::milvus::proto::service::RowBatch* Arena::CreateMaybeMessage<::milvus::proto::service::RowBatch>(Arena*);
template<> ::milvus::proto::service::StringListResponse* Arena::CreateMaybeMessage<::milvus::proto::service::StringListResponse>(Arena*);
template<> ::milvus::proto::service::StringResponse* Arena::CreateMaybeMessage<::milvus::proto::service::StringResponse>(Arena*);
template<> ::milvus::proto::service::SysConfigResponse* Arena::CreateMaybeMessage<::milvus::proto::service::SysConfigResponse>(Arena*);
PROTOBUF_NAMESPACE_CLOSE
namespace milvus {
namespace proto {
@@ -2154,6 +2158,178 @@ class PartitionDescription :
};
// -------------------------------------------------------------------
class SysConfigResponse :
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.proto.service.SysConfigResponse) */ {
public:
SysConfigResponse();
virtual ~SysConfigResponse();
SysConfigResponse(const SysConfigResponse& from);
SysConfigResponse(SysConfigResponse&& from) noexcept
: SysConfigResponse() {
*this = ::std::move(from);
}
inline SysConfigResponse& operator=(const SysConfigResponse& from) {
CopyFrom(from);
return *this;
}
inline SysConfigResponse& operator=(SysConfigResponse&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
return GetDescriptor();
}
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
return GetMetadataStatic().descriptor;
}
static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
return GetMetadataStatic().reflection;
}
static const SysConfigResponse& default_instance();
static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY
static inline const SysConfigResponse* internal_default_instance() {
return reinterpret_cast<const SysConfigResponse*>(
&_SysConfigResponse_default_instance_);
}
static constexpr int kIndexInFileMessages =
13;
friend void swap(SysConfigResponse& a, SysConfigResponse& b) {
a.Swap(&b);
}
inline void Swap(SysConfigResponse* other) {
if (other == this) return;
InternalSwap(other);
}
// implements Message ----------------------------------------------
inline SysConfigResponse* New() const final {
return CreateMaybeMessage<SysConfigResponse>(nullptr);
}
SysConfigResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
return CreateMaybeMessage<SysConfigResponse>(arena);
}
void CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final;
void MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final;
void CopyFrom(const SysConfigResponse& from);
void MergeFrom(const SysConfigResponse& from);
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
bool IsInitialized() const final;
size_t ByteSizeLong() const final;
#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
#else
bool MergePartialFromCodedStream(
::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) final;
#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
void SerializeWithCachedSizes(
::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const final;
::PROTOBUF_NAMESPACE_ID::uint8* InternalSerializeWithCachedSizesToArray(
::PROTOBUF_NAMESPACE_ID::uint8* target) const final;
int GetCachedSize() const final { return _cached_size_.Get(); }
private:
inline void SharedCtor();
inline void SharedDtor();
void SetCachedSize(int size) const final;
void InternalSwap(SysConfigResponse* other);
friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
return "milvus.proto.service.SysConfigResponse";
}
private:
inline ::PROTOBUF_NAMESPACE_ID::Arena* GetArenaNoVirtual() const {
return nullptr;
}
inline void* MaybeArenaPtr() const {
return nullptr;
}
public:
::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;
private:
static ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadataStatic() {
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&::descriptor_table_service_5fmsg_2eproto);
return ::descriptor_table_service_5fmsg_2eproto.file_level_metadata[kIndexInFileMessages];
}
public:
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
enum : int {
kKeysFieldNumber = 2,
kValuesFieldNumber = 3,
kStatusFieldNumber = 1,
};
// repeated string keys = 2;
int keys_size() const;
void clear_keys();
const std::string& keys(int index) const;
std::string* mutable_keys(int index);
void set_keys(int index, const std::string& value);
void set_keys(int index, std::string&& value);
void set_keys(int index, const char* value);
void set_keys(int index, const char* value, size_t size);
std::string* add_keys();
void add_keys(const std::string& value);
void add_keys(std::string&& value);
void add_keys(const char* value);
void add_keys(const char* value, size_t size);
const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& keys() const;
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_keys();
// repeated string values = 3;
int values_size() const;
void clear_values();
const std::string& values(int index) const;
std::string* mutable_values(int index);
void set_values(int index, const std::string& value);
void set_values(int index, std::string&& value);
void set_values(int index, const char* value);
void set_values(int index, const char* value, size_t size);
std::string* add_values();
void add_values(const std::string& value);
void add_values(std::string&& value);
void add_values(const char* value);
void add_values(const char* value, size_t size);
const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& values() const;
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_values();
// .milvus.proto.common.Status status = 1;
bool has_status() const;
void clear_status();
const ::milvus::proto::common::Status& status() const;
::milvus::proto::common::Status* release_status();
::milvus::proto::common::Status* mutable_status();
void set_allocated_status(::milvus::proto::common::Status* status);
// @@protoc_insertion_point(class_scope:milvus.proto.service.SysConfigResponse)
private:
class _Internal;
::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_;
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> keys_;
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> values_;
::milvus::proto::common::Status* status_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
friend struct ::TableStruct_service_5fmsg_2eproto;
};
// -------------------------------------------------------------------
class Hits :
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.proto.service.Hits) */ {
public:
@@ -2196,7 +2372,7 @@ class Hits :
&_Hits_default_instance_);
}
static constexpr int kIndexInFileMessages =
13;
14;
friend void swap(Hits& a, Hits& b) {
a.Swap(&b);
@@ -2367,7 +2543,7 @@ class QueryResult :
&_QueryResult_default_instance_);
}
static constexpr int kIndexInFileMessages =
14;
15;
friend void swap(QueryResult& a, QueryResult& b) {
a.Swap(&b);
@@ -3880,6 +4056,185 @@ PartitionDescription::statistics() const {
// -------------------------------------------------------------------
// SysConfigResponse
// .milvus.proto.common.Status status = 1;
inline bool SysConfigResponse::has_status() const {
return this != internal_default_instance() && status_ != nullptr;
}
inline const ::milvus::proto::common::Status& SysConfigResponse::status() const {
const ::milvus::proto::common::Status* p = status_;
// @@protoc_insertion_point(field_get:milvus.proto.service.SysConfigResponse.status)
return p != nullptr ? *p : *reinterpret_cast<const ::milvus::proto::common::Status*>(
&::milvus::proto::common::_Status_default_instance_);
}
inline ::milvus::proto::common::Status* SysConfigResponse::release_status() {
// @@protoc_insertion_point(field_release:milvus.proto.service.SysConfigResponse.status)
::milvus::proto::common::Status* temp = status_;
status_ = nullptr;
return temp;
}
inline ::milvus::proto::common::Status* SysConfigResponse::mutable_status() {
if (status_ == nullptr) {
auto* p = CreateMaybeMessage<::milvus::proto::common::Status>(GetArenaNoVirtual());
status_ = p;
}
// @@protoc_insertion_point(field_mutable:milvus.proto.service.SysConfigResponse.status)
return status_;
}
inline void SysConfigResponse::set_allocated_status(::milvus::proto::common::Status* status) {
::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaNoVirtual();
if (message_arena == nullptr) {
delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(status_);
}
if (status) {
::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = nullptr;
if (message_arena != submessage_arena) {
status = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
message_arena, status, submessage_arena);
}
} else {
}
status_ = status;
// @@protoc_insertion_point(field_set_allocated:milvus.proto.service.SysConfigResponse.status)
}
// repeated string keys = 2;
inline int SysConfigResponse::keys_size() const {
return keys_.size();
}
inline void SysConfigResponse::clear_keys() {
keys_.Clear();
}
inline const std::string& SysConfigResponse::keys(int index) const {
// @@protoc_insertion_point(field_get:milvus.proto.service.SysConfigResponse.keys)
return keys_.Get(index);
}
inline std::string* SysConfigResponse::mutable_keys(int index) {
// @@protoc_insertion_point(field_mutable:milvus.proto.service.SysConfigResponse.keys)
return keys_.Mutable(index);
}
inline void SysConfigResponse::set_keys(int index, const std::string& value) {
// @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.keys)
keys_.Mutable(index)->assign(value);
}
inline void SysConfigResponse::set_keys(int index, std::string&& value) {
// @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.keys)
keys_.Mutable(index)->assign(std::move(value));
}
inline void SysConfigResponse::set_keys(int index, const char* value) {
GOOGLE_DCHECK(value != nullptr);
keys_.Mutable(index)->assign(value);
// @@protoc_insertion_point(field_set_char:milvus.proto.service.SysConfigResponse.keys)
}
inline void SysConfigResponse::set_keys(int index, const char* value, size_t size) {
keys_.Mutable(index)->assign(
reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_set_pointer:milvus.proto.service.SysConfigResponse.keys)
}
inline std::string* SysConfigResponse::add_keys() {
// @@protoc_insertion_point(field_add_mutable:milvus.proto.service.SysConfigResponse.keys)
return keys_.Add();
}
inline void SysConfigResponse::add_keys(const std::string& value) {
keys_.Add()->assign(value);
// @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.keys)
}
inline void SysConfigResponse::add_keys(std::string&& value) {
keys_.Add(std::move(value));
// @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.keys)
}
inline void SysConfigResponse::add_keys(const char* value) {
GOOGLE_DCHECK(value != nullptr);
keys_.Add()->assign(value);
// @@protoc_insertion_point(field_add_char:milvus.proto.service.SysConfigResponse.keys)
}
inline void SysConfigResponse::add_keys(const char* value, size_t size) {
keys_.Add()->assign(reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_add_pointer:milvus.proto.service.SysConfigResponse.keys)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
SysConfigResponse::keys() const {
// @@protoc_insertion_point(field_list:milvus.proto.service.SysConfigResponse.keys)
return keys_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
SysConfigResponse::mutable_keys() {
// @@protoc_insertion_point(field_mutable_list:milvus.proto.service.SysConfigResponse.keys)
return &keys_;
}
// repeated string values = 3;
inline int SysConfigResponse::values_size() const {
return values_.size();
}
inline void SysConfigResponse::clear_values() {
values_.Clear();
}
inline const std::string& SysConfigResponse::values(int index) const {
// @@protoc_insertion_point(field_get:milvus.proto.service.SysConfigResponse.values)
return values_.Get(index);
}
inline std::string* SysConfigResponse::mutable_values(int index) {
// @@protoc_insertion_point(field_mutable:milvus.proto.service.SysConfigResponse.values)
return values_.Mutable(index);
}
inline void SysConfigResponse::set_values(int index, const std::string& value) {
// @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.values)
values_.Mutable(index)->assign(value);
}
inline void SysConfigResponse::set_values(int index, std::string&& value) {
// @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.values)
values_.Mutable(index)->assign(std::move(value));
}
inline void SysConfigResponse::set_values(int index, const char* value) {
GOOGLE_DCHECK(value != nullptr);
values_.Mutable(index)->assign(value);
// @@protoc_insertion_point(field_set_char:milvus.proto.service.SysConfigResponse.values)
}
inline void SysConfigResponse::set_values(int index, const char* value, size_t size) {
values_.Mutable(index)->assign(
reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_set_pointer:milvus.proto.service.SysConfigResponse.values)
}
inline std::string* SysConfigResponse::add_values() {
// @@protoc_insertion_point(field_add_mutable:milvus.proto.service.SysConfigResponse.values)
return values_.Add();
}
inline void SysConfigResponse::add_values(const std::string& value) {
values_.Add()->assign(value);
// @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.values)
}
inline void SysConfigResponse::add_values(std::string&& value) {
values_.Add(std::move(value));
// @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.values)
}
inline void SysConfigResponse::add_values(const char* value) {
GOOGLE_DCHECK(value != nullptr);
values_.Add()->assign(value);
// @@protoc_insertion_point(field_add_char:milvus.proto.service.SysConfigResponse.values)
}
inline void SysConfigResponse::add_values(const char* value, size_t size) {
values_.Add()->assign(reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_add_pointer:milvus.proto.service.SysConfigResponse.values)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
SysConfigResponse::values() const {
// @@protoc_insertion_point(field_list:milvus.proto.service.SysConfigResponse.values)
return values_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
SysConfigResponse::mutable_values() {
// @@protoc_insertion_point(field_mutable_list:milvus.proto.service.SysConfigResponse.values)
return &values_;
}
// -------------------------------------------------------------------
// Hits
// repeated int64 IDs = 1;
@@ -4152,6 +4507,8 @@ QueryResult::mutable_hits() {
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)

View File

@@ -31,8 +31,6 @@ ParseVecNode(Plan* plan, const Json& out_body) {
std::string field_name = iter.key();
auto& vec_info = iter.value();
auto topK = vec_info["topk"];
AssertInfo(topK > 0, "topK must greater than 0");
AssertInfo(topK < 16384, "topK is too large");
vec_node->query_info_.topK_ = topK;
vec_node->query_info_.metric_type_ = vec_info["metric_type"];
vec_node->query_info_.search_params_ = vec_info["params"];

View File

@@ -5,6 +5,7 @@ import (
"log"
"github.com/golang/protobuf/proto"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
@@ -85,8 +86,23 @@ func (t *createCollectionTask) Execute() error {
// TODO: initial partition?
PartitionTags: make([]string, 0),
}
err = t.mt.AddCollection(&collection)
if err != nil {
return err
}
return t.mt.AddCollection(&collection)
msgPack := ms.MsgPack{}
baseMsg := ms.BaseMsg{
BeginTimestamp: t.req.Timestamp,
EndTimestamp: t.req.Timestamp,
HashValues: []uint32{0},
}
timeTickMsg := &ms.CreateCollectionMsg{
BaseMsg: baseMsg,
CreateCollectionRequest: *t.req,
}
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
return t.sch.ddMsgStream.Broadcast(&msgPack)
}
//////////////////////////////////////////////////////////////////////////
@@ -102,7 +118,7 @@ func (t *dropCollectionTask) Ts() (Timestamp, error) {
if t.req == nil {
return 0, errors.New("null request")
}
return Timestamp(t.req.Timestamp), nil
return t.req.Timestamp, nil
}
func (t *dropCollectionTask) Execute() error {
@@ -118,7 +134,29 @@ func (t *dropCollectionTask) Execute() error {
collectionID := collectionMeta.ID
return t.mt.DeleteCollection(collectionID)
err = t.mt.DeleteCollection(collectionID)
if err != nil {
return err
}
ts, err := t.Ts()
if err != nil {
return err
}
msgPack := ms.MsgPack{}
baseMsg := ms.BaseMsg{
BeginTimestamp: ts,
EndTimestamp: ts,
HashValues: []uint32{0},
}
timeTickMsg := &ms.DropCollectionMsg{
BaseMsg: baseMsg,
DropCollectionRequest: *t.req,
}
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
return t.sch.ddMsgStream.Broadcast(&msgPack)
}
//////////////////////////////////////////////////////////////////////////
@@ -134,7 +172,7 @@ func (t *hasCollectionTask) Ts() (Timestamp, error) {
if t.req == nil {
return 0, errors.New("null request")
}
return Timestamp(t.req.Timestamp), nil
return t.req.Timestamp, nil
}
func (t *hasCollectionTask) Execute() error {
@@ -147,8 +185,8 @@ func (t *hasCollectionTask) Execute() error {
if err == nil {
t.hasCollection = true
}
return nil
}
//////////////////////////////////////////////////////////////////////////
@@ -181,6 +219,7 @@ func (t *describeCollectionTask) Execute() error {
t.description.Schema = collection.Schema
return nil
}
//////////////////////////////////////////////////////////////////////////
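The Broadcast calls added above publish DDL events on the new dd channels; downstream nodes pick them up with a Pulsar consumer. A sketch of the consuming side (a hypothetical `watchDDChannel` helper in package master, modeled on the stream setup in TestMaster below):

```go
// watchDDChannel consumes the dd stream and logs create/drop events.
// pulsarAddr and Params.DDChannelNames come from the usual ParamTable.
func watchDDChannel(ctx context.Context, pulsarAddr string) {
	ddMs := ms.NewPulsarMsgStream(ctx, 1024)
	ddMs.SetPulsarClient(pulsarAddr)
	ddMs.CreatePulsarConsumers(Params.DDChannelNames, "DDStream", ms.NewUnmarshalDispatcher(), 1024)
	ddMs.Start()
	defer ddMs.Close()
	for {
		result := ddMs.Consume()
		for _, msg := range result.Msgs {
			switch m := msg.(type) {
			case *ms.CreateCollectionMsg:
				log.Println("create collection at ts", m.CreateCollectionRequest.Timestamp)
			case *ms.DropCollectionMsg:
				log.Println("drop collection at ts", m.DropCollectionRequest.Timestamp)
			}
		}
	}
}
```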

View File

@@ -55,6 +55,7 @@ func TestMaster_CollectionTask(t *testing.T) {
// msgChannel
ProxyTimeTickChannelNames: []string{"proxy1", "proxy2"},
WriteNodeTimeTickChannelNames: []string{"write3", "write4"},
DDChannelNames: []string{"dd1", "dd2"},
InsertChannelNames: []string{"dm0", "dm1"},
K2SChannelNames: []string{"k2s0", "k2s1"},
QueryNodeStatsChannelName: "statistic",

View File

@@ -0,0 +1,79 @@
package master
import (
"log"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)
type getSysConfigsTask struct {
baseTask
configkv *kv.EtcdKV
req *internalpb.SysConfigRequest
keys []string
values []string
}
func (t *getSysConfigsTask) Type() internalpb.MsgType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.MsgType
}
func (t *getSysConfigsTask) Ts() (Timestamp, error) {
if t.req == nil {
return 0, errors.New("null request")
}
return t.req.Timestamp, nil
}
func (t *getSysConfigsTask) Execute() error {
if t.req == nil {
return errors.New("null request")
}
sc := &SysConfig{kv: t.configkv}
keyMap := make(map[string]bool)
// Load configs with prefix
for _, prefix := range t.req.KeyPrefixes {
prefixKeys, prefixVals, err := sc.GetByPrefix(prefix)
if err != nil {
return errors.Errorf("Load configs by prefix wrong: %s", err.Error())
}
t.keys = append(t.keys, prefixKeys...)
t.values = append(t.values, prefixVals...)
}
for _, key := range t.keys {
keyMap[key] = true
}
// Load specific configs
if len(t.req.Keys) > 0 {
// To clean up duplicated keys
cleanKeys := []string{}
for _, key := range t.req.Keys {
if !keyMap[key] {
cleanKeys = append(cleanKeys, key)
keyMap[key] = true
continue
}
log.Println("[GetSysConfigs] Warning: duplicate key:", key)
}
v, err := sc.Get(cleanKeys)
if err != nil {
return errors.Errorf("Load configs wrong: %s", err.Error())
}
t.keys = append(t.keys, cleanKeys...)
t.values = append(t.values, v...)
}
return nil
}
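A hedged sketch of the dedup semantics above, mirroring the duplicate-key test added below: `exampleDedup` and its `etcdKV` argument are hypothetical, and the KV is assumed to be populated already (e.g. via InitFromFile).

```go
// exampleDedup requests "/master/port" both explicitly and via the
// "/master" prefix; the keyMap bookkeeping should surface it only once.
func exampleDedup(etcdKV *kv.EtcdKV) {
	task := &getSysConfigsTask{
		configkv: etcdKV,
		req: &internalpb.SysConfigRequest{
			Keys:        []string{"/master/port"},
			KeyPrefixes: []string{"/master"},
		},
	}
	if err := task.Execute(); err != nil {
		log.Fatal(err)
	}
	// keys and values stay aligned; "/master/port" appears exactly once.
	log.Println(task.keys, task.values)
}
```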

View File

@@ -0,0 +1,150 @@
package master
import (
"context"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.etcd.io/etcd/clientv3"
"google.golang.org/grpc"
)
func TestMaster_ConfigTask(t *testing.T) {
Init()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
etcdCli, err := clientv3.New(clientv3.Config{Endpoints: []string{Params.EtcdAddress}})
require.Nil(t, err)
_, err = etcdCli.Delete(ctx, "/test/root", clientv3.WithPrefix())
require.Nil(t, err)
Params = ParamTable{
Address: Params.Address,
Port: Params.Port,
EtcdAddress: Params.EtcdAddress,
MetaRootPath: "/test/root",
PulsarAddress: Params.PulsarAddress,
ProxyIDList: []typeutil.UniqueID{1, 2},
WriteNodeIDList: []typeutil.UniqueID{3, 4},
TopicNum: 5,
QueryNodeNum: 3,
SoftTimeTickBarrierInterval: 300,
// segment
SegmentSize: 536870912 / 1024 / 1024,
SegmentSizeFactor: 0.75,
DefaultRecordSize: 1024,
MinSegIDAssignCnt: 1048576 / 1024,
MaxSegIDAssignCnt: Params.MaxSegIDAssignCnt,
SegIDAssignExpiration: 2000,
// msgChannel
ProxyTimeTickChannelNames: []string{"proxy1", "proxy2"},
WriteNodeTimeTickChannelNames: []string{"write3", "write4"},
InsertChannelNames: []string{"dm0", "dm1"},
K2SChannelNames: []string{"k2s0", "k2s1"},
QueryNodeStatsChannelName: "statistic",
MsgChannelSubName: Params.MsgChannelSubName,
}
svr, err := CreateServer(ctx)
require.Nil(t, err)
err = svr.Run(10002)
defer svr.Close()
require.Nil(t, err)
conn, err := grpc.DialContext(ctx, "127.0.0.1:10002", grpc.WithInsecure(), grpc.WithBlock())
require.Nil(t, err)
defer conn.Close()
cli := masterpb.NewMasterClient(conn)
testKeys := []string{
"/etcd/address",
"/master/port",
"/master/proxyidlist",
"/master/segmentthresholdfactor",
"/pulsar/token",
"/reader/stopflag",
"/proxy/timezone",
"/proxy/network/address",
"/proxy/storage/path",
"/storage/accesskey",
}
testVals := []string{
"localhost",
"53100",
"[1 2]",
"0.75",
"eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY",
"-1",
"UTC+8",
"0.0.0.0",
"/var/lib/milvus",
"",
}
sc := SysConfig{kv: svr.kvBase}
sc.InitFromFile(".")
configRequest := &internalpb.SysConfigRequest{
MsgType: internalpb.MsgType_kGetSysConfigs,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Keys: testKeys,
KeyPrefixes: []string{},
}
response, err := cli.GetSysConfigs(ctx, configRequest)
assert.Nil(t, err)
assert.ElementsMatch(t, testKeys, response.Keys)
assert.ElementsMatch(t, testVals, response.Values)
assert.Equal(t, len(response.GetKeys()), len(response.GetValues()))
configRequest = &internalpb.SysConfigRequest{
MsgType: internalpb.MsgType_kGetSysConfigs,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Keys: []string{},
KeyPrefixes: []string{"/master"},
}
response, err = cli.GetSysConfigs(ctx, configRequest)
assert.Nil(t, err)
for i := range response.GetKeys() {
assert.True(t, strings.HasPrefix(response.GetKeys()[i], "/master"))
}
assert.Equal(t, len(response.GetKeys()), len(response.GetValues()))
t.Run("Test duplicate keys and key prefix", func(t *testing.T) {
configRequest.Keys = []string{}
configRequest.KeyPrefixes = []string{"/master"}
resp, err := cli.GetSysConfigs(ctx, configRequest)
require.Nil(t, err)
assert.Equal(t, len(resp.GetKeys()), len(resp.GetValues()))
assert.NotEqual(t, 0, len(resp.GetKeys()))
configRequest.Keys = []string{"/master/port"}
configRequest.KeyPrefixes = []string{"/master"}
respDup, err := cli.GetSysConfigs(ctx, configRequest)
require.Nil(t, err)
assert.Equal(t, len(respDup.GetKeys()), len(respDup.GetValues()))
assert.NotEqual(t, 0, len(respDup.GetKeys()))
assert.Equal(t, len(respDup.GetKeys()), len(resp.GetKeys()))
})
}

View File

@@ -0,0 +1,111 @@
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
master: # 21
address: localhost
port: 53100
pulsarmoniterinterval: 1
pulsartopic: "monitor-topic"
proxyidlist: [1, 2]
proxyTimeSyncChannels: ["proxy1", "proxy2"]
proxyTimeSyncSubName: "proxy-topic"
softTimeTickBarrierInterval: 500
writeidlist: [3, 4]
writeTimeSyncChannels: ["write3", "write4"]
writeTimeSyncSubName: "write-topic"
dmTimeSyncChannels: ["dm5", "dm6"]
k2sTimeSyncChannels: ["k2s7", "k2s8"]
defaultSizePerRecord: 1024
minimumAssignSize: 1048576
segmentThreshold: 536870912
segmentExpireDuration: 2000
segmentThresholdFactor: 0.75
querynodenum: 1
writenodenum: 1
statsChannels: "statistic"
etcd: # 4
address: localhost
port: 2379
rootpath: by-dev
segthreshold: 10000
timesync: # 1
interval: 400
storage: # 5
driver: TIKV
address: localhost
port: 2379
accesskey:
secretkey:
pulsar: # 6
authentication: false
user: user-default
token: eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY
address: localhost
port: 6650
topicnum: 128
reader: # 7
clientid: 0
stopflag: -1
readerqueuesize: 10000
searchchansize: 10000
key2segchansize: 10000
topicstart: 0
topicend: 128
writer: # 8
clientid: 0
stopflag: -2
readerqueuesize: 10000
searchbyidchansize: 10000
parallelism: 100
topicstart: 0
topicend: 128
bucket: "zilliz-hz"
proxy: # 21
timezone: UTC+8
proxy_id: 1
numReaderNodes: 2
tsoSaveInterval: 200
timeTickInterval: 200
pulsarTopics:
readerTopicPrefix: "milvusReader"
numReaderTopics: 2
deleteTopic: "milvusDeleter"
queryTopic: "milvusQuery"
resultTopic: "milvusResult"
resultGroup: "milvusResultGroup"
timeTickTopic: "milvusTimeTick"
network:
address: 0.0.0.0
port: 19530
logs:
level: debug
trace.enable: true
path: /tmp/logs
max_log_file_size: 1024MB
log_rotate_num: 0
storage:
path: /var/lib/milvus
auto_flush_interval: 1

View File

@@ -359,6 +359,43 @@ func (s *Master) ShowPartitions(ctx context.Context, in *internalpb.ShowPartitio
return t.(*showPartitionTask).stringListResponse, nil
}
func (s *Master) GetSysConfigs(ctx context.Context, in *internalpb.SysConfigRequest) (*servicepb.SysConfigResponse, error) {
var t task = &getSysConfigsTask{
req: in,
configkv: s.kvBase,
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
keys: []string{},
values: []string{},
}
response := &servicepb.SysConfigResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
},
}
var err = s.scheduler.Enqueue(t)
if err != nil {
response.Status.Reason = "Enqueue failed: " + err.Error()
return response, nil
}
err = t.WaitToFinish(ctx)
if err != nil {
response.Status.Reason = "Get System Config failed: " + err.Error()
return response, nil
}
response.Keys = t.(*getSysConfigsTask).keys
response.Values = t.(*getSysConfigsTask).values
response.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
return response, nil
}
//----------------------------------------Internal GRPC Service--------------------------------
func (s *Master) AllocTimestamp(ctx context.Context, request *internalpb.TsoRequest) (*internalpb.TsoResponse, error) {

View File

@@ -10,7 +10,6 @@ import (
"sync/atomic"
"time"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
@@ -123,6 +122,11 @@ func CreateServer(ctx context.Context) (*Master, error) {
}
tsMsgProducer.SetWriteNodeTtBarrier(writeTimeTickBarrier)
pulsarDDStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
pulsarDDStream.SetPulsarClient(pulsarAddr)
pulsarDDStream.CreatePulsarProducers(Params.DDChannelNames)
tsMsgProducer.SetDDSyncStream(pulsarDDStream)
pulsarDMStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
pulsarDMStream.SetPulsarClient(pulsarAddr)
pulsarDMStream.CreatePulsarProducers(Params.InsertChannelNames)
@@ -161,7 +165,10 @@ func CreateServer(ctx context.Context) (*Master, error) {
return nil, err
}
m.scheduler = NewDDRequestScheduler(func() (UniqueID, error) { return m.idAllocator.AllocOne() })
m.scheduler = NewDDRequestScheduler(ctx)
m.scheduler.SetDDMsgStream(pulsarDDStream)
m.scheduler.SetIDAllocator(func() (UniqueID, error) { return m.idAllocator.AllocOne() })
m.segmentMgr = NewSegmentManager(metakv,
func() (UniqueID, error) { return m.idAllocator.AllocOne() },
func() (Timestamp, error) { return m.tsoAllocator.AllocOne() },
@@ -248,6 +255,11 @@ func (s *Master) startServerLoop(ctx context.Context, grpcPort int64) error {
return err
}
s.serverLoopWg.Add(1)
if err := s.scheduler.Start(); err != nil {
return err
}
s.serverLoopWg.Add(1)
go s.grpcLoop(grpcPort)
@@ -255,9 +267,6 @@ func (s *Master) startServerLoop(ctx context.Context, grpcPort int64) error {
return err
}
s.serverLoopWg.Add(1)
go s.tasksExecutionLoop()
s.serverLoopWg.Add(1)
go s.segmentStatisticsLoop()
@@ -270,6 +279,8 @@ func (s *Master) startServerLoop(ctx context.Context, grpcPort int64) error {
func (s *Master) stopServerLoop() {
s.timesSyncMsgProducer.Close()
s.serverLoopWg.Done()
s.scheduler.Close()
s.serverLoopWg.Done()
if s.grpcServer != nil {
s.grpcServer.GracefulStop()
@@ -337,33 +348,6 @@ func (s *Master) tsLoop() {
}
}
func (s *Master) tasksExecutionLoop() {
defer s.serverLoopWg.Done()
ctx, cancel := context.WithCancel(s.serverLoopCtx)
defer cancel()
for {
select {
case task := <-s.scheduler.reqQueue:
timeStamp, err := (task).Ts()
if err != nil {
log.Println(err)
} else {
if timeStamp < s.scheduler.scheduleTimeStamp {
task.Notify(errors.Errorf("input timestamp = %d, schduler timestamp = %d", timeStamp, s.scheduler.scheduleTimeStamp))
} else {
s.scheduler.scheduleTimeStamp = timeStamp
err = task.Execute()
task.Notify(err)
}
}
case <-ctx.Done():
log.Print("server is closed, exit task execution loop")
return
}
}
}
func (s *Master) segmentStatisticsLoop() {
defer s.serverLoopWg.Done()
defer s.segmentStatusMsg.Close()

View File

@@ -0,0 +1,246 @@
package master
import (
"context"
"log"
"math/rand"
"strconv"
"testing"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
"go.uber.org/zap"
"google.golang.org/grpc"
)
func receiveTimeTickMsg(stream *ms.MsgStream) bool {
for {
result := (*stream).Consume()
if len(result.Msgs) > 0 {
return true
}
}
}
func getTimeTickMsgPack(ttmsgs [][2]uint64) *ms.MsgPack {
msgPack := ms.MsgPack{}
for _, vi := range ttmsgs {
msgPack.Msgs = append(msgPack.Msgs, getTtMsg(internalPb.MsgType_kTimeTick, UniqueID(vi[0]), Timestamp(vi[1])))
}
return &msgPack
}
func TestMaster(t *testing.T) {
Init()
pulsarAddr := Params.PulsarAddress
// Creates server.
ctx, cancel := context.WithCancel(context.Background())
svr, err := CreateServer(ctx)
if err != nil {
log.Print("create server failed", zap.Error(err))
}
if err := svr.Run(int64(Params.Port)); err != nil {
log.Fatal("run server failed", zap.Error(err))
}
proxyTimeTickStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
proxyTimeTickStream.SetPulsarClient(pulsarAddr)
proxyTimeTickStream.CreatePulsarProducers(Params.ProxyTimeTickChannelNames)
proxyTimeTickStream.Start()
writeNodeStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
writeNodeStream.SetPulsarClient(pulsarAddr)
writeNodeStream.CreatePulsarProducers(Params.WriteNodeTimeTickChannelNames)
writeNodeStream.Start()
ddMs := ms.NewPulsarMsgStream(ctx, 1024)
ddMs.SetPulsarClient(pulsarAddr)
ddMs.CreatePulsarConsumers(Params.DDChannelNames, "DDStream", ms.NewUnmarshalDispatcher(), 1024)
ddMs.Start()
dMMs := ms.NewPulsarMsgStream(ctx, 1024)
dMMs.SetPulsarClient(pulsarAddr)
dMMs.CreatePulsarConsumers(Params.InsertChannelNames, "DMStream", ms.NewUnmarshalDispatcher(), 1024)
dMMs.Start()
k2sMs := ms.NewPulsarMsgStream(ctx, 1024)
k2sMs.SetPulsarClient(pulsarAddr)
k2sMs.CreatePulsarConsumers(Params.K2SChannelNames, "K2SStream", ms.NewUnmarshalDispatcher(), 1024)
k2sMs.Start()
ttsoftmsgs := [][2]uint64{
{0, 10},
}
msgSoftPackAddr := getTimeTickMsgPack(ttsoftmsgs)
proxyTimeTickStream.Produce(msgSoftPackAddr)
var dMMsgstream ms.MsgStream = dMMs
assert.True(t, receiveTimeTickMsg(&dMMsgstream))
var ddMsgstream ms.MsgStream = ddMs
assert.True(t, receiveTimeTickMsg(&ddMsgstream))
tthardmsgs := [][2]int{
{3, 10},
}
msghardPackAddr := getMsgPack(tthardmsgs)
writeNodeStream.Produce(msghardPackAddr)
var k2sMsgstream ms.MsgStream = k2sMs
assert.True(t, receiveTimeTickMsg(&k2sMsgstream))
conn, err := grpc.DialContext(ctx, "127.0.0.1:53100", grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()
cli := masterpb.NewMasterClient(conn)
sch := schemapb.CollectionSchema{
Name: "name" + strconv.FormatUint(rand.Uint64(), 10),
Description: "test collection",
AutoID: false,
Fields: []*schemapb.FieldSchema{},
}
schemaBytes, err := proto.Marshal(&sch)
assert.Nil(t, err)
createCollectionReq := internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
}
st, err := cli.CreateCollection(ctx, &createCollectionReq)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
var consumeMsg ms.MsgStream = ddMs
var createCollectionMsg *ms.CreateCollectionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
createCollectionMsg = v.(*ms.CreateCollectionMsg)
}
break
}
}
assert.Equal(t, createCollectionReq.MsgType, createCollectionMsg.CreateCollectionRequest.MsgType)
assert.Equal(t, createCollectionReq.ReqID, createCollectionMsg.CreateCollectionRequest.ReqID)
assert.Equal(t, createCollectionReq.Timestamp, createCollectionMsg.CreateCollectionRequest.Timestamp)
assert.Equal(t, createCollectionReq.ProxyID, createCollectionMsg.CreateCollectionRequest.ProxyID)
assert.Equal(t, createCollectionReq.Schema.Value, createCollectionMsg.CreateCollectionRequest.Schema.Value)
////////////////////////////CreatePartition////////////////////////
partitionName := "partitionName" + strconv.FormatUint(rand.Uint64(), 10)
createPartitionReq := internalpb.CreatePartitionRequest{
MsgType: internalpb.MsgType_kCreatePartition,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{
CollectionName: sch.Name,
Tag: partitionName,
},
}
st, err = cli.CreatePartition(ctx, &createPartitionReq)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
var createPartitionMsg *ms.CreatePartitionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
createPartitionMsg = v.(*ms.CreatePartitionMsg)
}
break
}
}
assert.Equal(t, createPartitionReq.MsgType, createPartitionMsg.CreatePartitionRequest.MsgType)
assert.Equal(t, createPartitionReq.ReqID, createPartitionMsg.CreatePartitionRequest.ReqID)
assert.Equal(t, createPartitionReq.Timestamp, createPartitionMsg.CreatePartitionRequest.Timestamp)
assert.Equal(t, createPartitionReq.ProxyID, createPartitionMsg.CreatePartitionRequest.ProxyID)
assert.Equal(t, createPartitionReq.PartitionName.CollectionName, createPartitionMsg.CreatePartitionRequest.PartitionName.CollectionName)
assert.Equal(t, createPartitionReq.PartitionName.Tag, createPartitionMsg.CreatePartitionRequest.PartitionName.Tag)
////////////////////////////DropPartition////////////////////////
dropPartitionReq := internalpb.DropPartitionRequest{
MsgType: internalpb.MsgType_kDropPartition,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{
CollectionName: sch.Name,
Tag: partitionName,
},
}
st, err = cli.DropPartition(ctx, &dropPartitionReq)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
var dropPartitionMsg *ms.DropPartitionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
dropPartitionMsg = v.(*ms.DropPartitionMsg)
}
break
}
}
assert.Equal(t, dropPartitionReq.MsgType, dropPartitionMsg.DropPartitionRequest.MsgType)
assert.Equal(t, dropPartitionReq.ReqID, dropPartitionMsg.DropPartitionRequest.ReqID)
assert.Equal(t, dropPartitionReq.Timestamp, dropPartitionMsg.DropPartitionRequest.Timestamp)
assert.Equal(t, dropPartitionReq.ProxyID, dropPartitionMsg.DropPartitionRequest.ProxyID)
assert.Equal(t, dropPartitionReq.PartitionName.CollectionName, dropPartitionMsg.DropPartitionRequest.PartitionName.CollectionName)
////////////////////////////DropCollection////////////////////////
dropCollectionReq := internalpb.DropCollectionRequest{
MsgType: internalpb.MsgType_kDropCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
CollectionName: &servicepb.CollectionName{CollectionName: sch.Name},
}
st, err = cli.DropCollection(ctx, &dropCollectionReq)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
var dropCollectionMsg *ms.DropCollectionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
dropCollectionMsg = v.(*ms.DropCollectionMsg)
}
break
}
}
assert.Equal(t, dropCollectionReq.MsgType, dropCollectionMsg.DropCollectionRequest.MsgType)
assert.Equal(t, dropCollectionReq.ReqID, dropCollectionMsg.DropCollectionRequest.ReqID)
assert.Equal(t, dropCollectionReq.Timestamp, dropCollectionMsg.DropCollectionRequest.Timestamp)
assert.Equal(t, dropCollectionReq.ProxyID, dropCollectionMsg.DropCollectionRequest.ProxyID)
assert.Equal(t, dropCollectionReq.CollectionName.CollectionName, dropCollectionMsg.DropCollectionRequest.CollectionName.CollectionName)
cancel()
svr.Close()
}

View File

@ -40,6 +40,7 @@ type ParamTable struct {
// msgChannel
ProxyTimeTickChannelNames []string
WriteNodeTimeTickChannelNames []string
DDChannelNames []string
InsertChannelNames []string
K2SChannelNames []string
QueryNodeStatsChannelName string
@ -97,6 +98,7 @@ func (p *ParamTable) Init() {
p.initProxyTimeTickChannelNames()
p.initWriteNodeTimeTickChannelNames()
p.initInsertChannelNames()
p.initDDChannelNames()
p.initK2SChannelNames()
p.initQueryNodeStatsChannelName()
p.initMsgChannelSubName()
@ -382,6 +384,27 @@ func (p *ParamTable) initWriteNodeTimeTickChannelNames() {
p.WriteNodeTimeTickChannelNames = channels
}
func (p *ParamTable) initDDChannelNames() {
ch, err := p.Load("msgChannel.chanNamePrefix.dataDefinition")
if err != nil {
log.Fatal(err)
}
id, err := p.Load("nodeID.queryNodeIDList")
if err != nil {
log.Panicf("load query node id list error, %s", err.Error())
}
ids := strings.Split(id, ",")
channels := make([]string, 0, len(ids))
for _, i := range ids {
_, err := strconv.ParseInt(i, 10, 64)
if err != nil {
log.Panicf("load query node id list error, %s", err.Error())
}
channels = append(channels, ch+"-"+i)
}
p.DDChannelNames = channels
}
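For illustration, a minimal standalone sketch of the derivation above — the prefix value `dataDefinition` and the id list `0,1` are assumptions, since both come from configuration:

```go
package main

import (
	"fmt"
	"strings"
)

// Mirrors initDDChannelNames: one DD channel per configured query node ID.
func main() {
	prefix := "dataDefinition"       // assumed value of msgChannel.chanNamePrefix.dataDefinition
	ids := strings.Split("0,1", ",") // assumed value of nodeID.queryNodeIDList
	channels := make([]string, 0, len(ids))
	for _, id := range ids {
		channels = append(channels, prefix+"-"+id)
	}
	fmt.Println(channels) // prints [dataDefinition-0 dataDefinition-1]
}
```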
func (p *ParamTable) initInsertChannelNames() {
ch, err := p.Load("msgChannel.chanNamePrefix.insert")
if err != nil {

View File

@ -4,6 +4,7 @@ import (
"errors"
"log"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
@ -66,7 +67,30 @@ func (t *createPartitionTask) Execute() error {
if err != nil {
return err
}
return t.mt.AddPartition(collectionMeta.ID, partitionName.Tag)
ts, err := t.Ts()
if err != nil {
return err
}
err = t.mt.AddPartition(collectionMeta.ID, partitionName.Tag)
if err != nil {
return err
}
msgPack := ms.MsgPack{}
baseMsg := ms.BaseMsg{
BeginTimestamp: ts,
EndTimestamp: ts,
HashValues: []uint32{0},
}
timeTickMsg := &ms.CreatePartitionMsg{
BaseMsg: baseMsg,
CreatePartitionRequest: *t.req,
}
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
return t.sch.ddMsgStream.Broadcast(&msgPack)
}
//////////////////////////////////////////////////////////////////////////
@ -98,7 +122,29 @@ func (t *dropPartitionTask) Execute() error {
return err
}
return t.mt.DeletePartition(collectionMeta.ID, partitionName.Tag)
err = t.mt.DeletePartition(collectionMeta.ID, partitionName.Tag)
if err != nil {
return err
}
ts, err := t.Ts()
if err != nil {
return err
}
msgPack := ms.MsgPack{}
baseMsg := ms.BaseMsg{
BeginTimestamp: ts,
EndTimestamp: ts,
HashValues: []uint32{0},
}
timeTickMsg := &ms.DropPartitionMsg{
BaseMsg: baseMsg,
DropPartitionRequest: *t.req,
}
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
return t.sch.ddMsgStream.Broadcast(&msgPack)
}
//////////////////////////////////////////////////////////////////////////
@ -132,6 +178,7 @@ func (t *hasPartitionTask) Execute() error {
t.hasPartition = t.mt.HasPartition(collectionMeta.ID, partitionName.Tag)
return nil
}
//////////////////////////////////////////////////////////////////////////
@ -168,6 +215,7 @@ func (t *describePartitionTask) Execute() error {
t.description = &description
return nil
}
//////////////////////////////////////////////////////////////////////////
@ -208,4 +256,5 @@ func (t *showPartitionTask) Execute() error {
t.stringListResponse = &stringListResponse
return nil
}

View File

@ -1,17 +1,36 @@
package master
import (
"context"
"log"
"github.com/zilliztech/milvus-distributed/internal/errors"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
)
//type ddRequestScheduler interface {}
//type ddReqFIFOScheduler struct {}
type ddRequestScheduler struct {
ctx context.Context
cancel context.CancelFunc
globalIDAllocator func() (UniqueID, error)
reqQueue chan task
scheduleTimeStamp Timestamp
ddMsgStream ms.MsgStream
}
func NewDDRequestScheduler(allocGlobalID func() (UniqueID, error)) *ddRequestScheduler {
func NewDDRequestScheduler(ctx context.Context) *ddRequestScheduler {
const channelSize = 1024
ctx2, cancel := context.WithCancel(ctx)
rs := ddRequestScheduler{
globalIDAllocator: allocGlobalID,
reqQueue: make(chan task, channelSize),
ctx: ctx2,
cancel: cancel,
reqQueue: make(chan task, channelSize),
}
return &rs
}
@ -20,3 +39,51 @@ func (rs *ddRequestScheduler) Enqueue(task task) error {
rs.reqQueue <- task
return nil
}
func (rs *ddRequestScheduler) SetIDAllocator(allocGlobalID func() (UniqueID, error)) {
rs.globalIDAllocator = allocGlobalID
}
func (rs *ddRequestScheduler) SetDDMsgStream(ddStream ms.MsgStream) {
rs.ddMsgStream = ddStream
}
func (rs *ddRequestScheduler) scheduleLoop() {
for {
select {
case task := <-rs.reqQueue:
err := rs.schedule(task)
if err != nil {
log.Println(err)
}
case <-rs.ctx.Done():
log.Print("server is closed, exit task execution loop")
return
}
}
}
func (rs *ddRequestScheduler) schedule(t task) error {
timeStamp, err := t.Ts()
if err != nil {
log.Println(err)
return err
}
if timeStamp < rs.scheduleTimeStamp {
t.Notify(errors.Errorf("input timestamp = %d, scheduler timestamp = %d", timeStamp, rs.scheduleTimeStamp))
} else {
rs.scheduleTimeStamp = timeStamp
err = t.Execute()
t.Notify(err)
}
return nil
}
func (rs *ddRequestScheduler) Start() error {
go rs.scheduleLoop()
return nil
}
func (rs *ddRequestScheduler) Close() {
rs.cancel()
}
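Taken together, the scheduler is wired and driven roughly as follows — a sketch inside package master, where `ctx`, a started `ddStream` (ms.MsgStream), a `GlobalIDAllocator` named `idAllocator`, and a prepared `task` value `ddTask` are assumed placeholders, not fixed identifiers:

```go
// Sketch of the ddRequestScheduler lifecycle, assuming the placeholders above.
scheduler := NewDDRequestScheduler(ctx)
scheduler.SetDDMsgStream(ddStream)
scheduler.SetIDAllocator(func() (UniqueID, error) { return idAllocator.AllocOne() })
if err := scheduler.Start(); err != nil {
	log.Fatal(err)
}
defer scheduler.Close()

// Enqueue blocks until the task fits in reqQueue; scheduleLoop then executes
// tasks in timestamp order and reports the result through task.Notify.
if err := scheduler.Enqueue(ddTask); err != nil {
	log.Println(err)
}
```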

View File

@ -0,0 +1,342 @@
package master
import (
"context"
"math/rand"
"strconv"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/kv"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
"go.etcd.io/etcd/clientv3"
)
func TestMaster_Scheduler_Collection(t *testing.T) {
Init()
etcdAddress := Params.EtcdAddress
kvRootPath := Params.MetaRootPath
pulsarAddr := Params.PulsarAddress
producerChannels := []string{"ddstream"}
consumerChannels := []string{"ddstream"}
consumerSubName := "substream"
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
assert.Nil(t, err)
etcdKV := kv.NewEtcdKV(cli, "/etcd/test/root")
meta, err := NewMetaTable(etcdKV)
assert.Nil(t, err)
defer meta.client.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pulsarDDStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
pulsarDDStream.SetPulsarClient(pulsarAddr)
pulsarDDStream.CreatePulsarProducers(producerChannels)
pulsarDDStream.Start()
defer pulsarDDStream.Close()
consumeMs := ms.NewPulsarMsgStream(ctx, 1024)
consumeMs.SetPulsarClient(pulsarAddr)
consumeMs.CreatePulsarConsumers(consumerChannels, consumerSubName, ms.NewUnmarshalDispatcher(), 1024)
consumeMs.Start()
defer consumeMs.Close()
idAllocator := NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{etcdAddress}, kvRootPath, "gid"))
err = idAllocator.Initialize()
assert.Nil(t, err)
scheduler := NewDDRequestScheduler(ctx)
scheduler.SetDDMsgStream(pulsarDDStream)
scheduler.SetIDAllocator(func() (UniqueID, error) { return idAllocator.AllocOne() })
scheduler.Start()
defer scheduler.Close()
rand.Seed(time.Now().Unix())
sch := schemapb.CollectionSchema{
Name: "name" + strconv.FormatUint(rand.Uint64(), 10),
Description: "string",
AutoID: true,
Fields: nil,
}
schemaBytes, err := proto.Marshal(&sch)
assert.Nil(t, err)
////////////////////////////CreateCollection////////////////////////
createCollectionReq := internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
}
var createCollectionTask task = &createCollectionTask{
req: &createCollectionReq,
baseTask: baseTask{
sch: scheduler,
mt: meta,
cv: make(chan error),
},
}
err = scheduler.Enqueue(createCollectionTask)
assert.Nil(t, err)
err = createCollectionTask.WaitToFinish(ctx)
assert.Nil(t, err)
var consumeMsg ms.MsgStream = consumeMs
var createCollectionMsg *ms.CreateCollectionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
createCollectionMsg = v.(*ms.CreateCollectionMsg)
}
break
}
}
assert.Equal(t, createCollectionReq.MsgType, createCollectionMsg.CreateCollectionRequest.MsgType)
assert.Equal(t, createCollectionReq.ReqID, createCollectionMsg.CreateCollectionRequest.ReqID)
assert.Equal(t, createCollectionReq.Timestamp, createCollectionMsg.CreateCollectionRequest.Timestamp)
assert.Equal(t, createCollectionReq.ProxyID, createCollectionMsg.CreateCollectionRequest.ProxyID)
assert.Equal(t, createCollectionReq.Schema.Value, createCollectionMsg.CreateCollectionRequest.Schema.Value)
////////////////////////////DropCollection////////////////////////
dropCollectionReq := internalpb.DropCollectionRequest{
MsgType: internalpb.MsgType_kDropCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
CollectionName: &servicepb.CollectionName{CollectionName: sch.Name},
}
var dropCollectionTask task = &dropCollectionTask{
req: &dropCollectionReq,
baseTask: baseTask{
sch: scheduler,
mt: meta,
cv: make(chan error),
},
}
err = scheduler.Enqueue(dropCollectionTask)
assert.Nil(t, err)
err = dropCollectionTask.WaitToFinish(ctx)
assert.Nil(t, err)
var dropCollectionMsg *ms.DropCollectionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
dropCollectionMsg = v.(*ms.DropCollectionMsg)
}
break
}
}
assert.Equal(t, dropCollectionReq.MsgType, dropCollectionMsg.DropCollectionRequest.MsgType)
assert.Equal(t, dropCollectionReq.ReqID, dropCollectionMsg.DropCollectionRequest.ReqID)
assert.Equal(t, dropCollectionReq.Timestamp, dropCollectionMsg.DropCollectionRequest.Timestamp)
assert.Equal(t, dropCollectionReq.ProxyID, dropCollectionMsg.DropCollectionRequest.ProxyID)
assert.Equal(t, dropCollectionReq.CollectionName.CollectionName, dropCollectionMsg.DropCollectionRequest.CollectionName.CollectionName)
}
func TestMaster_Scheduler_Partition(t *testing.T) {
Init()
etcdAddress := Params.EtcdAddress
kvRootPath := Params.MetaRootPath
pulsarAddr := Params.PulsarAddress
producerChannels := []string{"ddstream"}
consumerChannels := []string{"ddstream"}
consumerSubName := "substream"
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
assert.Nil(t, err)
etcdKV := kv.NewEtcdKV(cli, "/etcd/test/root")
meta, err := NewMetaTable(etcdKV)
assert.Nil(t, err)
defer meta.client.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pulsarDDStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
pulsarDDStream.SetPulsarClient(pulsarAddr)
pulsarDDStream.CreatePulsarProducers(producerChannels)
pulsarDDStream.Start()
defer pulsarDDStream.Close()
consumeMs := ms.NewPulsarMsgStream(ctx, 1024)
consumeMs.SetPulsarClient(pulsarAddr)
consumeMs.CreatePulsarConsumers(consumerChannels, consumerSubName, ms.NewUnmarshalDispatcher(), 1024)
consumeMs.Start()
defer consumeMs.Close()
idAllocator := NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{etcdAddress}, kvRootPath, "gid"))
err = idAllocator.Initialize()
assert.Nil(t, err)
scheduler := NewDDRequestScheduler(ctx)
scheduler.SetDDMsgStream(pulsarDDStream)
scheduler.SetIDAllocator(func() (UniqueID, error) { return idAllocator.AllocOne() })
scheduler.Start()
defer scheduler.Close()
rand.Seed(time.Now().Unix())
sch := schemapb.CollectionSchema{
Name: "name" + strconv.FormatUint(rand.Uint64(), 10),
Description: "string",
AutoID: true,
Fields: nil,
}
schemaBytes, err := proto.Marshal(&sch)
assert.Nil(t, err)
////////////////////////////CreateCollection////////////////////////
createCollectionReq := internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
}
var createCollectionTask task = &createCollectionTask{
req: &createCollectionReq,
baseTask: baseTask{
sch: scheduler,
mt: meta,
cv: make(chan error),
},
}
err = scheduler.Enqueue(createCollectionTask)
assert.Nil(t, err)
err = createCollectionTask.WaitToFinish(ctx)
assert.Nil(t, err)
var consumeMsg ms.MsgStream = consumeMs
var createCollectionMsg *ms.CreateCollectionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
createCollectionMsg = v.(*ms.CreateCollectionMsg)
}
break
}
}
assert.Equal(t, createCollectionReq.MsgType, createCollectionMsg.CreateCollectionRequest.MsgType)
assert.Equal(t, createCollectionReq.ReqID, createCollectionMsg.CreateCollectionRequest.ReqID)
assert.Equal(t, createCollectionReq.Timestamp, createCollectionMsg.CreateCollectionRequest.Timestamp)
assert.Equal(t, createCollectionReq.ProxyID, createCollectionMsg.CreateCollectionRequest.ProxyID)
assert.Equal(t, createCollectionReq.Schema.Value, createCollectionMsg.CreateCollectionRequest.Schema.Value)
////////////////////////////CreatePartition////////////////////////
partitionName := "partitionName" + strconv.FormatUint(rand.Uint64(), 10)
createPartitionReq := internalpb.CreatePartitionRequest{
MsgType: internalpb.MsgType_kCreatePartition,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{
CollectionName: sch.Name,
Tag: partitionName,
},
}
var createPartitionTask task = &createPartitionTask{
req: &createPartitionReq,
baseTask: baseTask{
sch: scheduler,
mt: meta,
cv: make(chan error),
},
}
err = scheduler.Enqueue(createPartitionTask)
assert.Nil(t, err)
err = createPartitionTask.WaitToFinish(ctx)
assert.Nil(t, err)
var createPartitionMsg *ms.CreatePartitionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
createPartitionMsg = v.(*ms.CreatePartitionMsg)
}
break
}
}
assert.Equal(t, createPartitionReq.MsgType, createPartitionMsg.CreatePartitionRequest.MsgType)
assert.Equal(t, createPartitionReq.ReqID, createPartitionMsg.CreatePartitionRequest.ReqID)
assert.Equal(t, createPartitionReq.Timestamp, createPartitionMsg.CreatePartitionRequest.Timestamp)
assert.Equal(t, createPartitionReq.ProxyID, createPartitionMsg.CreatePartitionRequest.ProxyID)
assert.Equal(t, createPartitionReq.PartitionName.CollectionName, createPartitionMsg.CreatePartitionRequest.PartitionName.CollectionName)
assert.Equal(t, createPartitionReq.PartitionName.Tag, createPartitionMsg.CreatePartitionRequest.PartitionName.Tag)
////////////////////////////DropPartition////////////////////////
dropPartitionReq := internalpb.DropPartitionRequest{
MsgType: internalpb.MsgType_kDropPartition,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{
CollectionName: sch.Name,
Tag: partitionName,
},
}
var dropPartitionTask task = &dropPartitionTask{
req: &dropPartitionReq,
baseTask: baseTask{
sch: scheduler,
mt: meta,
cv: make(chan error),
},
}
err = scheduler.Enqueue(dropPartitionTask)
assert.Nil(t, err)
err = dropPartitionTask.WaitToFinish(ctx)
assert.Nil(t, err)
var dropPartitionMsg *ms.DropPartitionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
dropPartitionMsg = v.(*ms.DropPartitionMsg)
}
break
}
}
assert.Equal(t, dropPartitionReq.MsgType, dropPartitionMsg.DropPartitionRequest.MsgType)
assert.Equal(t, dropPartitionReq.ReqID, dropPartitionMsg.DropPartitionRequest.ReqID)
assert.Equal(t, dropPartitionReq.Timestamp, dropPartitionMsg.DropPartitionRequest.Timestamp)
assert.Equal(t, dropPartitionReq.ProxyID, dropPartitionMsg.DropPartitionRequest.ProxyID)
assert.Equal(t, dropPartitionReq.PartitionName.CollectionName, dropPartitionMsg.DropPartitionRequest.PartitionName.CollectionName)
}

View File

@ -0,0 +1,117 @@
package master
import (
"fmt"
"log"
"os"
"path"
"path/filepath"
"strings"
"github.com/spf13/viper"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
)
type SysConfig struct {
kv *kv.EtcdKV
}
// InitFromFile initializes configs from the config files under filePath and stores them in etcd.
func (conf *SysConfig) InitFromFile(filePath string) error {
memConfigs, err := conf.getConfigFiles(filePath)
if err != nil {
return errors.Errorf("[Init SysConfig] %s\n", err.Error())
}
for _, memConfig := range memConfigs {
if err := conf.saveToEtcd(memConfig, "config"); err != nil {
return errors.Errorf("[Init SysConfig] %s\n", err.Error())
}
}
return nil
}
func (conf *SysConfig) GetByPrefix(keyPrefix string) (keys []string, values []string, err error) {
realPrefix := path.Join("config", strings.ToLower(keyPrefix))
keys, values, err = conf.kv.LoadWithPrefix(realPrefix)
if err != nil {
return nil, nil, err
}
for index := range keys {
keys[index] = strings.Replace(keys[index], conf.kv.GetPath("config"), "", 1)
}
log.Println("Loaded", len(keys), "pairs of configs with prefix", keyPrefix)
return keys, values, nil
}
// Get loads the config values for the given keys.
func (conf *SysConfig) Get(keys []string) ([]string, error) {
var keysToLoad []string
for i := range keys {
keysToLoad = append(keysToLoad, path.Join("config", strings.ToLower(keys[i])))
}
values, err := conf.kv.MultiLoad(keysToLoad)
if err != nil {
return nil, err
}
return values, nil
}
func (conf *SysConfig) getConfigFiles(filePath string) ([]*viper.Viper, error) {
var vipers []*viper.Viper
err := filepath.Walk(filePath,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// collect every .yaml config file under filePath
if !info.IsDir() && filepath.Ext(path) == ".yaml" {
log.Println("Config files ", info.Name())
currentConf := viper.New()
currentConf.SetConfigFile(path)
if err := currentConf.ReadInConfig(); err != nil {
log.Panic("Config file error: ", err)
}
vipers = append(vipers, currentConf)
}
return nil
})
if err != nil {
return nil, err
}
if len(vipers) == 0 {
return nil, errors.Errorf("There are no config files in the path `%s`.\n", filePath)
}
return vipers, nil
}
func (conf *SysConfig) saveToEtcd(memConfig *viper.Viper, secondRootPath string) error {
configMaps := map[string]string{}
allKeys := memConfig.AllKeys()
for _, key := range allKeys {
etcdKey := strings.ReplaceAll(key, ".", "/")
etcdKey = path.Join(secondRootPath, etcdKey)
val := memConfig.Get(key)
if val == nil {
configMaps[etcdKey] = ""
continue
}
configMaps[etcdKey] = fmt.Sprintf("%v", val)
}
if err := conf.kv.MultiSave(configMaps); err != nil {
return err
}
return nil
}
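A short usage sketch, assuming `configKV` is a `*kv.EtcdKV` rooted at the service's MetaRootPath and `../../configs` holds the yaml files:

```go
conf := &SysConfig{kv: configKV}
if err := conf.InitFromFile("../../configs"); err != nil {
	log.Fatal(err)
}

// Point lookups: keys are lower-cased and resolved under the "config" prefix.
vals, err := conf.Get([]string{"/master/port", "/etcd/address"})
if err != nil {
	log.Fatal(err)
}
log.Println(vals) // e.g. [53100 localhost]

// Prefix scan: returned keys have the etcd root path stripped off.
keys, prefixVals, err := conf.GetByPrefix("/master")
if err != nil {
	log.Fatal(err)
}
log.Println(len(keys), len(prefixVals))
```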

View File

@ -0,0 +1,209 @@
package master
import (
"context"
"fmt"
"log"
"path"
"strings"
"testing"
"time"
"github.com/zilliztech/milvus-distributed/internal/kv"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.etcd.io/etcd/clientv3"
)
func Test_SysConfig(t *testing.T) {
Init()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
cli, err := clientv3.New(clientv3.Config{
Endpoints: []string{Params.EtcdAddress},
DialTimeout: 5 * time.Second,
})
require.Nil(t, err)
_, err = cli.Delete(ctx, "/test/root", clientv3.WithPrefix())
require.Nil(t, err)
rootPath := "/test/root"
configKV := kv.NewEtcdKV(cli, rootPath)
defer configKV.Close()
sc := SysConfig{kv: configKV}
require.Equal(t, rootPath, sc.kv.GetPath("."))
t.Run("tests on contig_test.yaml", func(t *testing.T) {
err = sc.InitFromFile(".")
require.Nil(t, err)
testKeys := []string{
"/etcd/address",
"/master/port",
"/master/proxyidlist",
"/master/segmentthresholdfactor",
"/pulsar/token",
"/reader/stopflag",
"/proxy/timezone",
"/proxy/network/address",
"/proxy/storage/path",
"/storage/accesskey",
}
testVals := []string{
"localhost",
"53100",
"[1 2]",
"0.75",
"eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY",
"-1",
"UTC+8",
"0.0.0.0",
"/var/lib/milvus",
"",
}
vals, err := sc.Get(testKeys)
assert.Nil(t, err)
for i := range testVals {
assert.Equal(t, testVals[i], vals[i])
}
keys, vals, err := sc.GetByPrefix("/master")
assert.Nil(t, err)
for i := range keys {
assert.True(t, strings.HasPrefix(keys[i], "/master/"))
}
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, 21, len(keys))
// Test get all configs
keys, vals, err = sc.GetByPrefix("/")
assert.Nil(t, err)
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, 73, len(vals))
// Test get configs with a prefix that does not exist
keys, vals, err = sc.GetByPrefix("/config")
assert.Nil(t, err)
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, 0, len(keys))
assert.Equal(t, 0, len(vals))
_, _, err = sc.GetByPrefix("//././../../../../../..//../")
assert.Nil(t, err)
_, _, err = sc.GetByPrefix("/master/./address")
assert.Nil(t, err)
_, _, err = sc.GetByPrefix(".")
assert.Nil(t, err)
_, _, err = sc.GetByPrefix("\\")
assert.Nil(t, err)
})
t.Run("getConfigFiles", func(t *testing.T) {
filePath := "../../configs"
vipers, err := sc.getConfigFiles(filePath)
assert.Nil(t, err)
assert.NotNil(t, vipers[0])
filePath = "/path/not/exists"
_, err = sc.getConfigFiles(filePath)
assert.NotNil(t, err)
log.Println(err)
})
t.Run("Test saveToEtcd Normal", func(t *testing.T) {
_, err = cli.Delete(ctx, "/test/root/config", clientv3.WithPrefix())
require.Nil(t, err)
v := viper.New()
v.Set("a.suba1", "v1")
v.Set("a.suba2", "v2")
v.Set("a.suba3.subsuba1", "v3")
v.Set("a.suba3.subsuba2", "v4")
secondRootPath := "config"
err := sc.saveToEtcd(v, secondRootPath)
assert.Nil(t, err)
value, err := sc.kv.Load(path.Join(secondRootPath, "a/suba1"))
assert.Nil(t, err)
assert.Equal(t, "v1", value)
value, err = sc.kv.Load(path.Join(secondRootPath, "a/suba2"))
assert.Nil(t, err)
assert.Equal(t, "v2", value)
value, err = sc.kv.Load(path.Join(secondRootPath, "a/suba3/subsuba1"))
assert.Nil(t, err)
assert.Equal(t, "v3", value)
value, err = sc.kv.Load(path.Join(secondRootPath, "a/suba3/subsuba2"))
assert.Nil(t, err)
assert.Equal(t, "v4", value)
keys, values, err := sc.kv.LoadWithPrefix(path.Join(secondRootPath, "a"))
assert.Nil(t, err)
assert.Equal(t, 4, len(keys))
assert.Equal(t, 4, len(values))
assert.ElementsMatch(t, []string{
path.Join(sc.kv.GetPath(secondRootPath), "/a/suba1"),
path.Join(sc.kv.GetPath(secondRootPath), "/a/suba2"),
path.Join(sc.kv.GetPath(secondRootPath), "/a/suba3/subsuba1"),
path.Join(sc.kv.GetPath(secondRootPath), "/a/suba3/subsuba2"),
}, keys)
assert.ElementsMatch(t, []string{"v1", "v2", "v3", "v4"}, values)
keys = []string{
"/a/suba1",
"/a/suba2",
"/a/suba3/subsuba1",
"/a/suba3/subsuba2",
}
values, err = sc.Get(keys)
assert.Nil(t, err)
assert.ElementsMatch(t, []string{"v1", "v2", "v3", "v4"}, values)
keysAfter, values, err := sc.GetByPrefix("/a")
fmt.Println(keysAfter)
assert.Nil(t, err)
assert.ElementsMatch(t, []string{"v1", "v2", "v3", "v4"}, values)
assert.ElementsMatch(t, keys, keysAfter)
})
t.Run("Test saveToEtcd Different value types", func(t *testing.T) {
v := viper.New()
v.Set("string", "string")
v.Set("number", 1)
v.Set("nil", nil)
v.Set("float", 1.2)
v.Set("intslice", []int{100, 200})
v.Set("stringslice", []string{"a", "b"})
v.Set("stringmapstring", map[string]string{"k1": "1", "k2": "2"})
secondRootPath := "test_save_to_etcd_different_value_types"
err := sc.saveToEtcd(v, secondRootPath)
require.Nil(t, err)
keys, values, err := sc.kv.LoadWithPrefix(path.Join("/", secondRootPath))
assert.Nil(t, err)
assert.Equal(t, 7, len(keys))
assert.Equal(t, 7, len(values))
assert.ElementsMatch(t, []string{
path.Join(sc.kv.GetPath(secondRootPath), "nil"),
path.Join(sc.kv.GetPath(secondRootPath), "string"),
path.Join(sc.kv.GetPath(secondRootPath), "number"),
path.Join(sc.kv.GetPath(secondRootPath), "float"),
path.Join(sc.kv.GetPath(secondRootPath), "intslice"),
path.Join(sc.kv.GetPath(secondRootPath), "stringslice"),
path.Join(sc.kv.GetPath(secondRootPath), "stringmapstring"),
}, keys)
assert.ElementsMatch(t, []string{"", "string", "1", "1.2", "[100 200]", "[a b]", "map[k1:1 k2:2]"}, values)
})
}

View File

@ -81,13 +81,18 @@ func receiveMsg(stream *ms.MsgStream) []uint64 {
func TestStream_PulsarMsgStream_TimeTick(t *testing.T) {
Init()
pulsarAddress := Params.PulsarAddress
producerChannels := []string{"proxyTtBarrier"}
consumerChannels := []string{"proxyTtBarrier"}
consumerSubName := "proxyTtBarrier"
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
proxyTtInputStream, proxyTtOutputStream := initTestPulsarStream(ctx, pulsarAddress, producerChannels, consumerChannels, consumerSubName)
producerChannels := []string{"proxyDMTtBarrier"}
consumerChannels := []string{"proxyDMTtBarrier"}
consumerSubName := "proxyDMTtBarrier"
proxyDMTtInputStream, proxyDMTtOutputStream := initTestPulsarStream(ctx, pulsarAddress, producerChannels, consumerChannels, consumerSubName)
producerChannels = []string{"proxyDDTtBarrier"}
consumerChannels = []string{"proxyDDTtBarrier"}
consumerSubName = "proxyDDTtBarrier"
proxyDDTtInputStream, proxyDDTtOutputStream := initTestPulsarStream(ctx, pulsarAddress, producerChannels, consumerChannels, consumerSubName)
producerChannels = []string{"writeNodeBarrier"}
consumerChannels = []string{"writeNodeBarrier"}
@ -97,14 +102,20 @@ func TestStream_PulsarMsgStream_TimeTick(t *testing.T) {
timeSyncProducer, _ := NewTimeSyncMsgProducer(ctx)
timeSyncProducer.SetProxyTtBarrier(&TestTickBarrier{ctx: ctx})
timeSyncProducer.SetWriteNodeTtBarrier(&TestTickBarrier{ctx: ctx})
timeSyncProducer.SetDMSyncStream(*proxyTtInputStream)
timeSyncProducer.SetDMSyncStream(*proxyDMTtInputStream)
timeSyncProducer.SetDDSyncStream(*proxyDDTtInputStream)
timeSyncProducer.SetK2sSyncStream(*writeNodeInputStream)
(*proxyTtOutputStream).Start()
(*proxyDMTtOutputStream).Start()
(*proxyDDTtOutputStream).Start()
(*writeNodeOutputStream).Start()
timeSyncProducer.Start()
expected := []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
result1 := receiveMsg(proxyTtOutputStream)
result1 := receiveMsg(proxyDMTtOutputStream)
assert.Equal(t, expected, result1)
expected = []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
result1 = receiveMsg(proxyDDTtOutputStream)
assert.Equal(t, expected, result1)
expected = []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
result2 := receiveMsg(writeNodeOutputStream)
assert.Equal(t, expected, result2)

View File

@ -15,7 +15,8 @@ type timeSyncMsgProducer struct {
//hardTimeTickBarrier
writeNodeTtBarrier TimeTickBarrier
dmSyncStream ms.MsgStream // insert & delete
ddSyncStream ms.MsgStream // insert & delete
dmSyncStream ms.MsgStream
k2sSyncStream ms.MsgStream
ctx context.Context
@ -34,6 +35,9 @@ func (syncMsgProducer *timeSyncMsgProducer) SetProxyTtBarrier(proxyTtBarrier Tim
func (syncMsgProducer *timeSyncMsgProducer) SetWriteNodeTtBarrier(writeNodeTtBarrier TimeTickBarrier) {
syncMsgProducer.writeNodeTtBarrier = writeNodeTtBarrier
}
func (syncMsgProducer *timeSyncMsgProducer) SetDDSyncStream(ddSync ms.MsgStream) {
syncMsgProducer.ddSyncStream = ddSync
}
func (syncMsgProducer *timeSyncMsgProducer) SetDMSyncStream(dmSync ms.MsgStream) {
syncMsgProducer.dmSyncStream = dmSync
@ -43,7 +47,7 @@ func (syncMsgProducer *timeSyncMsgProducer) SetK2sSyncStream(k2sSync ms.MsgStrea
syncMsgProducer.k2sSyncStream = k2sSync
}
func (syncMsgProducer *timeSyncMsgProducer) broadcastMsg(barrier TimeTickBarrier, stream ms.MsgStream) error {
func (syncMsgProducer *timeSyncMsgProducer) broadcastMsg(barrier TimeTickBarrier, streams []ms.MsgStream) error {
for {
select {
case <-syncMsgProducer.ctx.Done():
@ -72,7 +76,9 @@ func (syncMsgProducer *timeSyncMsgProducer) broadcastMsg(barrier TimeTickBarrier
TimeTickMsg: timeTickResult,
}
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
err = stream.Broadcast(&msgPack)
for _, stream := range streams {
if err = stream.Broadcast(&msgPack); err != nil {
return err
}
}
@ -91,16 +97,17 @@ func (syncMsgProducer *timeSyncMsgProducer) Start() error {
return err
}
go syncMsgProducer.broadcastMsg(syncMsgProducer.proxyTtBarrier, syncMsgProducer.dmSyncStream)
go syncMsgProducer.broadcastMsg(syncMsgProducer.writeNodeTtBarrier, syncMsgProducer.k2sSyncStream)
go syncMsgProducer.broadcastMsg(syncMsgProducer.proxyTtBarrier, []ms.MsgStream{syncMsgProducer.dmSyncStream, syncMsgProducer.ddSyncStream})
go syncMsgProducer.broadcastMsg(syncMsgProducer.writeNodeTtBarrier, []ms.MsgStream{syncMsgProducer.k2sSyncStream})
return nil
}
func (syncMsgProducer *timeSyncMsgProducer) Close() {
syncMsgProducer.proxyTtBarrier.Close()
syncMsgProducer.writeNodeTtBarrier.Close()
syncMsgProducer.ddSyncStream.Close()
syncMsgProducer.dmSyncStream.Close()
syncMsgProducer.k2sSyncStream.Close()
syncMsgProducer.cancel()
syncMsgProducer.proxyTtBarrier.Close()
syncMsgProducer.writeNodeTtBarrier.Close()
}
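Wiring the producer takes the shape the test above uses — a sketch assuming the barriers and all three streams are created and started elsewhere (all identifiers below are placeholders):

```go
producer, err := NewTimeSyncMsgProducer(ctx)
if err != nil {
	log.Fatal(err)
}
producer.SetProxyTtBarrier(proxyTtBarrier)     // soft time-tick source
producer.SetWriteNodeTtBarrier(writeTtBarrier) // hard time-tick source
producer.SetDMSyncStream(dmStream)             // proxy barrier fan-out target 1
producer.SetDDSyncStream(ddStream)             // proxy barrier fan-out target 2
producer.SetK2sSyncStream(k2sStream)           // write-node barrier target
if err := producer.Start(); err != nil {
	log.Fatal(err)
}
defer producer.Close()
```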

internal/msgstream/msg.go Normal file, 593 lines
View File

@ -0,0 +1,593 @@
package msgstream
import (
"github.com/golang/protobuf/proto"
internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)
type MsgType = internalPb.MsgType
type TsMsg interface {
BeginTs() Timestamp
EndTs() Timestamp
Type() MsgType
HashKeys() []uint32
Marshal(TsMsg) ([]byte, error)
Unmarshal([]byte) (TsMsg, error)
}
type BaseMsg struct {
BeginTimestamp Timestamp
EndTimestamp Timestamp
HashValues []uint32
}
func (bm *BaseMsg) BeginTs() Timestamp {
return bm.BeginTimestamp
}
func (bm *BaseMsg) EndTs() Timestamp {
return bm.EndTimestamp
}
func (bm *BaseMsg) HashKeys() []uint32 {
return bm.HashValues
}
/////////////////////////////////////////Insert//////////////////////////////////////////
type InsertMsg struct {
BaseMsg
internalPb.InsertRequest
}
func (it *InsertMsg) Type() MsgType {
return it.MsgType
}
func (it *InsertMsg) Marshal(input TsMsg) ([]byte, error) {
insertMsg := input.(*InsertMsg)
insertRequest := &insertMsg.InsertRequest
mb, err := proto.Marshal(insertRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (it *InsertMsg) Unmarshal(input []byte) (TsMsg, error) {
insertRequest := internalPb.InsertRequest{}
err := proto.Unmarshal(input, &insertRequest)
if err != nil {
return nil, err
}
insertMsg := &InsertMsg{InsertRequest: insertRequest}
for _, timestamp := range insertMsg.Timestamps {
insertMsg.BeginTimestamp = timestamp
insertMsg.EndTimestamp = timestamp
break
}
for _, timestamp := range insertMsg.Timestamps {
if timestamp > insertMsg.EndTimestamp {
insertMsg.EndTimestamp = timestamp
}
if timestamp < insertMsg.BeginTimestamp {
insertMsg.BeginTimestamp = timestamp
}
}
return insertMsg, nil
}
/////////////////////////////////////////Delete//////////////////////////////////////////
type DeleteMsg struct {
BaseMsg
internalPb.DeleteRequest
}
func (dt *DeleteMsg) Type() MsgType {
return dt.MsgType
}
func (dt *DeleteMsg) Marshal(input TsMsg) ([]byte, error) {
deleteTask := input.(*DeleteMsg)
deleteRequest := &deleteTask.DeleteRequest
mb, err := proto.Marshal(deleteRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dt *DeleteMsg) Unmarshal(input []byte) (TsMsg, error) {
deleteRequest := internalPb.DeleteRequest{}
err := proto.Unmarshal(input, &deleteRequest)
if err != nil {
return nil, err
}
deleteMsg := &DeleteMsg{DeleteRequest: deleteRequest}
for _, timestamp := range deleteMsg.Timestamps {
deleteMsg.BeginTimestamp = timestamp
deleteMsg.EndTimestamp = timestamp
break
}
for _, timestamp := range deleteMsg.Timestamps {
if timestamp > deleteMsg.EndTimestamp {
deleteMsg.EndTimestamp = timestamp
}
if timestamp < deleteMsg.BeginTimestamp {
deleteMsg.BeginTimestamp = timestamp
}
}
return deleteMsg, nil
}
/////////////////////////////////////////Search//////////////////////////////////////////
type SearchMsg struct {
BaseMsg
internalPb.SearchRequest
}
func (st *SearchMsg) Type() MsgType {
return st.MsgType
}
func (st *SearchMsg) Marshal(input TsMsg) ([]byte, error) {
searchTask := input.(*SearchMsg)
searchRequest := &searchTask.SearchRequest
mb, err := proto.Marshal(searchRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (st *SearchMsg) Unmarshal(input []byte) (TsMsg, error) {
searchRequest := internalPb.SearchRequest{}
err := proto.Unmarshal(input, &searchRequest)
if err != nil {
return nil, err
}
searchMsg := &SearchMsg{SearchRequest: searchRequest}
searchMsg.BeginTimestamp = searchMsg.Timestamp
searchMsg.EndTimestamp = searchMsg.Timestamp
return searchMsg, nil
}
/////////////////////////////////////////SearchResult//////////////////////////////////////////
type SearchResultMsg struct {
BaseMsg
internalPb.SearchResult
}
func (srt *SearchResultMsg) Type() MsgType {
return srt.MsgType
}
func (srt *SearchResultMsg) Marshal(input TsMsg) ([]byte, error) {
searchResultTask := input.(*SearchResultMsg)
searchResultRequest := &searchResultTask.SearchResult
mb, err := proto.Marshal(searchResultRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (srt *SearchResultMsg) Unmarshal(input []byte) (TsMsg, error) {
searchResultRequest := internalPb.SearchResult{}
err := proto.Unmarshal(input, &searchResultRequest)
if err != nil {
return nil, err
}
searchResultMsg := &SearchResultMsg{SearchResult: searchResultRequest}
searchResultMsg.BeginTimestamp = searchResultMsg.Timestamp
searchResultMsg.EndTimestamp = searchResultMsg.Timestamp
return searchResultMsg, nil
}
/////////////////////////////////////////TimeTick//////////////////////////////////////////
type TimeTickMsg struct {
BaseMsg
internalPb.TimeTickMsg
}
func (tst *TimeTickMsg) Type() MsgType {
return tst.MsgType
}
func (tst *TimeTickMsg) Marshal(input TsMsg) ([]byte, error) {
timeTickTask := input.(*TimeTickMsg)
timeTick := &timeTickTask.TimeTickMsg
mb, err := proto.Marshal(timeTick)
if err != nil {
return nil, err
}
return mb, nil
}
func (tst *TimeTickMsg) Unmarshal(input []byte) (TsMsg, error) {
timeTickMsg := internalPb.TimeTickMsg{}
err := proto.Unmarshal(input, &timeTickMsg)
if err != nil {
return nil, err
}
timeTick := &TimeTickMsg{TimeTickMsg: timeTickMsg}
timeTick.BeginTimestamp = timeTick.Timestamp
timeTick.EndTimestamp = timeTick.Timestamp
return timeTick, nil
}
/////////////////////////////////////////QueryNodeSegStats//////////////////////////////////////////
type QueryNodeSegStatsMsg struct {
BaseMsg
internalPb.QueryNodeSegStats
}
func (qs *QueryNodeSegStatsMsg) Type() MsgType {
return qs.MsgType
}
func (qs *QueryNodeSegStatsMsg) Marshal(input TsMsg) ([]byte, error) {
queryNodeSegStatsTask := input.(*QueryNodeSegStatsMsg)
queryNodeSegStats := &queryNodeSegStatsTask.QueryNodeSegStats
mb, err := proto.Marshal(queryNodeSegStats)
if err != nil {
return nil, err
}
return mb, nil
}
func (qs *QueryNodeSegStatsMsg) Unmarshal(input []byte) (TsMsg, error) {
queryNodeSegStats := internalPb.QueryNodeSegStats{}
err := proto.Unmarshal(input, &queryNodeSegStats)
if err != nil {
return nil, err
}
queryNodeSegStatsMsg := &QueryNodeSegStatsMsg{QueryNodeSegStats: queryNodeSegStats}
return queryNodeSegStatsMsg, nil
}
///////////////////////////////////////////Key2Seg//////////////////////////////////////////
//type Key2SegMsg struct {
// BaseMsg
// internalPb.Key2SegMsg
//}
//
//func (k2st *Key2SegMsg) Type() MsgType {
// return
//}
/////////////////////////////////////////CreateCollection//////////////////////////////////////////
type CreateCollectionMsg struct {
BaseMsg
internalPb.CreateCollectionRequest
}
func (cc *CreateCollectionMsg) Type() MsgType {
return cc.MsgType
}
func (cc *CreateCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
createCollectionMsg := input.(*CreateCollectionMsg)
createCollectionRequest := &createCollectionMsg.CreateCollectionRequest
mb, err := proto.Marshal(createCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (cc *CreateCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
createCollectionRequest := internalPb.CreateCollectionRequest{}
err := proto.Unmarshal(input, &createCollectionRequest)
if err != nil {
return nil, err
}
createCollectionMsg := &CreateCollectionMsg{CreateCollectionRequest: createCollectionRequest}
createCollectionMsg.BeginTimestamp = createCollectionMsg.Timestamp
createCollectionMsg.EndTimestamp = createCollectionMsg.Timestamp
return createCollectionMsg, nil
}
/////////////////////////////////////////DropCollection//////////////////////////////////////////
type DropCollectionMsg struct {
BaseMsg
internalPb.DropCollectionRequest
}
func (dc *DropCollectionMsg) Type() MsgType {
return dc.MsgType
}
func (dc *DropCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
dropCollectionMsg := input.(*DropCollectionMsg)
dropCollectionRequest := &dropCollectionMsg.DropCollectionRequest
mb, err := proto.Marshal(dropCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dc *DropCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
dropCollectionRequest := internalPb.DropCollectionRequest{}
err := proto.Unmarshal(input, &dropCollectionRequest)
if err != nil {
return nil, err
}
dropCollectionMsg := &DropCollectionMsg{DropCollectionRequest: dropCollectionRequest}
dropCollectionMsg.BeginTimestamp = dropCollectionMsg.Timestamp
dropCollectionMsg.EndTimestamp = dropCollectionMsg.Timestamp
return dropCollectionMsg, nil
}
/////////////////////////////////////////HasCollection//////////////////////////////////////////
type HasCollectionMsg struct {
BaseMsg
internalPb.HasCollectionRequest
}
func (hc *HasCollectionMsg) Type() MsgType {
return hc.MsgType
}
func (hc *HasCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
hasCollectionMsg := input.(*HasCollectionMsg)
hasCollectionRequest := &hasCollectionMsg.HasCollectionRequest
mb, err := proto.Marshal(hasCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (hc *HasCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
hasCollectionRequest := internalPb.HasCollectionRequest{}
err := proto.Unmarshal(input, &hasCollectionRequest)
if err != nil {
return nil, err
}
hasCollectionMsg := &HasCollectionMsg{HasCollectionRequest: hasCollectionRequest}
hasCollectionMsg.BeginTimestamp = hasCollectionMsg.Timestamp
hasCollectionMsg.EndTimestamp = hasCollectionMsg.Timestamp
return hasCollectionMsg, nil
}
/////////////////////////////////////////DescribeCollection//////////////////////////////////////////
type DescribeCollectionMsg struct {
BaseMsg
internalPb.DescribeCollectionRequest
}
func (dc *DescribeCollectionMsg) Type() MsgType {
return dc.MsgType
}
func (dc *DescribeCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
describeCollectionMsg := input.(*DescribeCollectionMsg)
describeCollectionRequest := &describeCollectionMsg.DescribeCollectionRequest
mb, err := proto.Marshal(describeCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dc *DescribeCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
describeCollectionRequest := internalPb.DescribeCollectionRequest{}
err := proto.Unmarshal(input, &describeCollectionRequest)
if err != nil {
return nil, err
}
describeCollectionMsg := &DescribeCollectionMsg{DescribeCollectionRequest: describeCollectionRequest}
describeCollectionMsg.BeginTimestamp = describeCollectionMsg.Timestamp
describeCollectionMsg.EndTimestamp = describeCollectionMsg.Timestamp
return describeCollectionMsg, nil
}
/////////////////////////////////////////ShowCollection//////////////////////////////////////////
type ShowCollectionMsg struct {
BaseMsg
internalPb.ShowCollectionRequest
}
func (sc *ShowCollectionMsg) Type() MsgType {
return sc.MsgType
}
func (sc *ShowCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
showCollectionMsg := input.(*ShowCollectionMsg)
showCollectionRequest := &showCollectionMsg.ShowCollectionRequest
mb, err := proto.Marshal(showCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (sc *ShowCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
showCollectionRequest := internalPb.ShowCollectionRequest{}
err := proto.Unmarshal(input, &showCollectionRequest)
if err != nil {
return nil, err
}
showCollectionMsg := &ShowCollectionMsg{ShowCollectionRequest: showCollectionRequest}
showCollectionMsg.BeginTimestamp = showCollectionMsg.Timestamp
showCollectionMsg.EndTimestamp = showCollectionMsg.Timestamp
return showCollectionMsg, nil
}
/////////////////////////////////////////CreatePartition//////////////////////////////////////////
type CreatePartitionMsg struct {
BaseMsg
internalPb.CreatePartitionRequest
}
func (cc *CreatePartitionMsg) Type() MsgType {
return cc.MsgType
}
func (cc *CreatePartitionMsg) Marshal(input TsMsg) ([]byte, error) {
createPartitionMsg := input.(*CreatePartitionMsg)
createPartitionRequest := &createPartitionMsg.CreatePartitionRequest
mb, err := proto.Marshal(createPartitionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (cc *CreatePartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
createPartitionRequest := internalPb.CreatePartitionRequest{}
err := proto.Unmarshal(input, &createPartitionRequest)
if err != nil {
return nil, err
}
createPartitionMsg := &CreatePartitionMsg{CreatePartitionRequest: createPartitionRequest}
createPartitionMsg.BeginTimestamp = createPartitionMsg.Timestamp
createPartitionMsg.EndTimestamp = createPartitionMsg.Timestamp
return createPartitionMsg, nil
}
/////////////////////////////////////////DropPartition//////////////////////////////////////////
type DropPartitionMsg struct {
BaseMsg
internalPb.DropPartitionRequest
}
func (dc *DropPartitionMsg) Type() MsgType {
return dc.MsgType
}
func (dc *DropPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
dropPartitionMsg := input.(*DropPartitionMsg)
dropPartitionRequest := &dropPartitionMsg.DropPartitionRequest
mb, err := proto.Marshal(dropPartitionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dc *DropPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
dropPartitionRequest := internalPb.DropPartitionRequest{}
err := proto.Unmarshal(input, &dropPartitionRequest)
if err != nil {
return nil, err
}
dropPartitionMsg := &DropPartitionMsg{DropPartitionRequest: dropPartitionRequest}
dropPartitionMsg.BeginTimestamp = dropPartitionMsg.Timestamp
dropPartitionMsg.EndTimestamp = dropPartitionMsg.Timestamp
return dropPartitionMsg, nil
}
/////////////////////////////////////////HasPartition//////////////////////////////////////////
type HasPartitionMsg struct {
BaseMsg
internalPb.HasPartitionRequest
}
func (hc *HasPartitionMsg) Type() MsgType {
return hc.MsgType
}
func (hc *HasPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
hasPartitionMsg := input.(*HasPartitionMsg)
hasPartitionRequest := &hasPartitionMsg.HasPartitionRequest
mb, err := proto.Marshal(hasPartitionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (hc *HasPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
hasPartitionRequest := internalPb.HasPartitionRequest{}
err := proto.Unmarshal(input, &hasPartitionRequest)
if err != nil {
return nil, err
}
hasPartitionMsg := &HasPartitionMsg{HasPartitionRequest: hasPartitionRequest}
hasPartitionMsg.BeginTimestamp = hasPartitionMsg.Timestamp
hasPartitionMsg.EndTimestamp = hasPartitionMsg.Timestamp
return hasPartitionMsg, nil
}
/////////////////////////////////////////DescribePartition//////////////////////////////////////////
type DescribePartitionMsg struct {
BaseMsg
internalPb.DescribePartitionRequest
}
func (dc *DescribePartitionMsg) Type() MsgType {
return dc.MsgType
}
func (dc *DescribePartitionMsg) Marshal(input TsMsg) ([]byte, error) {
describePartitionMsg := input.(*DescribePartitionMsg)
describePartitionRequest := &describePartitionMsg.DescribePartitionRequest
mb, err := proto.Marshal(describePartitionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dc *DescribePartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
describePartitionRequest := internalPb.DescribePartitionRequest{}
err := proto.Unmarshal(input, &describePartitionRequest)
if err != nil {
return nil, err
}
describePartitionMsg := &DescribePartitionMsg{DescribePartitionRequest: describePartitionRequest}
describePartitionMsg.BeginTimestamp = describePartitionMsg.Timestamp
describePartitionMsg.EndTimestamp = describePartitionMsg.Timestamp
return describePartitionMsg, nil
}
/////////////////////////////////////////ShowPartition//////////////////////////////////////////
type ShowPartitionMsg struct {
BaseMsg
internalPb.ShowPartitionRequest
}
func (sc *ShowPartitionMsg) Type() MsgType {
return sc.MsgType
}
func (sc *ShowPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
showPartitionMsg := input.(*ShowPartitionMsg)
showPartitionRequest := &showPartitionMsg.ShowPartitionRequest
mb, err := proto.Marshal(showPartitionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (sc *ShowPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
showPartitionRequest := internalPb.ShowPartitionRequest{}
err := proto.Unmarshal(input, &showPartitionRequest)
if err != nil {
return nil, err
}
showPartitionMsg := &ShowPartitionMsg{ShowPartitionRequest: showPartitionRequest}
showPartitionMsg.BeginTimestamp = showPartitionMsg.Timestamp
showPartitionMsg.EndTimestamp = showPartitionMsg.Timestamp
return showPartitionMsg, nil
}
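Every concrete message in this file follows the same pattern: embed BaseMsg for timestamps and hash keys, embed the protobuf request, and restore Begin/EndTimestamp from the request on Unmarshal. A round-trip sketch for TimeTickMsg, using only the API defined above (field values are illustrative):

```go
msg := &TimeTickMsg{
	BaseMsg: BaseMsg{BeginTimestamp: 10, EndTimestamp: 10, HashValues: []uint32{0}},
	TimeTickMsg: internalPb.TimeTickMsg{
		MsgType:   internalPb.MsgType_kTimeTick,
		Timestamp: 10,
	},
}
raw, err := msg.Marshal(msg)
if err != nil {
	log.Fatal(err)
}
decoded, err := (&TimeTickMsg{}).Unmarshal(raw)
if err != nil {
	log.Fatal(err)
}
// Unmarshal restores Begin/EndTimestamp from the request's Timestamp field.
log.Println(decoded.BeginTs(), decoded.EndTs()) // 10 10
```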

View File

@ -1,263 +0,0 @@
package msgstream
import (
"github.com/golang/protobuf/proto"
internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)
type MsgType = internalPb.MsgType
type TsMsg interface {
BeginTs() Timestamp
EndTs() Timestamp
Type() MsgType
HashKeys() []uint32
Marshal(TsMsg) ([]byte, error)
Unmarshal([]byte) (TsMsg, error)
}
type BaseMsg struct {
BeginTimestamp Timestamp
EndTimestamp Timestamp
HashValues []uint32
}
func (bm *BaseMsg) BeginTs() Timestamp {
return bm.BeginTimestamp
}
func (bm *BaseMsg) EndTs() Timestamp {
return bm.EndTimestamp
}
func (bm *BaseMsg) HashKeys() []uint32 {
return bm.HashValues
}
/////////////////////////////////////////Insert//////////////////////////////////////////
type InsertMsg struct {
BaseMsg
internalPb.InsertRequest
}
func (it *InsertMsg) Type() MsgType {
return it.MsgType
}
func (it *InsertMsg) Marshal(input TsMsg) ([]byte, error) {
insertMsg := input.(*InsertMsg)
insertRequest := &insertMsg.InsertRequest
mb, err := proto.Marshal(insertRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (it *InsertMsg) Unmarshal(input []byte) (TsMsg, error) {
insertRequest := internalPb.InsertRequest{}
err := proto.Unmarshal(input, &insertRequest)
if err != nil {
return nil, err
}
insertMsg := &InsertMsg{InsertRequest: insertRequest}
for _, timestamp := range insertMsg.Timestamps {
insertMsg.BeginTimestamp = timestamp
insertMsg.EndTimestamp = timestamp
break
}
for _, timestamp := range insertMsg.Timestamps {
if timestamp > insertMsg.EndTimestamp {
insertMsg.EndTimestamp = timestamp
}
if timestamp < insertMsg.BeginTimestamp {
insertMsg.BeginTimestamp = timestamp
}
}
return insertMsg, nil
}
/////////////////////////////////////////Delete//////////////////////////////////////////
type DeleteMsg struct {
BaseMsg
internalPb.DeleteRequest
}
func (dt *DeleteMsg) Type() MsgType {
return dt.MsgType
}
func (dt *DeleteMsg) Marshal(input TsMsg) ([]byte, error) {
deleteTask := input.(*DeleteMsg)
deleteRequest := &deleteTask.DeleteRequest
mb, err := proto.Marshal(deleteRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dt *DeleteMsg) Unmarshal(input []byte) (TsMsg, error) {
deleteRequest := internalPb.DeleteRequest{}
err := proto.Unmarshal(input, &deleteRequest)
if err != nil {
return nil, err
}
deleteMsg := &DeleteMsg{DeleteRequest: deleteRequest}
for _, timestamp := range deleteMsg.Timestamps {
deleteMsg.BeginTimestamp = timestamp
deleteMsg.EndTimestamp = timestamp
break
}
for _, timestamp := range deleteMsg.Timestamps {
if timestamp > deleteMsg.EndTimestamp {
deleteMsg.EndTimestamp = timestamp
}
if timestamp < deleteMsg.BeginTimestamp {
deleteMsg.BeginTimestamp = timestamp
}
}
return deleteMsg, nil
}
/////////////////////////////////////////Search//////////////////////////////////////////
type SearchMsg struct {
BaseMsg
internalPb.SearchRequest
}
func (st *SearchMsg) Type() MsgType {
return st.MsgType
}
func (st *SearchMsg) Marshal(input TsMsg) ([]byte, error) {
searchTask := input.(*SearchMsg)
searchRequest := &searchTask.SearchRequest
mb, err := proto.Marshal(searchRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (st *SearchMsg) Unmarshal(input []byte) (TsMsg, error) {
searchRequest := internalPb.SearchRequest{}
err := proto.Unmarshal(input, &searchRequest)
if err != nil {
return nil, err
}
searchMsg := &SearchMsg{SearchRequest: searchRequest}
searchMsg.BeginTimestamp = searchMsg.Timestamp
searchMsg.EndTimestamp = searchMsg.Timestamp
return searchMsg, nil
}
/////////////////////////////////////////SearchResult//////////////////////////////////////////
type SearchResultMsg struct {
BaseMsg
internalPb.SearchResult
}
func (srt *SearchResultMsg) Type() MsgType {
return srt.MsgType
}
func (srt *SearchResultMsg) Marshal(input TsMsg) ([]byte, error) {
searchResultTask := input.(*SearchResultMsg)
searchResultRequest := &searchResultTask.SearchResult
mb, err := proto.Marshal(searchResultRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (srt *SearchResultMsg) Unmarshal(input []byte) (TsMsg, error) {
searchResultRequest := internalPb.SearchResult{}
err := proto.Unmarshal(input, &searchResultRequest)
if err != nil {
return nil, err
}
searchResultMsg := &SearchResultMsg{SearchResult: searchResultRequest}
searchResultMsg.BeginTimestamp = searchResultMsg.Timestamp
searchResultMsg.EndTimestamp = searchResultMsg.Timestamp
return searchResultMsg, nil
}
/////////////////////////////////////////TimeTick//////////////////////////////////////////
type TimeTickMsg struct {
BaseMsg
internalPb.TimeTickMsg
}
func (tst *TimeTickMsg) Type() MsgType {
return tst.MsgType
}
func (tst *TimeTickMsg) Marshal(input TsMsg) ([]byte, error) {
timeTickTask := input.(*TimeTickMsg)
timeTick := &timeTickTask.TimeTickMsg
mb, err := proto.Marshal(timeTick)
if err != nil {
return nil, err
}
return mb, nil
}
func (tst *TimeTickMsg) Unmarshal(input []byte) (TsMsg, error) {
timeTickMsg := internalPb.TimeTickMsg{}
err := proto.Unmarshal(input, &timeTickMsg)
if err != nil {
return nil, err
}
timeTick := &TimeTickMsg{TimeTickMsg: timeTickMsg}
timeTick.BeginTimestamp = timeTick.Timestamp
timeTick.EndTimestamp = timeTick.Timestamp
return timeTick, nil
}
/////////////////////////////////////////QueryNodeSegStats//////////////////////////////////////////
type QueryNodeSegStatsMsg struct {
BaseMsg
internalPb.QueryNodeSegStats
}
func (qs *QueryNodeSegStatsMsg) Type() MsgType {
return qs.MsgType
}
func (qs *QueryNodeSegStatsMsg) Marshal(input TsMsg) ([]byte, error) {
queryNodeSegStatsTask := input.(*QueryNodeSegStatsMsg)
queryNodeSegStats := &queryNodeSegStatsTask.QueryNodeSegStats
mb, err := proto.Marshal(queryNodeSegStats)
if err != nil {
return nil, err
}
return mb, nil
}
func (qs *QueryNodeSegStatsMsg) Unmarshal(input []byte) (TsMsg, error) {
queryNodeSegStats := internalPb.QueryNodeSegStats{}
err := proto.Unmarshal(input, &queryNodeSegStats)
if err != nil {
return nil, err
}
queryNodeSegStatsMsg := &QueryNodeSegStatsMsg{QueryNodeSegStats: queryNodeSegStats}
return queryNodeSegStatsMsg, nil
}
///////////////////////////////////////////Key2Seg//////////////////////////////////////////
//type Key2SegMsg struct {
// BaseMsg
// internalPb.Key2SegMsg
//}
//
//func (k2st *Key2SegMsg) Type() MsgType {
// return
//}
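
Taken together, every concrete TsMsg above follows the same contract: Marshal serializes the embedded protobuf request, and Unmarshal rebuilds the wrapper and derives the BeginTimestamp/EndTimestamp window from the payload. A minimal round-trip sketch, assuming the types above plus the standard fmt package; the function name and sample timestamps are illustrative only:

func exampleInsertRoundTrip() {
	original := &InsertMsg{
		InsertRequest: internalPb.InsertRequest{
			MsgType:    internalPb.MsgType_kInsert,
			Timestamps: []uint64{42, 7}, // deliberately unordered
		},
	}
	payload, err := original.Marshal(original)
	if err != nil {
		panic(err)
	}
	restored, err := (&InsertMsg{}).Unmarshal(payload)
	if err != nil {
		panic(err)
	}
	msg := restored.(*InsertMsg)
	// Unmarshal derived the window as the min/max of Timestamps: 7 and 42.
	fmt.Println(msg.BeginTimestamp, msg.EndTimestamp)
}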

View File

@@ -30,6 +30,11 @@ func (dispatcher *UnmarshalDispatcher) addDefaultMsgTemplates() {
searchMsg := SearchMsg{}
searchResultMsg := SearchResultMsg{}
timeTickMsg := TimeTickMsg{}
createCollectionMsg := CreateCollectionMsg{}
dropCollectionMsg := DropCollectionMsg{}
createPartitionMsg := CreatePartitionMsg{}
dropPartitionMsg := DropPartitionMsg{}
queryNodeSegStatsMsg := QueryNodeSegStatsMsg{}
dispatcher.tempMap = make(map[internalPb.MsgType]UnmarshalFunc)
dispatcher.tempMap[internalPb.MsgType_kInsert] = insertMsg.Unmarshal
@@ -38,6 +43,11 @@ func (dispatcher *UnmarshalDispatcher) addDefaultMsgTemplates() {
dispatcher.tempMap[internalPb.MsgType_kSearchResult] = searchResultMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kTimeTick] = timeTickMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kQueryNodeSegStats] = queryNodeSegStatsMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kCreateCollection] = createCollectionMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kDropCollection] = dropCollectionMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kCreatePartition] = createPartitionMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kDropPartition] = dropPartitionMsg.Unmarshal
}
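
With the template map filled in, routing a raw payload is a single lookup keyed by MsgType. A hedged sketch of that dispatch step; the helper name and error text are assumptions rather than the dispatcher's actual API, and the standard errors package is assumed to be imported:

// Hypothetical helper: route raw bytes to the UnmarshalFunc registered above.
func (dispatcher *UnmarshalDispatcher) unmarshalByType(payload []byte, msgType internalPb.MsgType) (TsMsg, error) {
	unmarshal, ok := dispatcher.tempMap[msgType]
	if !ok {
		return nil, errors.New("no unmarshal template registered for msg type " + msgType.String())
	}
	return unmarshal(payload)
}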
func NewUnmarshalDispatcher() *UnmarshalDispatcher {

View File

@@ -14,6 +14,7 @@ enum MsgType {
kHasCollection = 102;
kDescribeCollection = 103;
kShowCollections = 104;
kGetSysConfigs = 105;
/* Definition Requests: partition */
kCreatePartition = 200;
@@ -33,6 +34,7 @@ enum MsgType {
/* System Control */
kTimeTick = 1200;
kQueryNodeSegStats = 1201;
}
enum PeerRole {
@@ -223,6 +225,19 @@ message SearchRequest {
}
/**
* @brief Request of GetSysConfigs
*/
message SysConfigRequest {
MsgType msg_type = 1;
int64 reqID = 2;
int64 proxyID = 3;
uint64 timestamp = 4;
repeated string keys = 5;
repeated string key_prefixes = 6;
}
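
In Go terms, a caller fills the routing header (msg_type, reqID, proxyID, timestamp) and whichever of keys/key_prefixes it needs; the two selectors may be combined in one request. A hedged construction sketch against the generated type; every literal below is a placeholder, with the real values coming from the proxy's allocators and configuration:

req := &internalpb.SysConfigRequest{
	MsgType:     internalpb.MsgType_kGetSysConfigs,
	ReqID:       1,                       // placeholder; from the request-ID allocator
	ProxyID:     0,                       // placeholder; the calling proxy's ID
	Timestamp:   1,                       // placeholder; from the timestamp allocator
	Keys:        []string{"master.port"}, // exact keys, and/or
	KeyPrefixes: []string{"msgChannel."}, // whole subtrees by prefix
}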
message SearchResult {
MsgType msg_type = 1;
common.Status status = 2;
@ -266,4 +281,4 @@ message QueryNodeSegStats {
MsgType msg_type = 1;
int64 peerID = 2;
repeated SegmentStats seg_stats = 3;
}
}

View File

@@ -32,6 +32,7 @@ const (
MsgType_kHasCollection MsgType = 102
MsgType_kDescribeCollection MsgType = 103
MsgType_kShowCollections MsgType = 104
MsgType_kGetSysConfigs MsgType = 105
// Definition Requests: partition
MsgType_kCreatePartition MsgType = 200
MsgType_kDropPartition MsgType = 201
@@ -56,6 +57,7 @@ var MsgType_name = map[int32]string{
102: "kHasCollection",
103: "kDescribeCollection",
104: "kShowCollections",
105: "kGetSysConfigs",
200: "kCreatePartition",
201: "kDropPartition",
202: "kHasPartition",
@@ -76,6 +78,7 @@ var MsgType_value = map[string]int32{
"kHasCollection": 102,
"kDescribeCollection": 103,
"kShowCollections": 104,
"kGetSysConfigs": 105,
"kCreatePartition": 200,
"kDropPartition": 201,
"kHasPartition": 202,
@@ -1579,6 +1582,87 @@ func (m *SearchRequest) GetQuery() *commonpb.Blob {
return nil
}
//*
// @brief Request of GetSysConfigs
type SysConfigRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqID int64 `protobuf:"varint,2,opt,name=reqID,proto3" json:"reqID,omitempty"`
ProxyID int64 `protobuf:"varint,3,opt,name=proxyID,proto3" json:"proxyID,omitempty"`
Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
Keys []string `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"`
KeyPrefixes []string `protobuf:"bytes,6,rep,name=key_prefixes,json=keyPrefixes,proto3" json:"key_prefixes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SysConfigRequest) Reset() { *m = SysConfigRequest{} }
func (m *SysConfigRequest) String() string { return proto.CompactTextString(m) }
func (*SysConfigRequest) ProtoMessage() {}
func (*SysConfigRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{21}
}
func (m *SysConfigRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SysConfigRequest.Unmarshal(m, b)
}
func (m *SysConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SysConfigRequest.Marshal(b, m, deterministic)
}
func (m *SysConfigRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_SysConfigRequest.Merge(m, src)
}
func (m *SysConfigRequest) XXX_Size() int {
return xxx_messageInfo_SysConfigRequest.Size(m)
}
func (m *SysConfigRequest) XXX_DiscardUnknown() {
xxx_messageInfo_SysConfigRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SysConfigRequest proto.InternalMessageInfo
func (m *SysConfigRequest) GetMsgType() MsgType {
if m != nil {
return m.MsgType
}
return MsgType_kNone
}
func (m *SysConfigRequest) GetReqID() int64 {
if m != nil {
return m.ReqID
}
return 0
}
func (m *SysConfigRequest) GetProxyID() int64 {
if m != nil {
return m.ProxyID
}
return 0
}
func (m *SysConfigRequest) GetTimestamp() uint64 {
if m != nil {
return m.Timestamp
}
return 0
}
func (m *SysConfigRequest) GetKeys() []string {
if m != nil {
return m.Keys
}
return nil
}
func (m *SysConfigRequest) GetKeyPrefixes() []string {
if m != nil {
return m.KeyPrefixes
}
return nil
}
type SearchResult struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
Status *commonpb.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
@@ -1597,7 +1681,7 @@ func (m *SearchResult) Reset() { *m = SearchResult{} }
func (m *SearchResult) String() string { return proto.CompactTextString(m) }
func (*SearchResult) ProtoMessage() {}
func (*SearchResult) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{21}
return fileDescriptor_7eb37f6b80b23116, []int{22}
}
func (m *SearchResult) XXX_Unmarshal(b []byte) error {
@@ -1687,7 +1771,7 @@ func (m *TimeTickMsg) Reset() { *m = TimeTickMsg{} }
func (m *TimeTickMsg) String() string { return proto.CompactTextString(m) }
func (*TimeTickMsg) ProtoMessage() {}
func (*TimeTickMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{22}
return fileDescriptor_7eb37f6b80b23116, []int{23}
}
func (m *TimeTickMsg) XXX_Unmarshal(b []byte) error {
@@ -1744,7 +1828,7 @@ func (m *Key2Seg) Reset() { *m = Key2Seg{} }
func (m *Key2Seg) String() string { return proto.CompactTextString(m) }
func (*Key2Seg) ProtoMessage() {}
func (*Key2Seg) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{23}
return fileDescriptor_7eb37f6b80b23116, []int{24}
}
func (m *Key2Seg) XXX_Unmarshal(b []byte) error {
@@ -1812,7 +1896,7 @@ func (m *Key2SegMsg) Reset() { *m = Key2SegMsg{} }
func (m *Key2SegMsg) String() string { return proto.CompactTextString(m) }
func (*Key2SegMsg) ProtoMessage() {}
func (*Key2SegMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{24}
return fileDescriptor_7eb37f6b80b23116, []int{25}
}
func (m *Key2SegMsg) XXX_Unmarshal(b []byte) error {
@@ -1861,7 +1945,7 @@ func (m *SegmentStats) Reset() { *m = SegmentStats{} }
func (m *SegmentStats) String() string { return proto.CompactTextString(m) }
func (*SegmentStats) ProtoMessage() {}
func (*SegmentStats) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{25}
return fileDescriptor_7eb37f6b80b23116, []int{26}
}
func (m *SegmentStats) XXX_Unmarshal(b []byte) error {
@@ -1923,7 +2007,7 @@ func (m *QueryNodeSegStats) Reset() { *m = QueryNodeSegStats{} }
func (m *QueryNodeSegStats) String() string { return proto.CompactTextString(m) }
func (*QueryNodeSegStats) ProtoMessage() {}
func (*QueryNodeSegStats) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{26}
return fileDescriptor_7eb37f6b80b23116, []int{27}
}
func (m *QueryNodeSegStats) XXX_Unmarshal(b []byte) error {
@@ -1989,6 +2073,7 @@ func init() {
proto.RegisterType((*InsertRequest)(nil), "milvus.proto.internal.InsertRequest")
proto.RegisterType((*DeleteRequest)(nil), "milvus.proto.internal.DeleteRequest")
proto.RegisterType((*SearchRequest)(nil), "milvus.proto.internal.SearchRequest")
proto.RegisterType((*SysConfigRequest)(nil), "milvus.proto.internal.SysConfigRequest")
proto.RegisterType((*SearchResult)(nil), "milvus.proto.internal.SearchResult")
proto.RegisterType((*TimeTickMsg)(nil), "milvus.proto.internal.TimeTickMsg")
proto.RegisterType((*Key2Seg)(nil), "milvus.proto.internal.Key2Seg")
@@ -2000,94 +2085,98 @@
func init() { proto.RegisterFile("internal_msg.proto", fileDescriptor_7eb37f6b80b23116) }
var fileDescriptor_7eb37f6b80b23116 = []byte{
// 1416 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x58, 0x4b, 0x6f, 0x1c, 0xc5,
0x13, 0x4f, 0xef, 0xac, 0xf7, 0x51, 0x6b, 0xaf, 0xc7, 0x6d, 0x3b, 0xd9, 0x24, 0x7f, 0x25, 0xce,
0xe4, 0x2f, 0x62, 0x82, 0xb0, 0x85, 0xc3, 0x81, 0xdc, 0x20, 0xde, 0x43, 0x96, 0xc8, 0x51, 0x18,
0x5b, 0x20, 0xa1, 0x48, 0xa3, 0xf1, 0x6e, 0x65, 0x77, 0x34, 0x4f, 0x77, 0xcf, 0xda, 0x59, 0x1f,
0x38, 0xe5, 0x03, 0xc0, 0x81, 0x03, 0x07, 0x24, 0x8e, 0x9c, 0x22, 0xf8, 0x16, 0xbc, 0xae, 0x1c,
0xf8, 0x0a, 0x20, 0x88, 0x04, 0xe1, 0x8e, 0xba, 0x7b, 0x1e, 0x3b, 0x7e, 0x46, 0x4a, 0x0c, 0x96,
0x7c, 0x9b, 0xaa, 0xe9, 0xe9, 0xaa, 0xfa, 0xfd, 0xaa, 0x6b, 0xaa, 0x1a, 0xa8, 0x13, 0xc4, 0xc8,
0x02, 0xdb, 0xb3, 0x7c, 0xde, 0x5f, 0x8a, 0x58, 0x18, 0x87, 0x74, 0xde, 0x77, 0xbc, 0xed, 0x21,
0x57, 0xd2, 0x52, 0xba, 0xe0, 0xd2, 0x64, 0x37, 0xf4, 0xfd, 0x30, 0x50, 0xea, 0x4b, 0x33, 0x1c,
0xd9, 0xb6, 0xd3, 0xc5, 0xfc, 0x3b, 0x23, 0x80, 0x7a, 0xa7, 0x6d, 0xe2, 0xd6, 0x10, 0x79, 0x4c,
0xcf, 0x43, 0x25, 0x42, 0x64, 0x9d, 0x76, 0x8b, 0x2c, 0x90, 0x45, 0xcd, 0x4c, 0x24, 0x7a, 0x0b,
0xca, 0x2c, 0xf4, 0xb0, 0x55, 0x5a, 0x20, 0x8b, 0xcd, 0x95, 0xab, 0x4b, 0x07, 0xda, 0x5a, 0x7a,
0x80, 0xc8, 0xcc, 0xd0, 0x43, 0x53, 0x2e, 0xa6, 0x73, 0x30, 0xd1, 0x0d, 0x87, 0x41, 0xdc, 0xd2,
0x16, 0xc8, 0xe2, 0x94, 0xa9, 0x04, 0xa3, 0x0f, 0x20, 0xec, 0xf1, 0x28, 0x0c, 0x38, 0xd2, 0x5b,
0x50, 0xe1, 0xb1, 0x1d, 0x0f, 0xb9, 0x34, 0xd8, 0x58, 0xb9, 0x5c, 0xdc, 0x3a, 0x71, 0x7e, 0x5d,
0x2e, 0x31, 0x93, 0xa5, 0xb4, 0x09, 0xa5, 0x4e, 0x5b, 0xfa, 0xa2, 0x99, 0xa5, 0x4e, 0xfb, 0x10,
0x43, 0x21, 0xc0, 0x06, 0x0f, 0xff, 0xc5, 0xc8, 0xb6, 0xa1, 0x21, 0x0d, 0xbe, 0x4c, 0x68, 0xff,
0x83, 0x7a, 0xec, 0xf8, 0xc8, 0x63, 0xdb, 0x8f, 0xa4, 0x4f, 0x65, 0x33, 0x57, 0x1c, 0x62, 0xf7,
0x09, 0x81, 0xc9, 0x75, 0xec, 0xe7, 0x2c, 0x66, 0xcb, 0xc8, 0xd8, 0x32, 0xb1, 0x75, 0x77, 0x60,
0x07, 0x01, 0x7a, 0x09, 0x78, 0x13, 0x66, 0xae, 0xa0, 0x97, 0xa1, 0xde, 0x0d, 0x3d, 0xcf, 0x0a,
0x6c, 0x1f, 0xe5, 0xf6, 0x75, 0xb3, 0x26, 0x14, 0xf7, 0x6d, 0x1f, 0xe9, 0x75, 0x98, 0x8a, 0x6c,
0x16, 0x3b, 0xb1, 0x13, 0x06, 0x56, 0x6c, 0xf7, 0x5b, 0x65, 0xb9, 0x60, 0x32, 0x53, 0x6e, 0xd8,
0x7d, 0xe3, 0x29, 0x01, 0xfa, 0x1e, 0xe7, 0x4e, 0x3f, 0x28, 0x38, 0xf3, 0x4a, 0x81, 0xbf, 0x07,
0xd3, 0x11, 0x32, 0x2b, 0x71, 0xdb, 0x62, 0xb8, 0xd5, 0xd2, 0x16, 0xb4, 0xc5, 0xc6, 0xca, 0xf5,
0x43, 0xbe, 0x1f, 0x77, 0xc5, 0x9c, 0x8a, 0x90, 0xad, 0xaa, 0x4f, 0x4d, 0xdc, 0x32, 0xbe, 0x24,
0x30, 0x2d, 0xdf, 0x2b, 0xaf, 0x7d, 0x0c, 0x24, 0x74, 0x5c, 0xa8, 0x12, 0x67, 0x95, 0x70, 0x0c,
0x74, 0x07, 0xb2, 0x52, 0x04, 0xb4, 0x7c, 0x1c, 0xa0, 0x13, 0x07, 0x00, 0xfa, 0x8c, 0xc0, 0x6c,
0x01, 0xd0, 0x93, 0x4b, 0xac, 0x1b, 0x30, 0x8d, 0x8f, 0x23, 0x87, 0xa1, 0xd5, 0x1b, 0x32, 0x5b,
0x38, 0x20, 0x83, 0x29, 0x9b, 0x4d, 0xa5, 0x6e, 0x27, 0x5a, 0xfa, 0x10, 0xce, 0x8f, 0x13, 0x60,
0x67, 0xc8, 0xb5, 0xca, 0x92, 0x87, 0xd7, 0x8e, 0xe2, 0x21, 0xc7, 0xd9, 0x9c, 0xcb, 0xa9, 0xc8,
0xb5, 0xc6, 0xcf, 0x04, 0x2e, 0xac, 0x32, 0xb4, 0x63, 0x5c, 0x0d, 0x3d, 0x0f, 0xbb, 0xc2, 0x64,
0x9a, 0x47, 0xb7, 0xa1, 0xe6, 0xf3, 0xbe, 0x15, 0x8f, 0x22, 0x94, 0x71, 0x37, 0x57, 0xae, 0x1c,
0x62, 0x6b, 0x8d, 0xf7, 0x37, 0x46, 0x11, 0x9a, 0x55, 0x5f, 0x3d, 0x08, 0x82, 0x18, 0x6e, 0x65,
0x25, 0x43, 0x09, 0x45, 0x44, 0xb4, 0xbd, 0x88, 0xb4, 0xa0, 0x1a, 0xb1, 0xf0, 0xf1, 0xa8, 0xd3,
0x96, 0xe4, 0x69, 0x66, 0x2a, 0xd2, 0xb7, 0xa0, 0xc2, 0xbb, 0x03, 0xf4, 0x6d, 0x49, 0x5a, 0x63,
0xe5, 0xe2, 0x81, 0xf0, 0xdf, 0xf1, 0xc2, 0x4d, 0x33, 0x59, 0x28, 0x98, 0x9c, 0x6f, 0xb3, 0x30,
0x3a, 0xc5, 0x51, 0xad, 0xc1, 0x74, 0x37, 0xf3, 0x4e, 0x25, 0xad, 0x0a, 0xef, 0xff, 0x45, 0x7f,
0x92, 0x1f, 0xc8, 0x52, 0x1e, 0x8a, 0x48, 0x68, 0xb3, 0xd9, 0x2d, 0xc8, 0xc6, 0x1f, 0x04, 0xe6,
0xee, 0xda, 0xfc, 0xec, 0x04, 0xfc, 0x17, 0x81, 0x8b, 0x6d, 0xe4, 0x5d, 0xe6, 0x6c, 0xe2, 0xd9,
0x89, 0xfa, 0x2b, 0x02, 0xf3, 0xeb, 0x83, 0x70, 0xe7, 0xf4, 0x46, 0x6c, 0xfc, 0x4e, 0xe0, 0xbc,
0xaa, 0x29, 0x0f, 0xd2, 0xe2, 0x7a, 0xea, 0x58, 0x79, 0x1f, 0x9a, 0xf9, 0xef, 0x60, 0x8c, 0x94,
0xeb, 0x07, 0x93, 0x92, 0x05, 0x22, 0x39, 0xc9, 0xff, 0x24, 0x92, 0x92, 0xdf, 0x08, 0xcc, 0x89,
0x5a, 0x73, 0x36, 0xa2, 0xfd, 0x95, 0xc0, 0xec, 0x5d, 0x9b, 0x9f, 0x8d, 0x60, 0x9f, 0x11, 0x68,
0xa5, 0x35, 0xe6, 0x6c, 0x44, 0x2c, 0x7e, 0x23, 0xa2, 0xbe, 0x9c, 0xde, 0x68, 0x5f, 0x71, 0x41,
0xfd, 0xb3, 0x04, 0x53, 0x9d, 0x80, 0x23, 0x8b, 0x4f, 0x2c, 0xd2, 0x1b, 0xfb, 0x3d, 0x56, 0xfd,
0xfe, 0x1e, 0x5f, 0x5e, 0xa8, 0xeb, 0x17, 0xb8, 0x71, 0xec, 0x8b, 0xee, 0xad, 0xd3, 0x96, 0x91,
0x6b, 0x66, 0xae, 0x28, 0x36, 0xce, 0x15, 0xf5, 0x36, 0x6f, 0x9c, 0xc7, 0x50, 0xad, 0x16, 0x51,
0xbd, 0x02, 0x90, 0x81, 0xcf, 0x5b, 0xb5, 0x05, 0x6d, 0xb1, 0x6c, 0x8e, 0x69, 0xc4, 0x50, 0xc1,
0xc2, 0x9d, 0x4e, 0x9b, 0xb7, 0xea, 0x0b, 0x9a, 0x18, 0x2a, 0x94, 0x44, 0xdf, 0x86, 0x1a, 0x0b,
0x77, 0xac, 0x9e, 0x1d, 0xdb, 0x2d, 0x90, 0x0d, 0xe9, 0x11, 0xdd, 0x59, 0x95, 0x85, 0x3b, 0x6d,
0x3b, 0xb6, 0x8d, 0x27, 0x25, 0x98, 0x6a, 0xa3, 0x87, 0x31, 0xfe, 0xf7, 0xa0, 0x17, 0x10, 0x2b,
0x1f, 0x81, 0xd8, 0xc4, 0x51, 0x88, 0x55, 0xf6, 0x21, 0x76, 0x0d, 0x26, 0x23, 0xe6, 0xf8, 0x36,
0x1b, 0x59, 0x2e, 0x8e, 0x78, 0xab, 0x2a, 0x71, 0x6b, 0x24, 0xba, 0x7b, 0x38, 0xe2, 0xc6, 0x73,
0x02, 0x53, 0xeb, 0x68, 0xb3, 0xee, 0xe0, 0xc4, 0x60, 0x18, 0xf3, 0x5f, 0x2b, 0xfa, 0x5f, 0x38,
0x7f, 0xe5, 0xbd, 0xe7, 0xef, 0x75, 0xd0, 0x19, 0xf2, 0xa1, 0x17, 0x5b, 0x39, 0x38, 0x0a, 0x80,
0x69, 0xa5, 0x5f, 0xcd, 0x20, 0x5a, 0x86, 0x89, 0xad, 0x21, 0xb2, 0x91, 0x4c, 0xb7, 0x23, 0xf9,
0x57, 0xeb, 0x8c, 0xa7, 0x25, 0x31, 0x3e, 0xab, 0xb0, 0xc5, 0x56, 0x2f, 0x13, 0x75, 0x3e, 0x9a,
0x95, 0x5e, 0x7c, 0x34, 0xcb, 0xa0, 0xd2, 0x0e, 0x81, 0x6a, 0x4f, 0xc9, 0xb9, 0x06, 0x93, 0xd2,
0x73, 0x2b, 0x08, 0x7b, 0x98, 0x01, 0xd1, 0x90, 0xba, 0xfb, 0x52, 0x55, 0x44, 0xb3, 0xf2, 0x22,
0x68, 0x56, 0x0f, 0x46, 0x93, 0x42, 0x79, 0xe0, 0xc4, 0xea, 0x08, 0x4e, 0x9a, 0xf2, 0xd9, 0xf8,
0x04, 0x1a, 0x1b, 0x8e, 0x8f, 0x1b, 0x4e, 0xd7, 0x5d, 0xe3, 0xfd, 0x97, 0x81, 0x2b, 0xbf, 0x1b,
0x28, 0x15, 0xee, 0x06, 0x8e, 0x2c, 0xc6, 0xc6, 0x17, 0x04, 0xaa, 0xf7, 0x70, 0xb4, 0xb2, 0x8e,
0x7d, 0x89, 0x9d, 0x38, 0xfa, 0xe9, 0xbc, 0x2e, 0x05, 0x7a, 0x15, 0x1a, 0x63, 0xc9, 0x9e, 0x6c,
0x0e, 0x79, 0xae, 0x1f, 0x53, 0xed, 0x2f, 0x42, 0xcd, 0xe1, 0xd6, 0xb6, 0xed, 0x39, 0x3d, 0x89,
0x7d, 0xcd, 0xac, 0x3a, 0xfc, 0x43, 0x21, 0x8a, 0x63, 0x96, 0x55, 0x37, 0xde, 0x9a, 0x90, 0x87,
0x68, 0x4c, 0x63, 0x3c, 0x04, 0x48, 0x5c, 0x13, 0xd0, 0x64, 0xcc, 0x92, 0x71, 0x66, 0xdf, 0x81,
0xaa, 0x8b, 0xa3, 0x15, 0x8e, 0xfd, 0x56, 0x49, 0xd6, 0xa8, 0xc3, 0xf0, 0x4a, 0x76, 0x32, 0xd3,
0xe5, 0xc6, 0xe7, 0xea, 0xa6, 0x47, 0x18, 0x13, 0x39, 0xc4, 0x8b, 0xd5, 0x97, 0xec, 0xad, 0xbe,
0x57, 0xa1, 0xe1, 0xa3, 0x1f, 0xb2, 0x91, 0xc5, 0x9d, 0x5d, 0x4c, 0x61, 0x50, 0xaa, 0x75, 0x67,
0x17, 0x45, 0xa0, 0xc1, 0xd0, 0xb7, 0x58, 0xb8, 0xc3, 0xd3, 0xf3, 0x18, 0x0c, 0x7d, 0x33, 0xdc,
0xe1, 0xf4, 0x0d, 0x98, 0x61, 0xd8, 0xc5, 0x20, 0xf6, 0x46, 0x96, 0x1f, 0xf6, 0x9c, 0x47, 0x0e,
0xa6, 0x60, 0xe8, 0xe9, 0x8b, 0xb5, 0x44, 0x6f, 0x7c, 0x4d, 0x60, 0xe6, 0x83, 0x34, 0xfd, 0xd6,
0xb1, 0xaf, 0x9c, 0x3b, 0x81, 0xc4, 0x78, 0x57, 0xc6, 0x6b, 0x89, 0x83, 0xc3, 0x8f, 0xbf, 0xf9,
0xc9, 0x70, 0x32, 0x6b, 0x3c, 0x71, 0xea, 0xe6, 0x2f, 0x25, 0xa8, 0x26, 0xe6, 0x68, 0x1d, 0x26,
0xdc, 0xfb, 0x61, 0x80, 0xfa, 0x39, 0x3a, 0x0f, 0x33, 0xee, 0xde, 0x9b, 0x07, 0xbd, 0x47, 0x67,
0x61, 0xda, 0x2d, 0x0e, 0xee, 0x3a, 0x52, 0x0a, 0x4d, 0xb7, 0x30, 0xdb, 0xea, 0x8f, 0xe8, 0x05,
0x98, 0x75, 0xf7, 0x8f, 0x7f, 0xba, 0x48, 0x01, 0xdd, 0x2d, 0x4e, 0x48, 0x5c, 0x1f, 0xd0, 0x79,
0xd0, 0xdd, 0x3d, 0x43, 0x89, 0xfe, 0x1d, 0xa1, 0xb3, 0xd0, 0x74, 0x0b, 0xbd, 0xbb, 0xfe, 0x3d,
0xa1, 0x14, 0xa6, 0xdc, 0xf1, 0x16, 0x57, 0xff, 0x81, 0xd0, 0x0b, 0x40, 0xdd, 0x7d, 0x9d, 0xa0,
0xfe, 0x23, 0xa1, 0x73, 0x30, 0xed, 0x16, 0x1a, 0x26, 0xae, 0xff, 0x44, 0xe8, 0x24, 0x54, 0x5d,
0xd5, 0x55, 0xe8, 0x9f, 0x6a, 0x52, 0x52, 0xbf, 0x3b, 0xfd, 0x33, 0x25, 0xa9, 0xf2, 0xa7, 0x3f,
0xd7, 0xa4, 0xb1, 0xf1, 0x62, 0xa8, 0xff, 0xad, 0xd1, 0x26, 0xd4, 0xdd, 0xf4, 0xc0, 0xeb, 0xdf,
0xd4, 0xa5, 0xf1, 0x7d, 0x6c, 0xeb, 0xdf, 0xd6, 0x6f, 0xde, 0x86, 0x5a, 0x7a, 0x5f, 0x47, 0x01,
0x2a, 0x6b, 0x36, 0x8f, 0x91, 0xe9, 0xe7, 0xc4, 0xb3, 0x89, 0x76, 0x0f, 0x99, 0x4e, 0xc4, 0xf3,
0x47, 0xcc, 0x11, 0xfa, 0x92, 0xc0, 0xff, 0x81, 0xa8, 0x69, 0xba, 0x76, 0xa7, 0xfd, 0xf1, 0x9d,
0xbe, 0x13, 0x0f, 0x86, 0x9b, 0xa2, 0x46, 0x2e, 0xef, 0x3a, 0x9e, 0xe7, 0xec, 0xc6, 0xd8, 0x1d,
0x2c, 0x2b, 0x72, 0xdf, 0xec, 0x39, 0x3c, 0x66, 0xce, 0xe6, 0x30, 0xc6, 0xde, 0x72, 0x4a, 0xf1,
0xb2, 0x64, 0x3c, 0x13, 0xa3, 0xcd, 0xcd, 0x8a, 0xd4, 0xdc, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff,
0x56, 0x57, 0x32, 0x28, 0x20, 0x17, 0x00, 0x00,
// 1474 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x58, 0x4b, 0x6f, 0x1b, 0x47,
0x12, 0xf6, 0x70, 0x28, 0x3e, 0x8a, 0x14, 0x35, 0x6a, 0x49, 0x36, 0x6d, 0x2f, 0x6c, 0x79, 0xbc,
0x58, 0x6b, 0xbd, 0x58, 0x09, 0x2b, 0xef, 0x61, 0x7d, 0xdb, 0xb5, 0x08, 0xac, 0xb9, 0x86, 0x0c,
0xed, 0x50, 0x48, 0x80, 0xc0, 0xc0, 0x60, 0x44, 0x96, 0x86, 0x83, 0x79, 0xaa, 0x7b, 0x28, 0x99,
0x3a, 0xe4, 0xe4, 0x1f, 0x90, 0x1c, 0x72, 0xc8, 0x21, 0x40, 0x8e, 0x39, 0x19, 0xc9, 0xbf, 0xc8,
0xeb, 0x14, 0x20, 0x7f, 0x22, 0x81, 0x63, 0x20, 0x71, 0xee, 0x41, 0x77, 0xcf, 0x83, 0xa3, 0xa7,
0x01, 0x5b, 0x89, 0x00, 0xdd, 0xba, 0x6a, 0x7a, 0xba, 0xaa, 0xbe, 0xaf, 0xba, 0xba, 0xba, 0x81,
0x38, 0x41, 0x8c, 0x34, 0xb0, 0x3c, 0xd3, 0x67, 0xf6, 0x72, 0x44, 0xc3, 0x38, 0x24, 0x0b, 0xbe,
0xe3, 0xed, 0x8e, 0x98, 0x94, 0x96, 0xd3, 0x09, 0xd7, 0x9a, 0xfd, 0xd0, 0xf7, 0xc3, 0x40, 0xaa,
0xaf, 0xcd, 0x32, 0xa4, 0xbb, 0x4e, 0x1f, 0xf3, 0xff, 0xf4, 0x00, 0xea, 0xdd, 0x8e, 0x81, 0x3b,
0x23, 0x64, 0x31, 0xb9, 0x0c, 0x95, 0x08, 0x91, 0x76, 0x3b, 0x6d, 0x65, 0x51, 0x59, 0x52, 0x8d,
0x44, 0x22, 0xf7, 0xa0, 0x4c, 0x43, 0x0f, 0xdb, 0xa5, 0x45, 0x65, 0xa9, 0xb5, 0x7a, 0x73, 0xf9,
0x48, 0x5b, 0xcb, 0x1b, 0x88, 0xd4, 0x08, 0x3d, 0x34, 0xc4, 0x64, 0x32, 0x0f, 0x53, 0xfd, 0x70,
0x14, 0xc4, 0x6d, 0x75, 0x51, 0x59, 0x9a, 0x36, 0xa4, 0xa0, 0xdb, 0x00, 0xdc, 0x1e, 0x8b, 0xc2,
0x80, 0x21, 0xb9, 0x07, 0x15, 0x16, 0x5b, 0xf1, 0x88, 0x09, 0x83, 0x8d, 0xd5, 0xeb, 0xc5, 0xa5,
0x13, 0xe7, 0x7b, 0x62, 0x8a, 0x91, 0x4c, 0x25, 0x2d, 0x28, 0x75, 0x3b, 0xc2, 0x17, 0xd5, 0x28,
0x75, 0x3b, 0xc7, 0x18, 0x0a, 0x01, 0x36, 0x59, 0xf8, 0x3b, 0x46, 0xb6, 0x0b, 0x0d, 0x61, 0xf0,
0x4d, 0x42, 0xfb, 0x13, 0xd4, 0x63, 0xc7, 0x47, 0x16, 0x5b, 0x7e, 0x24, 0x7c, 0x2a, 0x1b, 0xb9,
0xe2, 0x18, 0xbb, 0xcf, 0x14, 0x68, 0xf6, 0xd0, 0xce, 0x59, 0xcc, 0xa6, 0x29, 0x13, 0xd3, 0xf8,
0xd2, 0xfd, 0xa1, 0x15, 0x04, 0xe8, 0x25, 0xe0, 0x4d, 0x19, 0xb9, 0x82, 0x5c, 0x87, 0x7a, 0x3f,
0xf4, 0x3c, 0x33, 0xb0, 0x7c, 0x14, 0xcb, 0xd7, 0x8d, 0x1a, 0x57, 0x3c, 0xb6, 0x7c, 0x24, 0xb7,
0x61, 0x3a, 0xb2, 0x68, 0xec, 0xc4, 0x4e, 0x18, 0x98, 0xb1, 0x65, 0xb7, 0xcb, 0x62, 0x42, 0x33,
0x53, 0x6e, 0x5a, 0xb6, 0xfe, 0x5c, 0x01, 0xf2, 0x1f, 0xc6, 0x1c, 0x3b, 0x28, 0x38, 0xf3, 0x56,
0x81, 0x7f, 0x04, 0x33, 0x11, 0x52, 0x33, 0x71, 0xdb, 0xa4, 0xb8, 0xd3, 0x56, 0x17, 0xd5, 0xa5,
0xc6, 0xea, 0xed, 0x63, 0xfe, 0x9f, 0x74, 0xc5, 0x98, 0x8e, 0x90, 0xae, 0xc9, 0x5f, 0x0d, 0xdc,
0xd1, 0x3f, 0x51, 0x60, 0x46, 0x7c, 0x97, 0x5e, 0xfb, 0x18, 0x08, 0xe8, 0x18, 0x57, 0x25, 0xce,
0x4a, 0xe1, 0x14, 0xe8, 0x8e, 0x64, 0xa5, 0x08, 0x68, 0xf9, 0x34, 0x40, 0xa7, 0x8e, 0x00, 0xf4,
0xa5, 0x02, 0x73, 0x05, 0x40, 0xcf, 0x2e, 0xb1, 0xee, 0xc0, 0x0c, 0x3e, 0x8d, 0x1c, 0x8a, 0xe6,
0x60, 0x44, 0x2d, 0xee, 0x80, 0x08, 0xa6, 0x6c, 0xb4, 0xa4, 0xba, 0x93, 0x68, 0xc9, 0x13, 0xb8,
0x3c, 0x49, 0x80, 0x95, 0x21, 0xd7, 0x2e, 0x0b, 0x1e, 0xfe, 0x72, 0x12, 0x0f, 0x39, 0xce, 0xc6,
0x7c, 0x4e, 0x45, 0xae, 0xd5, 0xbf, 0x57, 0xe0, 0xca, 0x1a, 0x45, 0x2b, 0xc6, 0xb5, 0xd0, 0xf3,
0xb0, 0xcf, 0x4d, 0xa6, 0x79, 0x74, 0x1f, 0x6a, 0x3e, 0xb3, 0xcd, 0x78, 0x1c, 0xa1, 0x88, 0xbb,
0xb5, 0x7a, 0xe3, 0x18, 0x5b, 0xeb, 0xcc, 0xde, 0x1c, 0x47, 0x68, 0x54, 0x7d, 0x39, 0xe0, 0x04,
0x51, 0xdc, 0xc9, 0x4a, 0x86, 0x14, 0x8a, 0x88, 0xa8, 0x07, 0x11, 0x69, 0x43, 0x35, 0xa2, 0xe1,
0xd3, 0x71, 0xb7, 0x23, 0xc8, 0x53, 0x8d, 0x54, 0x24, 0xff, 0x80, 0x0a, 0xeb, 0x0f, 0xd1, 0xb7,
0x04, 0x69, 0x8d, 0xd5, 0xab, 0x47, 0xc2, 0xff, 0xc0, 0x0b, 0xb7, 0x8c, 0x64, 0x22, 0x67, 0x72,
0xa1, 0x43, 0xc3, 0xe8, 0x1c, 0x47, 0xb5, 0x0e, 0x33, 0xfd, 0xcc, 0x3b, 0x99, 0xb4, 0x32, 0xbc,
0x3f, 0x17, 0xfd, 0x49, 0x0e, 0x90, 0xe5, 0x3c, 0x14, 0x9e, 0xd0, 0x46, 0xab, 0x5f, 0x90, 0xf5,
0x9f, 0x14, 0x98, 0x7f, 0x68, 0xb1, 0x8b, 0x13, 0xf0, 0x2f, 0x0a, 0x5c, 0xed, 0x20, 0xeb, 0x53,
0x67, 0x0b, 0x2f, 0x4e, 0xd4, 0x9f, 0x2a, 0xb0, 0xd0, 0x1b, 0x86, 0x7b, 0xe7, 0x37, 0x62, 0xfd,
0x85, 0x02, 0x97, 0x65, 0x4d, 0xd9, 0x48, 0x8b, 0xeb, 0xb9, 0x63, 0xe5, 0x7f, 0xd0, 0xca, 0x8f,
0x83, 0x09, 0x52, 0x6e, 0x1f, 0x4d, 0x4a, 0x16, 0x88, 0xe0, 0x24, 0x3f, 0x49, 0x04, 0x25, 0x3f,
0x2a, 0x30, 0xcf, 0x6b, 0xcd, 0xc5, 0x88, 0xf6, 0x07, 0x05, 0xe6, 0x1e, 0x5a, 0xec, 0x62, 0x04,
0xfb, 0x52, 0x81, 0x76, 0x5a, 0x63, 0x2e, 0x46, 0xc4, 0xfc, 0x18, 0xe1, 0xf5, 0xe5, 0xfc, 0x46,
0xfb, 0x96, 0x0b, 0xea, 0xcf, 0x25, 0x98, 0xee, 0x06, 0x0c, 0x69, 0x7c, 0x66, 0x91, 0xde, 0x39,
0xec, 0xb1, 0xec, 0xf7, 0x0f, 0xf8, 0xf2, 0x5a, 0x5d, 0x3f, 0xc7, 0x8d, 0xa1, 0xcd, 0xbb, 0xb7,
0x6e, 0x47, 0x44, 0xae, 0x1a, 0xb9, 0xa2, 0xd8, 0x38, 0x57, 0xe4, 0xd7, 0xbc, 0x71, 0x9e, 0x40,
0xb5, 0x5a, 0x44, 0xf5, 0x06, 0x40, 0x06, 0x3e, 0x6b, 0xd7, 0x16, 0xd5, 0xa5, 0xb2, 0x31, 0xa1,
0xe1, 0x97, 0x0a, 0x1a, 0xee, 0x75, 0x3b, 0xac, 0x5d, 0x5f, 0x54, 0xf9, 0xa5, 0x42, 0x4a, 0xe4,
0x9f, 0x50, 0xa3, 0xe1, 0x9e, 0x39, 0xb0, 0x62, 0xab, 0x0d, 0xa2, 0x21, 0x3d, 0xa1, 0x3b, 0xab,
0xd2, 0x70, 0xaf, 0x63, 0xc5, 0x96, 0xfe, 0xac, 0x04, 0xd3, 0x1d, 0xf4, 0x30, 0xc6, 0x3f, 0x1e,
0xf4, 0x02, 0x62, 0xe5, 0x13, 0x10, 0x9b, 0x3a, 0x09, 0xb1, 0xca, 0x21, 0xc4, 0x6e, 0x41, 0x33,
0xa2, 0x8e, 0x6f, 0xd1, 0xb1, 0xe9, 0xe2, 0x98, 0xb5, 0xab, 0x02, 0xb7, 0x46, 0xa2, 0x7b, 0x84,
0x63, 0xa6, 0xbf, 0x52, 0x60, 0xba, 0x87, 0x16, 0xed, 0x0f, 0xcf, 0x0c, 0x86, 0x09, 0xff, 0xd5,
0xa2, 0xff, 0x85, 0xfd, 0x57, 0x3e, 0xb8, 0xff, 0xfe, 0x0a, 0x1a, 0x45, 0x36, 0xf2, 0x62, 0x33,
0x07, 0x47, 0x02, 0x30, 0x23, 0xf5, 0x6b, 0x19, 0x44, 0x2b, 0x30, 0xb5, 0x33, 0x42, 0x3a, 0x16,
0xe9, 0x76, 0x22, 0xff, 0x72, 0x9e, 0xfe, 0x9d, 0x02, 0x5a, 0x6f, 0xcc, 0xd6, 0xc2, 0x60, 0xdb,
0xb1, 0xcf, 0x5d, 0xe4, 0x04, 0xca, 0x82, 0xaf, 0xa9, 0x45, 0x75, 0xa9, 0x6e, 0x88, 0x31, 0xe7,
0xd2, 0xc5, 0xb1, 0x19, 0x51, 0xdc, 0x76, 0x9e, 0xa2, 0x64, 0xbb, 0x6e, 0x34, 0x5c, 0x1c, 0x6f,
0x24, 0x2a, 0xfd, 0x79, 0x09, 0x9a, 0x29, 0x97, 0x1c, 0x9f, 0x37, 0x09, 0x28, 0xbf, 0x6f, 0x96,
0x5e, 0xff, 0xbe, 0x99, 0xa1, 0xa0, 0x1e, 0x83, 0xc2, 0x81, 0x3a, 0x7a, 0x0b, 0x9a, 0x82, 0x0e,
0x33, 0x08, 0x07, 0x98, 0xb1, 0xdb, 0x10, 0xba, 0xc7, 0x42, 0x55, 0x04, 0xaa, 0xf2, 0x3a, 0x29,
0x52, 0x3d, 0x3a, 0x45, 0x08, 0x94, 0x87, 0x4e, 0x2c, 0xeb, 0x4a, 0xd3, 0x10, 0x63, 0xfd, 0x7d,
0x68, 0x6c, 0x3a, 0x3e, 0x6e, 0x3a, 0x7d, 0x77, 0x9d, 0xd9, 0x6f, 0x02, 0x57, 0xfe, 0xe0, 0x51,
0x2a, 0x3c, 0x78, 0x9c, 0x78, 0xc2, 0xe8, 0x1f, 0x2b, 0x50, 0x7d, 0x84, 0xe3, 0xd5, 0x1e, 0xda,
0x02, 0x3b, 0x5e, 0xcf, 0xd2, 0x47, 0x08, 0x21, 0x90, 0x9b, 0xd0, 0x98, 0xd8, 0xc1, 0xc9, 0xe2,
0x90, 0x6f, 0xe0, 0x53, 0x8e, 0xb0, 0xab, 0x50, 0x73, 0x98, 0xb9, 0x6b, 0x79, 0xce, 0x40, 0x60,
0x5f, 0x33, 0xaa, 0x0e, 0x7b, 0x87, 0x8b, 0xbc, 0x76, 0x64, 0x25, 0x5b, 0x66, 0x9a, 0x6a, 0x4c,
0x68, 0xf4, 0x27, 0x00, 0x89, 0x6b, 0x1c, 0x9a, 0x8c, 0x59, 0x65, 0x92, 0xd9, 0x7f, 0x41, 0xd5,
0xc5, 0xf1, 0x2a, 0x43, 0xbb, 0x5d, 0x12, 0x85, 0xf7, 0x38, 0xbc, 0x92, 0x95, 0x8c, 0x74, 0xba,
0xfe, 0x91, 0x7c, 0xbe, 0xe2, 0xc6, 0x78, 0x0e, 0xb1, 0xe2, 0x91, 0xa2, 0x1c, 0x3c, 0x52, 0x6e,
0x42, 0xc3, 0x47, 0x3f, 0xa4, 0x63, 0x93, 0x39, 0xfb, 0x98, 0xc2, 0x20, 0x55, 0x3d, 0x67, 0x1f,
0x79, 0xa0, 0xc1, 0xc8, 0x37, 0x69, 0xb8, 0xc7, 0xd2, 0xad, 0x16, 0x8c, 0x7c, 0x23, 0xdc, 0x63,
0xe4, 0x6f, 0x30, 0x4b, 0xb1, 0x8f, 0x41, 0xec, 0x8d, 0x4d, 0x3f, 0x1c, 0x38, 0xdb, 0x0e, 0xa6,
0x60, 0x68, 0xe9, 0x87, 0xf5, 0x44, 0xaf, 0x7f, 0xa6, 0xc0, 0xec, 0xff, 0xd3, 0xf4, 0xeb, 0xa1,
0x2d, 0x9d, 0x3b, 0x83, 0xc4, 0xf8, 0xb7, 0x88, 0xd7, 0xe4, 0x1b, 0x87, 0x9d, 0xfe, 0x9c, 0x95,
0xe1, 0x64, 0xd4, 0x58, 0xe2, 0xd4, 0xdd, 0x17, 0x25, 0xa8, 0x26, 0xe6, 0x48, 0x1d, 0xa6, 0xdc,
0xc7, 0x61, 0x80, 0xda, 0x25, 0xb2, 0x00, 0xb3, 0xee, 0xc1, 0xe7, 0x14, 0x6d, 0x40, 0xe6, 0x60,
0xc6, 0x2d, 0xbe, 0x46, 0x68, 0x48, 0x08, 0xb4, 0xdc, 0xc2, 0x85, 0x5d, 0xdb, 0x26, 0x57, 0x60,
0xce, 0x3d, 0x7c, 0xa7, 0xd5, 0x78, 0x0a, 0x68, 0x6e, 0xf1, 0xda, 0xc7, 0xb4, 0xa1, 0x58, 0xe2,
0xbf, 0x18, 0x67, 0xb5, 0x94, 0x69, 0x0e, 0x59, 0x00, 0xcd, 0x3d, 0x70, 0xfb, 0xd2, 0xbe, 0x54,
0xc8, 0x1c, 0xb4, 0xdc, 0xc2, 0x25, 0x45, 0xfb, 0x4a, 0x21, 0x04, 0xa6, 0xdd, 0xc9, 0x5e, 0x5e,
0xfb, 0x5a, 0x21, 0x57, 0x80, 0xb8, 0x87, 0x5a, 0x5e, 0xed, 0x1b, 0x85, 0xcc, 0xc3, 0x8c, 0x5b,
0xe8, 0x0c, 0x99, 0xf6, 0xad, 0x42, 0x9a, 0x50, 0x75, 0x65, 0xfb, 0xa4, 0x7d, 0xa0, 0x0a, 0x49,
0x9e, 0xeb, 0xda, 0x87, 0x52, 0x92, 0x25, 0x51, 0x7b, 0xa5, 0x0a, 0x63, 0x93, 0x05, 0x52, 0xfb,
0x55, 0x25, 0x2d, 0xa8, 0xbb, 0x69, 0x11, 0xd0, 0x3e, 0xaf, 0x0b, 0xe3, 0x87, 0x32, 0x40, 0xfb,
0xa2, 0x7e, 0xf7, 0x3e, 0xd4, 0xd2, 0x87, 0x49, 0x02, 0x50, 0x59, 0xb7, 0x58, 0x8c, 0x54, 0xbb,
0xc4, 0xc7, 0x06, 0x5a, 0x03, 0xa4, 0x9a, 0xc2, 0xc7, 0xef, 0x52, 0x87, 0xeb, 0x4b, 0x9c, 0x93,
0x0d, 0x5e, 0xe7, 0x34, 0xf5, 0x41, 0xe7, 0xbd, 0x07, 0xb6, 0x13, 0x0f, 0x47, 0x5b, 0xbc, 0x6e,
0xae, 0xec, 0x3b, 0x9e, 0xe7, 0xec, 0xc7, 0xd8, 0x1f, 0xae, 0x48, 0xc2, 0xff, 0x3e, 0x70, 0x58,
0x4c, 0x9d, 0xad, 0x51, 0x8c, 0x83, 0x95, 0x94, 0xf6, 0x15, 0x91, 0x05, 0x99, 0x18, 0x6d, 0x6d,
0x55, 0x84, 0xe6, 0xde, 0x6f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x9e, 0x9f, 0x3d, 0x09, 0x18,
0x00, 0x00,
}

View File

@@ -89,6 +89,15 @@ service Master {
rpc ShowPartitions(internal.ShowPartitionRequest) returns (service.StringListResponse) {}
/**
* @brief This method is used to get system configs
*
* @param SysConfigRequest, the exact keys and/or key prefixes of the requested configs.
*
* @return SysConfigResponse
*/
rpc GetSysConfigs(internal.SysConfigRequest) returns (service.SysConfigResponse) {}
rpc AllocTimestamp(internal.TsoRequest) returns (internal.TsoResponse) {}
rpc AllocID(internal.IDRequest) returns (internal.IDResponse) {}

View File

@@ -30,36 +30,38 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
func init() { proto.RegisterFile("master.proto", fileDescriptor_f9c348dec43a6705) }
var fileDescriptor_f9c348dec43a6705 = []byte{
// 458 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x41, 0x6f, 0xd3, 0x30,
0x14, 0xc7, 0x7b, 0x1a, 0x92, 0xd5, 0xb5, 0xcc, 0xdc, 0xca, 0x85, 0xf5, 0x04, 0x2d, 0x4b, 0x10,
0x7c, 0x01, 0xd6, 0xe5, 0xb0, 0x4a, 0x20, 0x4d, 0xeb, 0x2e, 0x80, 0xd0, 0x70, 0xb2, 0xa7, 0xf4,
0x81, 0x13, 0x07, 0xbf, 0x97, 0x21, 0xed, 0x23, 0xf1, 0x29, 0x51, 0x93, 0x26, 0xa9, 0x69, 0x5d,
0xca, 0x6e, 0xb5, 0xfd, 0xf3, 0xef, 0x5f, 0xbf, 0xf7, 0x14, 0xd1, 0xcf, 0x14, 0x31, 0xd8, 0xa0,
0xb0, 0x86, 0x8d, 0x7c, 0x96, 0xa1, 0xbe, 0x2f, 0xa9, 0x5e, 0x05, 0xf5, 0xd1, 0xa8, 0x9f, 0x98,
0x2c, 0x33, 0x79, 0xbd, 0x39, 0x92, 0x98, 0x33, 0xd8, 0x5c, 0xe9, 0xdb, 0x8c, 0xd2, 0xf5, 0xde,
0x09, 0x81, 0xbd, 0xc7, 0x04, 0xba, 0xad, 0xb7, 0xbf, 0x85, 0x38, 0xfa, 0x58, 0xdd, 0x97, 0x4a,
0x3c, 0xbd, 0xb0, 0xa0, 0x18, 0x2e, 0x8c, 0xd6, 0x90, 0x30, 0x9a, 0x5c, 0x06, 0x81, 0x93, 0xd4,
0x38, 0x83, 0xbf, 0xc1, 0x6b, 0xf8, 0x59, 0x02, 0xf1, 0xe8, 0xb9, 0xcb, 0xaf, 0xff, 0xd1, 0x82,
0x15, 0x97, 0x34, 0xee, 0xc9, 0xaf, 0x62, 0x10, 0x59, 0x53, 0x6c, 0x04, 0xbc, 0xf6, 0x04, 0xb8,
0xd8, 0x81, 0xfa, 0x58, 0x1c, 0x5f, 0x2a, 0xda, 0xb0, 0x4f, 0x3d, 0x76, 0x87, 0x6a, 0xe4, 0x63,
0x17, 0x5e, 0xd7, 0x2a, 0x98, 0x19, 0xa3, 0xaf, 0x81, 0x0a, 0x93, 0x13, 0x8c, 0x7b, 0xb2, 0x14,
0x32, 0x02, 0x4a, 0x2c, 0xc6, 0x9b, 0x75, 0x7a, 0xe3, 0x7b, 0xc6, 0x16, 0xda, 0xa4, 0x4d, 0x77,
0xa7, 0x75, 0x60, 0x7d, 0xb5, 0x58, 0xfd, 0x1c, 0xf7, 0xe4, 0x0f, 0x31, 0x5c, 0x2c, 0xcd, 0xaf,
0xee, 0x98, 0xbc, 0xa5, 0x73, 0xb9, 0x26, 0xef, 0xe5, 0xee, 0xbc, 0x05, 0x5b, 0xcc, 0xd3, 0x0f,
0x48, 0xbc, 0xf1, 0xc6, 0x5b, 0x31, 0xac, 0x1b, 0x7c, 0xa5, 0x2c, 0x63, 0xf5, 0xc0, 0xb3, 0xbd,
0x83, 0xd0, 0x72, 0x07, 0x36, 0xea, 0x8b, 0x38, 0x5e, 0x35, 0xb8, 0xd3, 0x4f, 0xf7, 0x8c, 0xc1,
0xff, 0xca, 0xbf, 0x89, 0xfe, 0xa5, 0xa2, 0xce, 0x3d, 0xf1, 0x0f, 0xc1, 0x96, 0xfa, 0xb0, 0x19,
0xb0, 0xe2, 0xa4, 0x69, 0x6c, 0x17, 0x13, 0xfe, 0x63, 0x04, 0xb6, 0xb2, 0x26, 0xbb, 0xb3, 0x5a,
0xce, 0x1d, 0x00, 0x14, 0x83, 0x55, 0x63, 0xdb, 0x53, 0xf2, 0xd6, 0xcc, 0xc1, 0x1e, 0xd3, 0xfe,
0x4f, 0x62, 0x70, 0xae, 0xb5, 0x49, 0x6e, 0x30, 0x03, 0x62, 0x95, 0x15, 0xf2, 0xd4, 0x13, 0x75,
0x43, 0xc6, 0x53, 0x39, 0x17, 0x69, 0xd5, 0x57, 0xe2, 0x49, 0xa5, 0x9e, 0x47, 0xf2, 0x85, 0xe7,
0xc2, 0x3c, 0x6a, 0x94, 0xa7, 0x7b, 0x88, 0xd6, 0xf8, 0x5d, 0x0c, 0xcf, 0x89, 0x30, 0xcd, 0x17,
0x90, 0x66, 0x90, 0xf3, 0x3c, 0x92, 0xaf, 0x3c, 0xf7, 0x5a, 0xae, 0x8b, 0x98, 0x1c, 0x82, 0x36,
0x59, 0xb3, 0xd9, 0xe7, 0xf7, 0x29, 0xf2, 0xb2, 0x8c, 0x57, 0x33, 0x17, 0x3e, 0xa0, 0xd6, 0xf8,
0xc0, 0x90, 0x2c, 0xc3, 0x5a, 0x72, 0x76, 0x87, 0xc4, 0x16, 0xe3, 0x92, 0xe1, 0x2e, 0x6c, 0x54,
0x61, 0x65, 0x0e, 0xeb, 0x6f, 0x74, 0x11, 0xc7, 0x47, 0xd5, 0xfa, 0xdd, 0x9f, 0x00, 0x00, 0x00,
0xff, 0xff, 0xa0, 0xb5, 0xeb, 0xf6, 0xd1, 0x05, 0x00, 0x00,
// 484 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x95, 0xdf, 0x6e, 0xd3, 0x30,
0x14, 0x87, 0x7b, 0x35, 0x24, 0xd3, 0x3f, 0xcc, 0xdc, 0x95, 0x1b, 0xd6, 0x9b, 0x41, 0xcb, 0x12,
0x04, 0x2f, 0xc0, 0xda, 0x48, 0xac, 0x12, 0x48, 0xd3, 0xb2, 0x1b, 0x40, 0x68, 0x24, 0xd9, 0x21,
0x35, 0x24, 0x76, 0xf0, 0x39, 0x19, 0xa2, 0x2f, 0xc1, 0x2b, 0xa3, 0x26, 0x75, 0x52, 0xd3, 0xba,
0x94, 0xdd, 0xd5, 0xf6, 0xe7, 0xdf, 0x57, 0x9f, 0x73, 0xa4, 0xb0, 0x6e, 0x1e, 0x21, 0x81, 0xf6,
0x0a, 0xad, 0x48, 0xf1, 0xc7, 0xb9, 0xc8, 0xee, 0x4a, 0xac, 0x57, 0x5e, 0x7d, 0x34, 0xec, 0x26,
0x2a, 0xcf, 0x95, 0xac, 0x37, 0x87, 0x5c, 0x48, 0x02, 0x2d, 0xa3, 0xec, 0x26, 0xc7, 0x74, 0xbd,
0x77, 0x8c, 0xa0, 0xef, 0x44, 0x02, 0xed, 0xd6, 0xab, 0xdf, 0x0f, 0xd9, 0xd1, 0xfb, 0xea, 0x3e,
0x8f, 0xd8, 0xa3, 0x99, 0x86, 0x88, 0x60, 0xa6, 0xb2, 0x0c, 0x12, 0x12, 0x4a, 0x72, 0xcf, 0xb3,
0x4c, 0x26, 0xd3, 0xfb, 0x1b, 0xbc, 0x82, 0x1f, 0x25, 0x20, 0x0d, 0x9f, 0xd8, 0xfc, 0xfa, 0x1f,
0x85, 0x14, 0x51, 0x89, 0xa3, 0x0e, 0xff, 0xcc, 0xfa, 0x81, 0x56, 0xc5, 0x86, 0xe0, 0x85, 0x43,
0x60, 0x63, 0x07, 0xc6, 0xc7, 0xac, 0x77, 0x11, 0xe1, 0x46, 0xfa, 0xc4, 0x91, 0x6e, 0x51, 0x26,
0x7c, 0x64, 0xc3, 0xeb, 0x5a, 0x79, 0x53, 0xa5, 0xb2, 0x2b, 0xc0, 0x42, 0x49, 0x84, 0x51, 0x87,
0x97, 0x8c, 0x07, 0x80, 0x89, 0x16, 0xf1, 0x66, 0x9d, 0x5e, 0xba, 0x9e, 0xb1, 0x85, 0x1a, 0xdb,
0x64, 0xb7, 0xad, 0x05, 0xeb, 0xab, 0xc5, 0xea, 0xe7, 0xa8, 0xc3, 0xbf, 0xb3, 0x41, 0xb8, 0x50,
0x3f, 0xdb, 0x63, 0x74, 0x96, 0xce, 0xe6, 0x8c, 0xef, 0xd9, 0x6e, 0x5f, 0x48, 0x5a, 0xc8, 0xf4,
0x9d, 0x40, 0xda, 0x78, 0xe3, 0x0d, 0x1b, 0xd4, 0x0d, 0xbe, 0x8c, 0x34, 0x89, 0xea, 0x81, 0x67,
0x7b, 0x07, 0xa1, 0xe1, 0x0e, 0x6c, 0xd4, 0x27, 0xd6, 0x5b, 0x35, 0xb8, 0x8d, 0x9f, 0xec, 0x19,
0x83, 0xff, 0x0d, 0xff, 0xc2, 0xba, 0x17, 0x11, 0xb6, 0xd9, 0x63, 0xf7, 0x10, 0x6c, 0x45, 0x1f,
0x36, 0x03, 0x9a, 0x1d, 0x9b, 0xc6, 0xb6, 0x1a, 0xff, 0x1f, 0x23, 0xb0, 0xe5, 0x1a, 0xef, 0x76,
0x35, 0x9c, 0x3d, 0x00, 0x82, 0xf5, 0x57, 0x8d, 0x6d, 0x4e, 0xd1, 0x59, 0x33, 0x0b, 0xbb, 0x4f,
0xfb, 0x13, 0xd6, 0x7b, 0x0b, 0x14, 0xfe, 0xc2, 0x99, 0x92, 0x5f, 0x45, 0x8a, 0xfc, 0xd4, 0x65,
0x32, 0x88, 0xb1, 0x9c, 0x3a, 0x2c, 0x2d, 0xd7, 0x48, 0x3e, 0xb0, 0xfe, 0x79, 0x96, 0xa9, 0xe4,
0x5a, 0xe4, 0x80, 0x14, 0xe5, 0x05, 0x3f, 0x71, 0x58, 0xae, 0x51, 0x39, 0xda, 0x63, 0x23, 0x4d,
0xf4, 0x25, 0x7b, 0x50, 0x45, 0xcf, 0x03, 0xfe, 0xd4, 0x71, 0x61, 0x1e, 0x98, 0xc8, 0x93, 0x3d,
0x44, 0x93, 0xf8, 0x8d, 0x0d, 0xce, 0x11, 0x45, 0x2a, 0x43, 0x48, 0x73, 0x90, 0x34, 0x0f, 0xf8,
0x73, 0xc7, 0xbd, 0x86, 0x6b, 0x15, 0xe3, 0x43, 0x50, 0xe3, 0x9a, 0x4e, 0x3f, 0xbe, 0x49, 0x05,
0x2d, 0xca, 0x78, 0x35, 0xd8, 0xfe, 0x52, 0x64, 0x99, 0x58, 0x12, 0x24, 0x0b, 0xbf, 0x0e, 0x39,
0xbb, 0x15, 0x48, 0x5a, 0xc4, 0x25, 0xc1, 0xad, 0x6f, 0xa2, 0xfc, 0x2a, 0xd9, 0xaf, 0x3f, 0x04,
0x45, 0x1c, 0x1f, 0x55, 0xeb, 0xd7, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x75, 0x7d, 0xec,
0x36, 0x06, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -134,6 +136,13 @@ type MasterClient interface {
//
// @return StringListResponse
ShowPartitions(ctx context.Context, in *internalpb.ShowPartitionRequest, opts ...grpc.CallOption) (*servicepb.StringListResponse, error)
//*
// @brief This method is used to get system configs
//
// @param SysConfigRequest, the exact keys and/or key prefixes of the requested configs.
//
// @return SysConfigResponse
GetSysConfigs(ctx context.Context, in *internalpb.SysConfigRequest, opts ...grpc.CallOption) (*servicepb.SysConfigResponse, error)
AllocTimestamp(ctx context.Context, in *internalpb.TsoRequest, opts ...grpc.CallOption) (*internalpb.TsoResponse, error)
AllocID(ctx context.Context, in *internalpb.IDRequest, opts ...grpc.CallOption) (*internalpb.IDResponse, error)
AssignSegmentID(ctx context.Context, in *internalpb.AssignSegIDRequest, opts ...grpc.CallOption) (*internalpb.AssignSegIDResponse, error)
@@ -237,6 +246,15 @@ func (c *masterClient) ShowPartitions(ctx context.Context, in *internalpb.ShowPa
return out, nil
}
func (c *masterClient) GetSysConfigs(ctx context.Context, in *internalpb.SysConfigRequest, opts ...grpc.CallOption) (*servicepb.SysConfigResponse, error) {
out := new(servicepb.SysConfigResponse)
err := c.cc.Invoke(ctx, "/milvus.proto.master.Master/GetSysConfigs", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
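
From the caller's side the exchange is one unary RPC plus zipping the parallel result slices back into a map. A minimal sketch, assuming an already-dialed MasterClient; the function name and the prefix are illustrative:

// Hypothetical caller: fetch every config under a prefix from the master.
func fetchConfigs(ctx context.Context, client MasterClient) (map[string]string, error) {
	resp, err := client.GetSysConfigs(ctx, &internalpb.SysConfigRequest{
		MsgType:     internalpb.MsgType_kGetSysConfigs,
		KeyPrefixes: []string{"etcd."}, // illustrative prefix
	})
	if err != nil {
		return nil, err
	}
	configs := make(map[string]string, len(resp.Keys))
	for i, key := range resp.Keys {
		configs[key] = resp.Values[i] // Keys and Values are parallel slices
	}
	return configs, nil
}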
func (c *masterClient) AllocTimestamp(ctx context.Context, in *internalpb.TsoRequest, opts ...grpc.CallOption) (*internalpb.TsoResponse, error) {
out := new(internalpb.TsoResponse)
err := c.cc.Invoke(ctx, "/milvus.proto.master.Master/AllocTimestamp", in, out, opts...)
@@ -326,6 +344,13 @@ type MasterServer interface {
//
// @return StringListResponse
ShowPartitions(context.Context, *internalpb.ShowPartitionRequest) (*servicepb.StringListResponse, error)
//*
// @brief This method is used to get system configs
//
// @param SysConfigRequest, the exact keys and/or key prefixes of the requested configs.
//
// @return SysConfigResponse
GetSysConfigs(context.Context, *internalpb.SysConfigRequest) (*servicepb.SysConfigResponse, error)
AllocTimestamp(context.Context, *internalpb.TsoRequest) (*internalpb.TsoResponse, error)
AllocID(context.Context, *internalpb.IDRequest) (*internalpb.IDResponse, error)
AssignSegmentID(context.Context, *internalpb.AssignSegIDRequest) (*internalpb.AssignSegIDResponse, error)
@@ -365,6 +390,9 @@ func (*UnimplementedMasterServer) DescribePartition(ctx context.Context, req *in
func (*UnimplementedMasterServer) ShowPartitions(ctx context.Context, req *internalpb.ShowPartitionRequest) (*servicepb.StringListResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ShowPartitions not implemented")
}
func (*UnimplementedMasterServer) GetSysConfigs(ctx context.Context, req *internalpb.SysConfigRequest) (*servicepb.SysConfigResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetSysConfigs not implemented")
}
func (*UnimplementedMasterServer) AllocTimestamp(ctx context.Context, req *internalpb.TsoRequest) (*internalpb.TsoResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method AllocTimestamp not implemented")
}
@@ -559,6 +587,24 @@ func _Master_ShowPartitions_Handler(srv interface{}, ctx context.Context, dec fu
return interceptor(ctx, in, info, handler)
}
func _Master_GetSysConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(internalpb.SysConfigRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MasterServer).GetSysConfigs(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/milvus.proto.master.Master/GetSysConfigs",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MasterServer).GetSysConfigs(ctx, req.(*internalpb.SysConfigRequest))
}
return interceptor(ctx, in, info, handler)
}
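
On the implementation side, a MasterServer answers by resolving the requested keys and prefixes into the parallel Keys/Values slices. A hedged sketch against an assumed in-memory store; the exampleMaster type, its configs map, the strings import, and ErrorCode_SUCCESS from common.proto are assumptions, not the actual master code:

// Hypothetical implementation backed by a flattened in-memory config map.
type exampleMaster struct {
	UnimplementedMasterServer
	configs map[string]string // e.g. "etcd.address" -> "localhost"
}

func (s *exampleMaster) GetSysConfigs(ctx context.Context, req *internalpb.SysConfigRequest) (*servicepb.SysConfigResponse, error) {
	resp := &servicepb.SysConfigResponse{
		Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS},
	}
	for _, key := range req.Keys { // exact-key lookups
		if value, ok := s.configs[key]; ok {
			resp.Keys = append(resp.Keys, key)
			resp.Values = append(resp.Values, value)
		}
	}
	for _, prefix := range req.KeyPrefixes { // prefix scans
		for key, value := range s.configs {
			if strings.HasPrefix(key, prefix) {
				resp.Keys = append(resp.Keys, key)
				resp.Values = append(resp.Values, value)
			}
		}
	}
	return resp, nil
}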
func _Master_AllocTimestamp_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(internalpb.TsoRequest)
if err := dec(in); err != nil {
@@ -657,6 +703,10 @@ var _Master_serviceDesc = grpc.ServiceDesc{
MethodName: "ShowPartitions",
Handler: _Master_ShowPartitions_Handler,
},
{
MethodName: "GetSysConfigs",
Handler: _Master_GetSysConfigs_Handler,
},
{
MethodName: "AllocTimestamp",
Handler: _Master_AllocTimestamp_Handler,

View File

@@ -135,6 +135,14 @@ message PartitionDescription {
repeated common.KeyValuePair statistics = 3;
}
/**
* @brief Response of GetSysConfigs
*/
message SysConfigResponse {
common.Status status = 1;
repeated string keys = 2;
repeated string values = 3;
}
/**
* @brief Entities hit by query

View File

@@ -737,6 +737,63 @@ func (m *PartitionDescription) GetStatistics() []*commonpb.KeyValuePair {
return nil
}
//*
// @brief Response of GetSysConfigs
type SysConfigResponse struct {
Status *commonpb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
Keys []string `protobuf:"bytes,2,rep,name=keys,proto3" json:"keys,omitempty"`
Values []string `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SysConfigResponse) Reset() { *m = SysConfigResponse{} }
func (m *SysConfigResponse) String() string { return proto.CompactTextString(m) }
func (*SysConfigResponse) ProtoMessage() {}
func (*SysConfigResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_b4b40b84dd2f74cb, []int{13}
}
func (m *SysConfigResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SysConfigResponse.Unmarshal(m, b)
}
func (m *SysConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SysConfigResponse.Marshal(b, m, deterministic)
}
func (m *SysConfigResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_SysConfigResponse.Merge(m, src)
}
func (m *SysConfigResponse) XXX_Size() int {
return xxx_messageInfo_SysConfigResponse.Size(m)
}
func (m *SysConfigResponse) XXX_DiscardUnknown() {
xxx_messageInfo_SysConfigResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SysConfigResponse proto.InternalMessageInfo
func (m *SysConfigResponse) GetStatus() *commonpb.Status {
if m != nil {
return m.Status
}
return nil
}
func (m *SysConfigResponse) GetKeys() []string {
if m != nil {
return m.Keys
}
return nil
}
func (m *SysConfigResponse) GetValues() []string {
if m != nil {
return m.Values
}
return nil
}
//*
// @brief Entities hit by query
type Hits struct {
@@ -752,7 +809,7 @@ func (m *Hits) Reset() { *m = Hits{} }
func (m *Hits) String() string { return proto.CompactTextString(m) }
func (*Hits) ProtoMessage() {}
func (*Hits) Descriptor() ([]byte, []int) {
return fileDescriptor_b4b40b84dd2f74cb, []int{13}
return fileDescriptor_b4b40b84dd2f74cb, []int{14}
}
func (m *Hits) XXX_Unmarshal(b []byte) error {
@@ -808,7 +865,7 @@ func (m *QueryResult) Reset() { *m = QueryResult{} }
func (m *QueryResult) String() string { return proto.CompactTextString(m) }
func (*QueryResult) ProtoMessage() {}
func (*QueryResult) Descriptor() ([]byte, []int) {
return fileDescriptor_b4b40b84dd2f74cb, []int{14}
return fileDescriptor_b4b40b84dd2f74cb, []int{15}
}
func (m *QueryResult) XXX_Unmarshal(b []byte) error {
@@ -858,6 +915,7 @@ func init() {
proto.RegisterType((*IntegerRangeResponse)(nil), "milvus.proto.service.IntegerRangeResponse")
proto.RegisterType((*CollectionDescription)(nil), "milvus.proto.service.CollectionDescription")
proto.RegisterType((*PartitionDescription)(nil), "milvus.proto.service.PartitionDescription")
proto.RegisterType((*SysConfigResponse)(nil), "milvus.proto.service.SysConfigResponse")
proto.RegisterType((*Hits)(nil), "milvus.proto.service.Hits")
proto.RegisterType((*QueryResult)(nil), "milvus.proto.service.QueryResult")
}
@@ -865,52 +923,53 @@
func init() { proto.RegisterFile("service_msg.proto", fileDescriptor_b4b40b84dd2f74cb) }
var fileDescriptor_b4b40b84dd2f74cb = []byte{
// 739 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xdd, 0x4e, 0xdb, 0x4a,
0x10, 0x3e, 0x8e, 0x43, 0x4e, 0x98, 0x38, 0x21, 0xec, 0xc9, 0x41, 0x06, 0x6e, 0x72, 0x8c, 0x38,
0x8d, 0x5a, 0x35, 0x91, 0xa0, 0x52, 0xc5, 0x45, 0xa5, 0x26, 0x40, 0x5b, 0x7e, 0x14, 0xe8, 0x12,
0x21, 0xd1, 0x4a, 0x8d, 0x36, 0xf6, 0xca, 0x5e, 0xd5, 0xf1, 0x5a, 0xde, 0x35, 0x51, 0x78, 0x90,
0xbe, 0x44, 0x1f, 0xa4, 0x77, 0x7d, 0xa6, 0xca, 0x6b, 0x93, 0x1f, 0x4a, 0x55, 0x0a, 0xdc, 0xcd,
0xcc, 0xee, 0xcc, 0x37, 0xbf, 0x1f, 0x2c, 0x0b, 0x1a, 0x5d, 0x32, 0x9b, 0xf6, 0x87, 0xc2, 0x6d,
0x86, 0x11, 0x97, 0x1c, 0xd5, 0x86, 0xcc, 0xbf, 0x8c, 0x45, 0xaa, 0x35, 0xb3, 0xf7, 0x35, 0xc3,
0xe6, 0xc3, 0x21, 0x0f, 0x52, 0xeb, 0x9a, 0x21, 0x6c, 0x8f, 0x0e, 0x49, 0xaa, 0x59, 0x3b, 0x50,
0xd9, 0xe5, 0xbe, 0x4f, 0x6d, 0xc9, 0x78, 0xd0, 0x25, 0x43, 0x8a, 0x9e, 0xc0, 0x92, 0x3d, 0xb1,
0xf4, 0x03, 0x32, 0xa4, 0xa6, 0x56, 0xd7, 0x1a, 0x8b, 0xb8, 0x62, 0xcf, 0x7d, 0xb4, 0x0e, 0xa1,
0x7c, 0x4a, 0x22, 0xc9, 0xfe, 0xd8, 0x13, 0x55, 0x41, 0x97, 0xc4, 0x35, 0x73, 0xea, 0x31, 0x11,
0xad, 0xaf, 0x1a, 0x14, 0x31, 0x1f, 0x75, 0x88, 0xb4, 0xbd, 0xbb, 0xc7, 0xd9, 0x80, 0x72, 0x78,
0x9d, 0x41, 0x7f, 0x1a, 0xd1, 0x98, 0x18, 0x7b, 0xc4, 0x45, 0x2f, 0xa0, 0x18, 0xf1, 0x51, 0xdf,
0x21, 0x92, 0x98, 0x7a, 0x5d, 0x6f, 0x94, 0xb6, 0x56, 0x9b, 0x73, 0x6d, 0xca, 0xba, 0xd3, 0xf1,
0xf9, 0x00, 0xff, 0x1d, 0xf1, 0xd1, 0x1e, 0x91, 0x04, 0xad, 0xc3, 0xa2, 0x47, 0x84, 0xd7, 0xff,
0x4c, 0xc7, 0xc2, 0xcc, 0xd7, 0xf5, 0x46, 0x19, 0x17, 0x13, 0xc3, 0x11, 0x1d, 0x0b, 0x6b, 0x04,
0xd5, 0x53, 0x9f, 0xd8, 0xd4, 0xe3, 0xbe, 0x43, 0xa3, 0x73, 0xe2, 0xc7, 0x93, 0x9a, 0xb4, 0x49,
0x4d, 0x68, 0x07, 0xf2, 0x72, 0x1c, 0x52, 0x95, 0x54, 0x65, 0x6b, 0xb3, 0x79, 0xdb, 0x6c, 0x9a,
0x33, 0x71, 0x7a, 0xe3, 0x90, 0x62, 0xe5, 0x82, 0x56, 0xa0, 0x70, 0x99, 0x44, 0x15, 0x2a, 0x63,
0x03, 0x67, 0x9a, 0xf5, 0x69, 0x0e, 0xf8, 0x6d, 0xc4, 0xe3, 0x10, 0x1d, 0x82, 0x11, 0x4e, 0x6d,
0xc2, 0xd4, 0x54, 0x8d, 0xff, 0xff, 0x16, 0x4e, 0xa5, 0x8d, 0xe7, 0x7c, 0xad, 0x2f, 0x1a, 0x2c,
0xbc, 0x8f, 0x69, 0x34, 0xbe, 0xfb, 0x0c, 0x36, 0xa1, 0x32, 0x37, 0x03, 0x61, 0xe6, 0xea, 0x7a,
0x63, 0x11, 0x97, 0x67, 0x87, 0x20, 0x92, 0xf6, 0x38, 0xc2, 0x37, 0xf5, 0xb4, 0x3d, 0x8e, 0xf0,
0xd1, 0x33, 0x58, 0x9e, 0xc1, 0xee, 0xbb, 0x49, 0x31, 0x66, 0xbe, 0xae, 0x35, 0x0c, 0x5c, 0x0d,
0x6f, 0x14, 0x69, 0x7d, 0x84, 0xca, 0x99, 0x8c, 0x58, 0xe0, 0x62, 0x2a, 0x42, 0x1e, 0x08, 0x8a,
0xb6, 0xa1, 0x20, 0x24, 0x91, 0xb1, 0x50, 0x79, 0x95, 0xb6, 0xd6, 0x6f, 0x1d, 0xea, 0x99, 0xfa,
0x82, 0xb3, 0xaf, 0xa8, 0x06, 0x0b, 0xaa, 0x93, 0xd9, 0xa2, 0xa4, 0x8a, 0x75, 0x01, 0x46, 0x87,
0x73, 0xff, 0x11, 0x43, 0x17, 0xaf, 0x43, 0x13, 0x40, 0x69, 0xde, 0xc7, 0x4c, 0xc8, 0x87, 0x01,
0x4c, 0x77, 0x22, 0x6d, 0xf0, 0xf5, 0x4e, 0x0c, 0xe0, 0x9f, 0x83, 0x40, 0x52, 0x97, 0x46, 0x8f,
0x8d, 0xa1, 0x4f, 0x30, 0x04, 0xd4, 0x32, 0x0c, 0x4c, 0x02, 0x97, 0x3e, 0xb8, 0x53, 0x03, 0xea,
0xb2, 0x40, 0x75, 0x4a, 0xc7, 0xa9, 0x92, 0x2c, 0x08, 0x0d, 0x1c, 0xb5, 0x20, 0x3a, 0x4e, 0x44,
0xeb, 0xbb, 0x06, 0xff, 0x4e, 0xb9, 0x69, 0x8f, 0x0a, 0x3b, 0x62, 0x61, 0x22, 0xde, 0x0f, 0xf6,
0x15, 0x14, 0x52, 0xe6, 0x53, 0xb8, 0xa5, 0x9f, 0x0e, 0x32, 0x65, 0xc5, 0x29, 0xe0, 0x99, 0x32,
0xe0, 0xcc, 0x09, 0xb5, 0x01, 0x92, 0x40, 0x4c, 0x48, 0x66, 0x8b, 0x8c, 0x48, 0xfe, 0xbb, 0x15,
0xf7, 0x88, 0x8e, 0xd5, 0x6d, 0x9d, 0x12, 0x16, 0xe1, 0x19, 0x27, 0xeb, 0x9b, 0x06, 0xb5, 0x09,
0x63, 0x3e, 0xb8, 0x9e, 0x97, 0x90, 0x57, 0x67, 0x99, 0x56, 0xb3, 0xf1, 0x8b, 0x7b, 0x9f, 0x25,
0x68, 0xac, 0x1c, 0x1e, 0xa3, 0x92, 0x23, 0xc8, 0xbf, 0x63, 0x52, 0x5d, 0xf5, 0xc1, 0x5e, 0x4a,
0x39, 0x3a, 0x4e, 0x44, 0xb4, 0x3a, 0xc3, 0xb6, 0x39, 0xc5, 0x5d, 0x13, 0x4a, 0x5d, 0x49, 0x06,
0xc0, 0xa3, 0x8c, 0xd4, 0x72, 0x38, 0xd3, 0xac, 0x73, 0x28, 0x29, 0xce, 0xc1, 0x54, 0xc4, 0xbe,
0xbc, 0x5f, 0x33, 0x10, 0xe4, 0x3d, 0x26, 0x45, 0x06, 0xa9, 0xe4, 0xa7, 0xaf, 0x61, 0xe9, 0x06,
0xbb, 0xa2, 0x22, 0xe4, 0xbb, 0x27, 0xdd, 0xfd, 0xea, 0x5f, 0x68, 0x19, 0xca, 0xe7, 0xfb, 0xbb,
0xbd, 0x13, 0xdc, 0xef, 0x1c, 0x74, 0xdb, 0xf8, 0xa2, 0xea, 0xa0, 0x2a, 0x18, 0x99, 0xe9, 0xcd,
0xf1, 0x49, 0xbb, 0x57, 0xa5, 0x9d, 0xdd, 0x0f, 0x6d, 0x97, 0x49, 0x2f, 0x1e, 0x24, 0xa8, 0xad,
0x2b, 0xe6, 0xfb, 0xec, 0x4a, 0x52, 0xdb, 0x6b, 0xa5, 0x19, 0x3d, 0x77, 0x98, 0x90, 0x11, 0x1b,
0xc4, 0x92, 0x3a, 0x2d, 0x16, 0x48, 0x1a, 0x05, 0xc4, 0x6f, 0xa9, 0x34, 0x5b, 0xd9, 0x00, 0xc2,
0xc1, 0xa0, 0xa0, 0x0c, 0xdb, 0x3f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xee, 0x08, 0x5d, 0xa4, 0xaf,
0x07, 0x00, 0x00,
// 762 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xdd, 0x4e, 0xe3, 0x46,
0x14, 0xae, 0xe3, 0x90, 0x86, 0x13, 0x27, 0x24, 0xd3, 0x14, 0x19, 0xb8, 0x49, 0x8d, 0x68, 0xa3,
0x56, 0x4d, 0x24, 0xa8, 0x54, 0x71, 0x51, 0xa9, 0x49, 0xa0, 0x2d, 0x3f, 0x0a, 0x74, 0x12, 0x21,
0xd1, 0x4a, 0x8d, 0x26, 0xf6, 0xd4, 0x1e, 0xd5, 0xf1, 0x58, 0x9e, 0x31, 0x51, 0x78, 0x90, 0xbe,
0xc4, 0x3e, 0xc8, 0xde, 0xed, 0x33, 0xad, 0x3c, 0x36, 0xf9, 0x61, 0x59, 0x2d, 0x4b, 0xb8, 0x3b,
0xe7, 0xcc, 0x9c, 0xf3, 0x9d, 0xdf, 0x0f, 0x6a, 0x82, 0x46, 0x77, 0xcc, 0xa6, 0xa3, 0x89, 0x70,
0x5b, 0x61, 0xc4, 0x25, 0x47, 0xf5, 0x09, 0xf3, 0xef, 0x62, 0x91, 0x6a, 0xad, 0xec, 0x7d, 0xd7,
0xb0, 0xf9, 0x64, 0xc2, 0x83, 0xd4, 0xba, 0x6b, 0x08, 0xdb, 0xa3, 0x13, 0x92, 0x6a, 0xd6, 0x31,
0x54, 0x7a, 0xdc, 0xf7, 0xa9, 0x2d, 0x19, 0x0f, 0xfa, 0x64, 0x42, 0xd1, 0x77, 0xb0, 0x65, 0xcf,
0x2d, 0xa3, 0x80, 0x4c, 0xa8, 0xa9, 0x35, 0xb4, 0xe6, 0x26, 0xae, 0xd8, 0x2b, 0x1f, 0xad, 0x73,
0x28, 0x5f, 0x93, 0x48, 0xb2, 0xcf, 0xf6, 0x44, 0x55, 0xd0, 0x25, 0x71, 0xcd, 0x9c, 0x7a, 0x4c,
0x44, 0xeb, 0x8d, 0x06, 0x45, 0xcc, 0xa7, 0x5d, 0x22, 0x6d, 0xef, 0xf9, 0x71, 0xf6, 0xa1, 0x1c,
0x3e, 0x64, 0x30, 0x5a, 0x44, 0x34, 0xe6, 0xc6, 0x21, 0x71, 0xd1, 0x4f, 0x50, 0x8c, 0xf8, 0x74,
0xe4, 0x10, 0x49, 0x4c, 0xbd, 0xa1, 0x37, 0x4b, 0x87, 0x3b, 0xad, 0x95, 0x36, 0x65, 0xdd, 0xe9,
0xfa, 0x7c, 0x8c, 0xbf, 0x8c, 0xf8, 0xf4, 0x84, 0x48, 0x82, 0xf6, 0x60, 0xd3, 0x23, 0xc2, 0x1b,
0xfd, 0x47, 0x67, 0xc2, 0xcc, 0x37, 0xf4, 0x66, 0x19, 0x17, 0x13, 0xc3, 0x05, 0x9d, 0x09, 0x6b,
0x0a, 0xd5, 0x6b, 0x9f, 0xd8, 0xd4, 0xe3, 0xbe, 0x43, 0xa3, 0x1b, 0xe2, 0xc7, 0xf3, 0x9a, 0xb4,
0x79, 0x4d, 0xe8, 0x18, 0xf2, 0x72, 0x16, 0x52, 0x95, 0x54, 0xe5, 0xf0, 0xa0, 0xf5, 0xd4, 0x6c,
0x5a, 0x4b, 0x71, 0x86, 0xb3, 0x90, 0x62, 0xe5, 0x82, 0xb6, 0xa1, 0x70, 0x97, 0x44, 0x15, 0x2a,
0x63, 0x03, 0x67, 0x9a, 0xf5, 0xcf, 0x0a, 0xf0, 0xef, 0x11, 0x8f, 0x43, 0x74, 0x0e, 0x46, 0xb8,
0xb0, 0x09, 0x53, 0x53, 0x35, 0x7e, 0xfb, 0x49, 0x38, 0x95, 0x36, 0x5e, 0xf1, 0xb5, 0xfe, 0xd7,
0x60, 0xe3, 0xcf, 0x98, 0x46, 0xb3, 0xe7, 0xcf, 0xe0, 0x00, 0x2a, 0x2b, 0x33, 0x10, 0x66, 0xae,
0xa1, 0x37, 0x37, 0x71, 0x79, 0x79, 0x08, 0x22, 0x69, 0x8f, 0x23, 0x7c, 0x53, 0x4f, 0xdb, 0xe3,
0x08, 0x1f, 0xfd, 0x00, 0xb5, 0x25, 0xec, 0x91, 0x9b, 0x14, 0x63, 0xe6, 0x1b, 0x5a, 0xd3, 0xc0,
0xd5, 0xf0, 0x51, 0x91, 0xd6, 0xdf, 0x50, 0x19, 0xc8, 0x88, 0x05, 0x2e, 0xa6, 0x22, 0xe4, 0x81,
0xa0, 0xe8, 0x08, 0x0a, 0x42, 0x12, 0x19, 0x0b, 0x95, 0x57, 0xe9, 0x70, 0xef, 0xc9, 0xa1, 0x0e,
0xd4, 0x17, 0x9c, 0x7d, 0x45, 0x75, 0xd8, 0x50, 0x9d, 0xcc, 0x16, 0x25, 0x55, 0xac, 0x5b, 0x30,
0xba, 0x9c, 0xfb, 0xaf, 0x18, 0xba, 0xf8, 0x10, 0x9a, 0x00, 0x4a, 0xf3, 0xbe, 0x64, 0x42, 0xae,
0x07, 0xb0, 0xd8, 0x89, 0xb4, 0xc1, 0x0f, 0x3b, 0x31, 0x86, 0xaf, 0xce, 0x02, 0x49, 0x5d, 0x1a,
0xbd, 0x36, 0x86, 0x3e, 0xc7, 0x10, 0x50, 0xcf, 0x30, 0x30, 0x09, 0x5c, 0xba, 0x76, 0xa7, 0xc6,
0xd4, 0x65, 0x81, 0xea, 0x94, 0x8e, 0x53, 0x25, 0x59, 0x10, 0x1a, 0x38, 0x6a, 0x41, 0x74, 0x9c,
0x88, 0xd6, 0x3b, 0x0d, 0xbe, 0x5e, 0x70, 0xd3, 0x09, 0x15, 0x76, 0xc4, 0xc2, 0x44, 0x7c, 0x19,
0xec, 0x2f, 0x50, 0x48, 0x99, 0x4f, 0xe1, 0x96, 0x3e, 0x38, 0xc8, 0x94, 0x15, 0x17, 0x80, 0x03,
0x65, 0xc0, 0x99, 0x13, 0xea, 0x00, 0x24, 0x81, 0x98, 0x90, 0xcc, 0x16, 0x19, 0x91, 0x7c, 0xf3,
0x24, 0xee, 0x05, 0x9d, 0xa9, 0xdb, 0xba, 0x26, 0x2c, 0xc2, 0x4b, 0x4e, 0xd6, 0x5b, 0x0d, 0xea,
0x73, 0xc6, 0x5c, 0xbb, 0x9e, 0x9f, 0x21, 0xaf, 0xce, 0x32, 0xad, 0x66, 0xff, 0x23, 0xf7, 0xbe,
0x4c, 0xd0, 0x58, 0x39, 0xbc, 0x46, 0x25, 0x12, 0x6a, 0x83, 0x99, 0xe8, 0xf1, 0xe0, 0x5f, 0xb6,
0xe6, 0x45, 0x22, 0xc8, 0x2b, 0x8a, 0x4d, 0x77, 0x5a, 0xc9, 0x8f, 0xd8, 0x6f, 0xb1, 0xe9, 0x17,
0x90, 0xff, 0x83, 0x49, 0xc5, 0x25, 0x67, 0x27, 0x29, 0xd1, 0xe9, 0x38, 0x11, 0xd1, 0xce, 0x12,
0xc7, 0xe7, 0x14, 0x63, 0xce, 0x89, 0x7c, 0x3b, 0x19, 0x3b, 0x8f, 0xb2, 0x60, 0x39, 0x9c, 0x69,
0xd6, 0x0d, 0x94, 0x14, 0xd3, 0x61, 0x2a, 0x62, 0x5f, 0xbe, 0x38, 0x79, 0x8f, 0x49, 0x91, 0x41,
0x2a, 0xf9, 0xfb, 0x5f, 0x61, 0xeb, 0x11, 0xa7, 0xa3, 0x22, 0xe4, 0xfb, 0x57, 0xfd, 0xd3, 0xea,
0x17, 0xa8, 0x06, 0xe5, 0x9b, 0xd3, 0xde, 0xf0, 0x0a, 0x8f, 0xba, 0x67, 0xfd, 0x0e, 0xbe, 0xad,
0x3a, 0xa8, 0x0a, 0x46, 0x66, 0xfa, 0xed, 0xf2, 0xaa, 0x33, 0xac, 0xd2, 0x6e, 0xef, 0xaf, 0x8e,
0xcb, 0xa4, 0x17, 0x8f, 0x13, 0xd4, 0xf6, 0x3d, 0xf3, 0x7d, 0x76, 0x2f, 0xa9, 0xed, 0xb5, 0xd3,
0x8c, 0x7e, 0x74, 0x98, 0x90, 0x11, 0x1b, 0xc7, 0x92, 0x3a, 0x6d, 0x16, 0x48, 0x1a, 0x05, 0xc4,
0x6f, 0xab, 0x34, 0xdb, 0xd9, 0xd8, 0xc3, 0xf1, 0xb8, 0xa0, 0x0c, 0x47, 0xef, 0x03, 0x00, 0x00,
0xff, 0xff, 0x8d, 0x5a, 0x44, 0x98, 0x25, 0x08, 0x00, 0x00,
}

View File

@@ -4,26 +4,30 @@ import (
"context"
"sync"
"github.com/zilliztech/milvus-distributed/internal/allocator"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
)
type Cache interface {
Hit(collectionName string) bool
Get(collectionName string) (*servicepb.CollectionDescription, error)
Sync(collectionName string) error
Update(collectionName string, desc *servicepb.CollectionDescription) error
Update(collectionName string) error
Remove(collectionName string) error
}
var globalMetaCache Cache
type SimpleMetaCache struct {
mu sync.RWMutex
metas map[string]*servicepb.CollectionDescription // collection name to schema
ctx context.Context
proxyInstance *Proxy
mu sync.RWMutex
proxyID UniqueID
metas map[string]*servicepb.CollectionDescription // collection name to its description
masterClient masterpb.MasterClient
reqIDAllocator *allocator.IDAllocator
tsoAllocator *allocator.TimestampAllocator
ctx context.Context
}
func (metaCache *SimpleMetaCache) Hit(collectionName string) bool {
@@ -43,34 +47,58 @@ func (metaCache *SimpleMetaCache) Get(collectionName string) (*servicepb.Collect
return schema, nil
}
func (metaCache *SimpleMetaCache) Sync(collectionName string) error {
dct := &DescribeCollectionTask{
Condition: NewTaskCondition(metaCache.ctx),
DescribeCollectionRequest: internalpb.DescribeCollectionRequest{
MsgType: internalpb.MsgType_kDescribeCollection,
CollectionName: &servicepb.CollectionName{
CollectionName: collectionName,
},
},
masterClient: metaCache.proxyInstance.masterClient,
func (metaCache *SimpleMetaCache) Update(collectionName string) error {
reqID, err := metaCache.reqIDAllocator.AllocOne()
if err != nil {
return err
}
ts, err := metaCache.tsoAllocator.AllocOne()
if err != nil {
return err
}
hasCollectionReq := &internalpb.HasCollectionRequest{
MsgType: internalpb.MsgType_kHasCollection,
ReqID: reqID,
Timestamp: ts,
ProxyID: metaCache.proxyID,
CollectionName: &servicepb.CollectionName{
CollectionName: collectionName,
},
}
has, err := metaCache.masterClient.HasCollection(metaCache.ctx, hasCollectionReq)
if err != nil {
return err
}
if !has.Value {
return errors.New("collection " + collectionName + " not exists")
}
var cancel func()
dct.ctx, cancel = context.WithTimeout(metaCache.ctx, reqTimeoutInterval)
defer cancel()
err := metaCache.proxyInstance.sched.DdQueue.Enqueue(dct)
reqID, err = metaCache.reqIDAllocator.AllocOne()
if err != nil {
return err
}
ts, err = metaCache.tsoAllocator.AllocOne()
if err != nil {
return err
}
req := &internalpb.DescribeCollectionRequest{
MsgType: internalpb.MsgType_kDescribeCollection,
ReqID: reqID,
Timestamp: ts,
ProxyID: metaCache.proxyID,
CollectionName: &servicepb.CollectionName{
CollectionName: collectionName,
},
}
resp, err := metaCache.masterClient.DescribeCollection(metaCache.ctx, req)
if err != nil {
return err
}
return dct.WaitToFinish()
}
func (metaCache *SimpleMetaCache) Update(collectionName string, desc *servicepb.CollectionDescription) error {
metaCache.mu.Lock()
defer metaCache.mu.Unlock()
metaCache.metas[collectionName] = resp
metaCache.metas[collectionName] = desc
return nil
}
@@ -87,14 +115,23 @@ func (metaCache *SimpleMetaCache) Remove(collectionName string) error {
return nil
}
func newSimpleMetaCache(ctx context.Context, proxyInstance *Proxy) *SimpleMetaCache {
func newSimpleMetaCache(ctx context.Context,
mCli masterpb.MasterClient,
idAllocator *allocator.IDAllocator,
tsoAllocator *allocator.TimestampAllocator) *SimpleMetaCache {
return &SimpleMetaCache{
metas: make(map[string]*servicepb.CollectionDescription),
proxyInstance: proxyInstance,
ctx: ctx,
metas: make(map[string]*servicepb.CollectionDescription),
masterClient: mCli,
reqIDAllocator: idAllocator,
tsoAllocator: tsoAllocator,
proxyID: Params.ProxyID(),
ctx: ctx,
}
}
func initGlobalMetaCache(ctx context.Context, proxyInstance *Proxy) {
globalMetaCache = newSimpleMetaCache(ctx, proxyInstance)
func initGlobalMetaCache(ctx context.Context,
mCli masterpb.MasterClient,
idAllocator *allocator.IDAllocator,
tsoAllocator *allocator.TimestampAllocator) {
globalMetaCache = newSimpleMetaCache(ctx, mCli, idAllocator, tsoAllocator)
}
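
Call sites follow a check-then-fill pattern against the global cache, as the task code in the next file shows. A condensed sketch of that intended flow, with error handling abbreviated and collectionName assumed in scope:

// Typical call-site pattern: fill the cache on a miss, then read from it.
if !globalMetaCache.Hit(collectionName) {
	if err := globalMetaCache.Update(collectionName); err != nil {
		return err
	}
}
desc, err := globalMetaCache.Get(collectionName)
if err != nil {
	return err
}
_ = desc // the cached *servicepb.CollectionDescription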

View File

@@ -109,7 +109,7 @@ func (p *Proxy) startProxy() error {
if err != nil {
return err
}
initGlobalMetaCache(p.proxyLoopCtx, p)
initGlobalMetaCache(p.proxyLoopCtx, p.masterClient, p.idAllocator, p.tsoAllocator)
p.manipulationMsgStream.Start()
p.queryMsgStream.Start()
p.sched.Start()

View File

@@ -91,7 +91,7 @@ func (it *InsertTask) PreExecute() error {
func (it *InsertTask) Execute() error {
collectionName := it.BaseInsertTask.CollectionName
if !globalMetaCache.Hit(collectionName) {
err := globalMetaCache.Sync(collectionName)
err := globalMetaCache.Update(collectionName)
if err != nil {
return err
}
@@ -352,7 +352,7 @@ func (qt *QueryTask) SetTs(ts Timestamp) {
func (qt *QueryTask) PreExecute() error {
collectionName := qt.query.CollectionName
if !globalMetaCache.Hit(collectionName) {
err := globalMetaCache.Sync(collectionName)
err := globalMetaCache.Update(collectionName)
if err != nil {
return err
}
@@ -605,9 +605,14 @@ func (dct *DescribeCollectionTask) PreExecute() error {
}
func (dct *DescribeCollectionTask) Execute() error {
if !globalMetaCache.Hit(dct.CollectionName.CollectionName) {
err := globalMetaCache.Update(dct.CollectionName.CollectionName)
if err != nil {
return err
}
}
var err error
dct.result, err = dct.masterClient.DescribeCollection(dct.ctx, &dct.DescribeCollectionRequest)
globalMetaCache.Update(dct.CollectionName.CollectionName, dct.result)
dct.result, err = globalMetaCache.Get(dct.CollectionName.CollectionName)
return err
}
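
InsertTask, QueryTask, and DescribeCollectionTask now share the same cache-aside shape: check Hit, call Update on a miss, then read through Get. Condensed into one function it looks like the sketch below; the MetaCache interface is distilled from the call sites here, while the code itself uses the concrete globalMetaCache.

```go
// Hypothetical interface distilled from the Hit/Update/Get call sites above.
type MetaCache interface {
    Hit(collectionName string) bool
    Update(collectionName string) error
    Get(collectionName string) (*servicepb.CollectionDescription, error)
}

// Cache-aside read as the three tasks now perform it.
func describeThroughCache(cache MetaCache, name string) (*servicepb.CollectionDescription, error) {
    if !cache.Hit(name) {
        // Miss: ask master and fill the cache before reading.
        if err := cache.Update(name); err != nil {
            return nil, err
        }
    }
    return cache.Get(name)
}
```

The miss check and the read are not atomic, so two concurrent tasks may both call Update; since Update overwrites the same key under the cache's lock, that appears to be harmless, just redundant.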

View File

@ -29,7 +29,7 @@ func createPlan(col Collection, dsl string) (*Plan, error) {
if errorCode != 0 {
errorMsg := C.GoString(status.error_msg)
defer C.free(unsafe.Pointer(status.error_msg))
return nil, errors.New("Create plan failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
return nil, errors.New("Insert failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
}
var newPlan = &Plan{cPlan: cPlan}
@ -60,7 +60,7 @@ func parserPlaceholderGroup(plan *Plan, placeHolderBlob []byte) (*PlaceholderGro
if errorCode != 0 {
errorMsg := C.GoString(status.error_msg)
defer C.free(unsafe.Pointer(status.error_msg))
return nil, errors.New("Parser placeholder group failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
return nil, errors.New("Insert failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
}
var newPlaceholderGroup = &PlaceholderGroup{cPlaceholderGroup: cPlaceholderGroup}
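
Both hunks in this file rebuild the same error string around a C status, and the change only swaps which operation name appears in the message. If that repetition were to be factored out, a helper along the following lines would do. This is a sketch, not code from the commit, and it takes the already-converted code and message rather than reading the cgo status itself.

```go
// Hypothetical helper for the repeated pattern above: a zero code means
// success, anything else becomes a descriptive error (assumes "fmt").
func statusToError(op string, errorCode int, errorMsg string) error {
    if errorCode == 0 {
        return nil
    }
    return fmt.Errorf("%s failed, C runtime error detected, error code = %d, error msg = %s",
        op, errorCode, errorMsg)
}
```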

View File

@ -139,7 +139,7 @@ func (ss *searchService) receiveSearchMsg() {
err := ss.search(msg)
if err != nil {
log.Println(err)
err = ss.publishFailedSearchResult(msg, err.Error())
err = ss.publishFailedSearchResult(msg)
if err != nil {
log.Println("publish FailedSearchResult failed, error message: ", err)
}
@ -191,7 +191,7 @@ func (ss *searchService) doUnsolvedMsgSearch() {
err := ss.search(msg)
if err != nil {
log.Println(err)
err = ss.publishFailedSearchResult(msg, err.Error())
err = ss.publishFailedSearchResult(msg)
if err != nil {
log.Println("publish FailedSearchResult failed, error message: ", err)
}
@ -346,7 +346,7 @@ func (ss *searchService) publishSearchResult(msg msgstream.TsMsg) error {
return nil
}
func (ss *searchService) publishFailedSearchResult(msg msgstream.TsMsg, errMsg string) error {
func (ss *searchService) publishFailedSearchResult(msg msgstream.TsMsg) error {
msgPack := msgstream.MsgPack{}
searchMsg, ok := msg.(*msgstream.SearchMsg)
if !ok {
@ -354,7 +354,7 @@ func (ss *searchService) publishFailedSearchResult(msg msgstream.TsMsg, errMsg s
}
var results = internalpb.SearchResult{
MsgType: internalpb.MsgType_kSearchResult,
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR, Reason: errMsg},
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR},
ReqID: searchMsg.ReqID,
ProxyID: searchMsg.ProxyID,
QueryNodeID: searchMsg.ProxyID,
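
With errMsg removed from the signature, the published Status carries ErrorCode_UNEXPECTED_ERROR but an empty Reason, so the failure text now survives only in the local log output. A hypothetical variant that keeps the reason available, not what this commit does, would be a small constructor such as the sketch below; UniqueID stands in for the actual ID type of ReqID/ProxyID.

```go
// Hypothetical helper, not part of this commit: build the failure result
// with an explicit reason instead of dropping it.
func buildFailedSearchResult(reqID, proxyID UniqueID, reason string) *internalpb.SearchResult {
    return &internalpb.SearchResult{
        MsgType:     internalpb.MsgType_kSearchResult,
        Status:      &commonpb.Status{ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR, Reason: reason},
        ReqID:       reqID,
        ProxyID:     proxyID,
        QueryNodeID: proxyID, // mirrors the assignment in the hunk above
    }
}
```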

View File

@ -1,235 +0,0 @@
import socket
import pytest
from .utils import *
timeout = 60
dimension = 128
delete_timeout = 60
def pytest_addoption(parser):
parser.addoption("--ip", action="store", default="localhost")
parser.addoption("--service", action="store", default="")
parser.addoption("--port", action="store", default=19530)
parser.addoption("--http-port", action="store", default=19121)
parser.addoption("--handler", action="store", default="GRPC")
parser.addoption("--tag", action="store", default="all", help="only run tests matching the tag.")
parser.addoption('--dry-run', action='store_true', default=False)
def pytest_configure(config):
# register an additional marker
config.addinivalue_line(
"markers", "tag(name): mark test to run only matching the tag"
)
def pytest_runtest_setup(item):
tags = list()
for marker in item.iter_markers(name="tag"):
for tag in marker.args:
tags.append(tag)
if tags:
cmd_tag = item.config.getoption("--tag")
if cmd_tag != "all" and cmd_tag not in tags:
pytest.skip("test requires tag in {!r}".format(tags))
def pytest_runtestloop(session):
if session.config.getoption('--dry-run'):
total_passed = 0
total_skipped = 0
test_file_to_items = {}
for item in session.items:
file_name, test_class, test_func = item.nodeid.split("::")
if test_file_to_items.get(file_name) is not None:
test_file_to_items[file_name].append(item)
else:
test_file_to_items[file_name] = [item]
for k, items in test_file_to_items.items():
skip_case = []
should_pass_but_skipped = []
skipped_other_reason = []
level2_case = []
for item in items:
if "pytestmark" in item.keywords.keys():
markers = item.keywords["pytestmark"]
skip_case.extend([item.nodeid for marker in markers if marker.name == 'skip'])
should_pass_but_skipped.extend([item.nodeid for marker in markers if marker.name == 'skip' and len(marker.args) > 0 and marker.args[0] == "should pass"])
skipped_other_reason.extend([item.nodeid for marker in markers if marker.name == 'skip' and (len(marker.args) < 1 or marker.args[0] != "should pass")])
level2_case.extend([item.nodeid for marker in markers if marker.name == 'level' and marker.args[0] == 2])
print("")
print(f"[{k}]:")
print(f" Total : {len(items):13}")
print(f" Passed : {len(items) - len(skip_case):13}")
print(f" Skipped : {len(skip_case):13}")
print(f" - should pass: {len(should_pass_but_skipped):4}")
print(f" - not supported: {len(skipped_other_reason):4}")
print(f" Level2 : {len(level2_case):13}")
print(f" ---------------------------------------")
print(f" should pass but skipped: ")
print("")
for nodeid in should_pass_but_skipped:
name, test_class, test_func = nodeid.split("::")
print(f" {name:8}: {test_class}.{test_func}")
print("")
print(f"===============================================")
total_passed += len(items) - len(skip_case)
total_skipped += len(skip_case)
print("Total tests : ", len(session.items))
print("Total passed: ", total_passed)
print("Total skiped: ", total_skipped)
return True
def check_server_connection(request):
ip = request.config.getoption("--ip")
port = request.config.getoption("--port")
connected = True
if ip and (ip not in ['localhost', '127.0.0.1']):
try:
socket.getaddrinfo(ip, port, 0, 0, socket.IPPROTO_TCP)
except Exception as e:
print("Socket connnet failed: %s" % str(e))
connected = False
return connected
@pytest.fixture(scope="module")
def connect(request):
ip = request.config.getoption("--ip")
service_name = request.config.getoption("--service")
port = request.config.getoption("--port")
http_port = request.config.getoption("--http-port")
handler = request.config.getoption("--handler")
if handler == "HTTP":
port = http_port
try:
milvus = get_milvus(host=ip, port=port, handler=handler)
# reset_build_index_threshold(milvus)
except Exception as e:
logging.getLogger().error(str(e))
pytest.exit("Milvus server can not connected, exit pytest ...")
def fin():
try:
milvus.close()
pass
except Exception as e:
logging.getLogger().info(str(e))
request.addfinalizer(fin)
return milvus
@pytest.fixture(scope="module")
def dis_connect(request):
ip = request.config.getoption("--ip")
service_name = request.config.getoption("--service")
port = request.config.getoption("--port")
http_port = request.config.getoption("--http-port")
handler = request.config.getoption("--handler")
if handler == "HTTP":
port = http_port
milvus = get_milvus(host=ip, port=port, handler=handler)
milvus.close()
return milvus
@pytest.fixture(scope="module")
def args(request):
ip = request.config.getoption("--ip")
service_name = request.config.getoption("--service")
port = request.config.getoption("--port")
http_port = request.config.getoption("--http-port")
handler = request.config.getoption("--handler")
if handler == "HTTP":
port = http_port
args = {"ip": ip, "port": port, "handler": handler, "service_name": service_name}
return args
@pytest.fixture(scope="module")
def milvus(request):
ip = request.config.getoption("--ip")
port = request.config.getoption("--port")
http_port = request.config.getoption("--http-port")
handler = request.config.getoption("--handler")
if handler == "HTTP":
port = http_port
return get_milvus(host=ip, port=port, handler=handler)
@pytest.fixture(scope="function")
def collection(request, connect):
ori_collection_name = getattr(request.module, "collection_id", "test")
collection_name = gen_unique_str(ori_collection_name)
try:
default_fields = gen_default_fields()
connect.create_collection(collection_name, default_fields)
except Exception as e:
pytest.exit(str(e))
def teardown():
if connect.has_collection(collection_name):
connect.drop_collection(collection_name, timeout=delete_timeout)
request.addfinalizer(teardown)
assert connect.has_collection(collection_name)
return collection_name
# customised id
@pytest.fixture(scope="function")
def id_collection(request, connect):
ori_collection_name = getattr(request.module, "collection_id", "test")
collection_name = gen_unique_str(ori_collection_name)
try:
fields = gen_default_fields(auto_id=False)
connect.create_collection(collection_name, fields)
except Exception as e:
pytest.exit(str(e))
def teardown():
if connect.has_collection(collection_name):
connect.drop_collection(collection_name, timeout=delete_timeout)
request.addfinalizer(teardown)
assert connect.has_collection(collection_name)
return collection_name
@pytest.fixture(scope="function")
def binary_collection(request, connect):
ori_collection_name = getattr(request.module, "collection_id", "test")
collection_name = gen_unique_str(ori_collection_name)
try:
fields = gen_binary_default_fields()
connect.create_collection(collection_name, fields)
except Exception as e:
pytest.exit(str(e))
def teardown():
collection_names = connect.list_collections()
if connect.has_collection(collection_name):
connect.drop_collection(collection_name, timeout=delete_timeout)
request.addfinalizer(teardown)
assert connect.has_collection(collection_name)
return collection_name
# customised id
@pytest.fixture(scope="function")
def binary_id_collection(request, connect):
ori_collection_name = getattr(request.module, "collection_id", "test")
collection_name = gen_unique_str(ori_collection_name)
try:
fields = gen_binary_default_fields(auto_id=False)
connect.create_collection(collection_name, fields)
except Exception as e:
pytest.exit(str(e))
def teardown():
if connect.has_collection(collection_name):
connect.drop_collection(collection_name, timeout=delete_timeout)
request.addfinalizer(teardown)
assert connect.has_collection(collection_name)
return collection_name

View File

@ -1,22 +0,0 @@
from . import utils
default_fields = utils.gen_default_fields()
default_binary_fields = utils.gen_binary_default_fields()
default_entity = utils.gen_entities(1)
default_raw_binary_vector, default_binary_entity = utils.gen_binary_entities(1)
default_entity_row = utils.gen_entities_rows(1)
default_raw_binary_vector_row, default_binary_entity_row = utils.gen_binary_entities_rows(1)
default_entities = utils.gen_entities(utils.default_nb)
default_raw_binary_vectors, default_binary_entities = utils.gen_binary_entities(utils.default_nb)
default_entities_new = utils.gen_entities_new(utils.default_nb)
default_raw_binary_vectors_new, default_binary_entities_new = utils.gen_binary_entities_new(utils.default_nb)
default_entities_rows = utils.gen_entities_rows(utils.default_nb)
default_raw_binary_vectors_rows, default_binary_entities_rows = utils.gen_binary_entities_rows(utils.default_nb)

View File

@ -1,127 +0,0 @@
# STL imports
import random
import string
import time
import datetime
import random
import struct
import sys
import uuid
from functools import wraps
sys.path.append('..')
# Third party imports
import numpy as np
import faker
from faker.providers import BaseProvider
# local application imports
from milvus.client.types import IndexType, MetricType, DataType
# grpc
from milvus.client.grpc_handler import Prepare as gPrepare
from milvus.grpc_gen import milvus_pb2
def gen_vectors(num, dim):
return [[random.random() for _ in range(dim)] for _ in range(num)]
def gen_single_vector(dim):
return [[random.random() for _ in range(dim)]]
def gen_vector(nb, d, seed=np.random.RandomState(1234)):
xb = seed.rand(nb, d).astype("float32")
return xb.tolist()
def gen_unique_str(str=None):
prefix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
return prefix if str is None else str + "_" + prefix
def get_current_day():
return time.strftime('%Y-%m-%d', time.localtime())
def get_last_day(day):
tmp = datetime.datetime.now() - datetime.timedelta(days=day)
return tmp.strftime('%Y-%m-%d')
def get_next_day(day):
tmp = datetime.datetime.now() + datetime.timedelta(days=day)
return tmp.strftime('%Y-%m-%d')
def gen_long_str(num):
string = ''
for _ in range(num):
char = random.choice('tomorrow')
string += char
return string
def gen_one_binary(topk):
ids = [random.randrange(10000000, 99999999) for _ in range(topk)]
distances = [random.random() for _ in range(topk)]
return milvus_pb2.TopKQueryResult(struct.pack(str(topk) + 'l', *ids), struct.pack(str(topk) + 'd', *distances))
def gen_nq_binaries(nq, topk):
return [gen_one_binary(topk) for _ in range(nq)]
def fake_query_bin_result(nq, topk):
return gen_nq_binaries(nq, topk)
class FakerProvider(BaseProvider):
def collection_name(self):
return 'collection_names' + str(uuid.uuid4()).replace('-', '_')
def normal_field_name(self):
return 'normal_field_names' + str(uuid.uuid4()).replace('-', '_')
def vector_field_name(self):
return 'vector_field_names' + str(uuid.uuid4()).replace('-', '_')
def name(self):
return 'name' + str(random.randint(1000, 9999))
def dim(self):
return random.randint(0, 999)
fake = faker.Faker()
fake.add_provider(FakerProvider)
def collection_name_factory():
return fake.collection_name()
def collection_schema_factory():
param = {
"fields": [
{"name": fake.normal_field_name(),"type": DataType.INT32},
{"name": fake.vector_field_name(),"type": DataType.FLOAT_VECTOR, "params": {"dim": random.randint(1, 999)}},
],
"auto_id": True,
}
return param
def records_factory(dimension, nq):
return [[random.random() for _ in range(dimension)] for _ in range(nq)]
def time_it(func):
@wraps(func)
def inner(*args, **kwrgs):
pref = time.perf_counter()
result = func(*args, **kwrgs)
delt = time.perf_counter() - pref
print(f"[{func.__name__}][{delt:.4}s]")
return result
return inner

View File

@ -1,8 +0,0 @@
grpcio==1.26.0
grpcio-tools==1.26.0
numpy==1.18.1
pytest==5.3.4
pytest-cov==2.8.1
pytest-timeout==1.3.4
pymilvus-distributed==0.0.1
sklearn==0.0

File diff suppressed because it is too large

View File

@ -1,315 +0,0 @@
import pytest
from .utils import *
from .constants import *
uid = "create_collection"
class TestCreateCollection:
"""
******************************************************************
The following cases are used to test `create_collection` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_segment_row_limits()
)
def get_segment_row_limit(self, request):
yield request.param
def test_create_collection_fields(self, connect, get_filter_field, get_vector_field):
'''
target: test create normal collection with different fields
method: create collection with diff fields: metric/field_type/...
expected: no exception raised
'''
filter_field = get_filter_field
# logging.getLogger().info(filter_field)
vector_field = get_vector_field
collection_name = gen_unique_str(uid)
fields = {
"fields": [filter_field, vector_field],
}
# logging.getLogger().info(fields)
connect.create_collection(collection_name, fields)
assert connect.has_collection(collection_name)
def test_create_collection_fields_create_index(self, connect, get_filter_field, get_vector_field):
'''
target: test create normal collection with different fields
method: create collection with diff fields: metric/field_type/...
expected: no exception raised
'''
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid)
fields = {
"fields": [filter_field, vector_field],
}
connect.create_collection(collection_name, fields)
assert connect.has_collection(collection_name)
@pytest.mark.skip("no segment_row_limit")
def test_create_collection_segment_row_limit(self, connect):
'''
target: test create normal collection with different fields
method: create collection with diff segment_row_limit
expected: no exception raised
'''
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
# fields["segment_row_limit"] = get_segment_row_limit
connect.create_collection(collection_name, fields)
assert connect.has_collection(collection_name)
@pytest.mark.skip("no flush")
def _test_create_collection_auto_flush_disabled(self, connect):
'''
target: test create normal collection, with large auto_flush_interval
method: create collection with correct params
expected: create status return ok
'''
disable_flush(connect)
collection_name = gen_unique_str(uid)
try:
connect.create_collection(collection_name, default_fields)
finally:
enable_flush(connect)
def test_create_collection_after_insert(self, connect, collection):
'''
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
'''
# pdb.set_trace()
connect.bulk_insert(collection, default_entity)
with pytest.raises(Exception) as e:
connect.create_collection(collection, default_fields)
def test_create_collection_after_insert_flush(self, connect, collection):
'''
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
'''
connect.bulk_insert(collection, default_entity)
connect.flush([collection])
with pytest.raises(Exception) as e:
connect.create_collection(collection, default_fields)
def test_create_collection_without_connection(self, dis_connect):
'''
target: test create collection, without connection
method: create collection with correct params, with a disconnected instance
expected: error raised
'''
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
dis_connect.create_collection(collection_name, default_fields)
def test_create_collection_existed(self, connect):
'''
target: test create collection but the collection name have already existed
method: create collection with the same collection_name
expected: error raised
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, default_fields)
def test_create_after_drop_collection(self, connect, collection):
'''
target: create with the same collection name after collection dropped
method: delete, then create
expected: create success
'''
connect.drop_collection(collection)
time.sleep(2)
connect.create_collection(collection, default_fields)
@pytest.mark.level(2)
def test_create_collection_multithread(self, connect):
'''
target: test create collection with multithread
method: create collection using multithread,
expected: collections are created
'''
threads_num = 8
threads = []
collection_names = []
def create():
collection_name = gen_unique_str(uid)
collection_names.append(collection_name)
connect.create_collection(collection_name, default_fields)
for i in range(threads_num):
t = threading.Thread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert item in connect.list_collections()
connect.drop_collection(item)
class TestCreateCollectionInvalid(object):
"""
Test creating collections with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_metric_types()
)
def get_metric_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_segment_row_limit(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_dim(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_string(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_field_types()
)
def get_field_type(self, request):
yield request.param
@pytest.mark.level(2)
@pytest.mark.skip("no segment row limit")
def test_create_collection_with_invalid_segment_row_limit(self, connect, get_segment_row_limit):
collection_name = gen_unique_str()
fields = copy.deepcopy(default_fields)
fields["segment_row_limit"] = get_segment_row_limit
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
@pytest.mark.level(2)
def test_create_collection_with_invalid_dimension(self, connect, get_dim):
dimension = get_dim
collection_name = gen_unique_str()
fields = copy.deepcopy(default_fields)
fields["fields"][-1]["params"]["dim"] = dimension
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
@pytest.mark.level(2)
def test_create_collection_with_invalid_collectionname(self, connect, get_invalid_string):
collection_name = get_invalid_string
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, default_fields)
@pytest.mark.level(2)
def test_create_collection_with_empty_collectionname(self, connect):
collection_name = ''
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, default_fields)
@pytest.mark.level(2)
def test_create_collection_with_none_collectionname(self, connect):
collection_name = None
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, default_fields)
def test_create_collection_None(self, connect):
'''
target: test create collection but the collection name is None
method: create collection, param collection_name is None
expected: create raise error
'''
with pytest.raises(Exception) as e:
connect.create_collection(None, default_fields)
def test_create_collection_no_dimension(self, connect):
'''
target: test create collection with no dimension params
method: create collection without the dim param
expected: exception raised
'''
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
fields["fields"][-1]["params"].pop("dim")
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
@pytest.mark.skip("no segment row limit")
def test_create_collection_no_segment_row_limit(self, connect):
'''
target: test create collection with no segment_row_limit params
method: create collection with correct params
expected: use default default_segment_row_limit
'''
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
fields.pop("segment_row_limit")
connect.create_collection(collection_name, fields)
res = connect.get_collection_info(collection_name)
# logging.getLogger().info(res)
assert res["segment_row_limit"] == default_server_segment_row_limit
def test_create_collection_limit_fields(self, connect):
collection_name = gen_unique_str(uid)
limit_num = 64
fields = copy.deepcopy(default_fields)
for i in range(limit_num):
field_name = gen_unique_str("field_name")
field = {"name": field_name, "type": DataType.INT64}
fields["fields"].append(field)
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
@pytest.mark.level(2)
def test_create_collection_invalid_field_name(self, connect, get_invalid_string):
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
field_name = get_invalid_string
field = {"name": field_name, "type": DataType.INT64}
fields["fields"].append(field)
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
def test_create_collection_invalid_field_type(self, connect, get_field_type):
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
field_type = get_field_type
field = {"name": "test_field", "type": field_type}
fields["fields"].append(field)
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)

View File

@ -1,98 +0,0 @@
import pytest
from .utils import *
from .constants import *
uniq_id = "drop_collection"
class TestDropCollection:
"""
******************************************************************
The following cases are used to test `drop_collection` function
******************************************************************
"""
def test_drop_collection(self, connect, collection):
'''
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
'''
connect.drop_collection(collection)
time.sleep(2)
assert not connect.has_collection(collection)
def test_drop_collection_without_connection(self, collection, dis_connect):
'''
target: test describe collection, without connection
method: drop collection with correct params, with a disconnected instance
expected: drop raise exception
'''
with pytest.raises(Exception) as e:
dis_connect.drop_collection(collection)
def test_drop_collection_not_existed(self, connect):
'''
target: test if collection not created
method: use a random collection name that does not exist in db,
assert the exception raised by the drop_collection method
expected: False
'''
collection_name = gen_unique_str(uniq_id)
with pytest.raises(Exception) as e:
connect.drop_collection(collection_name)
@pytest.mark.level(2)
def test_create_drop_collection_multithread(self, connect):
'''
target: test create and drop collection with multithread
method: create and drop collection using multithread,
expected: collections are created, and dropped
'''
threads_num = 8
threads = []
collection_names = []
def create():
collection_name = gen_unique_str(uniq_id)
collection_names.append(collection_name)
connect.create_collection(collection_name, default_fields)
connect.drop_collection(collection_name)
for i in range(threads_num):
t = threading.Thread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert not connect.has_collection(item)
class TestDropCollectionInvalid(object):
"""
Test drop collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_drop_collection_with_invalid_collectionname(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.drop_collection(collection_name)
def test_drop_collection_with_empty_collectionname(self, connect):
collection_name = ''
with pytest.raises(Exception) as e:
connect.drop_collection(collection_name)
def test_drop_collection_with_none_collectionname(self, connect):
collection_name = None
with pytest.raises(Exception) as e:
connect.drop_collection(collection_name)

View File

@ -1,234 +0,0 @@
import pytest
from .utils import *
from .constants import *
uid = "collection_info"
class TestInfoBase:
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_segment_row_limits()
)
def get_segment_row_limit(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
logging.getLogger().info(request.param)
if str(connect._cmd("mode")) == "CPU":
if request.param["index_type"] in index_cpu_not_support():
pytest.skip("sq8h not support in CPU mode")
return request.param
"""
******************************************************************
The following cases are used to test `get_collection_info` function, no data in collection
******************************************************************
"""
@pytest.mark.skip("no segment row limit and type")
def test_info_collection_fields(self, connect, get_filter_field, get_vector_field):
'''
target: test create normal collection with different fields, check info returned
method: create collection with diff fields: metric/field_type/..., calling `get_collection_info`
expected: no exception raised, and value returned correct
'''
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid)
fields = {
"fields": [filter_field, vector_field],
"segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
res = connect.get_collection_info(collection_name)
assert res['auto_id'] == True
assert res['segment_row_limit'] == default_segment_row_limit
assert len(res["fields"]) == 2
for field in res["fields"]:
if field["type"] == filter_field:
assert field["name"] == filter_field["name"]
elif field["type"] == vector_field:
assert field["name"] == vector_field["name"]
assert field["params"] == vector_field["params"]
@pytest.mark.skip("no segment row limit and type")
def test_create_collection_segment_row_limit(self, connect, get_segment_row_limit):
'''
target: test create normal collection with different fields
method: create collection with diff segment_row_limit
expected: no exception raised
'''
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
fields["segment_row_limit"] = get_segment_row_limit
connect.create_collection(collection_name, fields)
# assert segment row count
res = connect.get_collection_info(collection_name)
assert res['segment_row_limit'] == get_segment_row_limit
@pytest.mark.skip("no create Index")
def test_get_collection_info_after_index_created(self, connect, collection, get_simple_index):
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
res = connect.get_collection_info(collection)
for field in res["fields"]:
if field["name"] == default_float_vec_field_name:
index = field["indexes"][0]
assert index["index_type"] == get_simple_index["index_type"]
assert index["metric_type"] == get_simple_index["metric_type"]
@pytest.mark.level(2)
def test_get_collection_info_without_connection(self, connect, collection, dis_connect):
'''
target: test get collection info, without connection
method: calling get collection info with correct params, with a disconnected instance
expected: get collection info raise exception
'''
with pytest.raises(Exception) as e:
dis_connect.get_collection_info(collection)
def test_get_collection_info_not_existed(self, connect):
'''
target: test if collection not created
method: use a random collection name that does not exist in db,
assert the value returned by get_collection_info method
expected: False
'''
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
res = connect.get_collection_info(collection_name)
@pytest.mark.level(2)
def test_get_collection_info_multithread(self, connect):
'''
target: test create collection with multithread
method: create collection using multithread,
expected: collections are created
'''
threads_num = 4
threads = []
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
def get_info():
res = connect.get_collection_info(collection_name)
# assert
for i in range(threads_num):
t = threading.Thread(target=get_info, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
"""
******************************************************************
The following cases are used to test `get_collection_info` function, and insert data in collection
******************************************************************
"""
@pytest.mark.skip("no segment row limit and type")
def test_info_collection_fields_after_insert(self, connect, get_filter_field, get_vector_field):
'''
target: test create normal collection with different fields, check info returned
method: create collection with diff fields: metric/field_type/..., calling `get_collection_info`
expected: no exception raised, and value returned correct
'''
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid)
fields = {
"fields": [filter_field, vector_field],
"segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
entities = gen_entities_by_fields(fields["fields"], default_nb, vector_field["params"]["dim"])
res_ids = connect.bulk_insert(collection_name, entities)
connect.flush([collection_name])
res = connect.get_collection_info(collection_name)
assert res['auto_id'] == True
assert res['segment_row_limit'] == default_segment_row_limit
assert len(res["fields"]) == 2
for field in res["fields"]:
if field["type"] == filter_field:
assert field["name"] == filter_field["name"]
elif field["type"] == vector_field:
assert field["name"] == vector_field["name"]
assert field["params"] == vector_field["params"]
@pytest.mark.skip("not segment row limit")
def test_create_collection_segment_row_limit_after_insert(self, connect, get_segment_row_limit):
'''
target: test create normal collection with different fields
method: create collection with diff segment_row_limit
expected: no exception raised
'''
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
fields["segment_row_limit"] = get_segment_row_limit
connect.create_collection(collection_name, fields)
entities = gen_entities_by_fields(fields["fields"], default_nb, fields["fields"][-1]["params"]["dim"])
res_ids = connect.bulk_insert(collection_name, entities)
connect.flush([collection_name])
res = connect.get_collection_info(collection_name)
assert res['auto_id'] == True
assert res['segment_row_limit'] == get_segment_row_limit
class TestInfoInvalid(object):
"""
Test get collection info with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_get_collection_info_with_invalid_collectionname(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.get_collection_info(collection_name)
@pytest.mark.level(2)
def test_get_collection_info_with_empty_collectionname(self, connect):
collection_name = ''
with pytest.raises(Exception) as e:
connect.get_collection_info(collection_name)
@pytest.mark.level(2)
def test_get_collection_info_with_none_collectionname(self, connect):
collection_name = None
with pytest.raises(Exception) as e:
connect.get_collection_info(collection_name)
def test_get_collection_info_None(self, connect):
'''
target: test get collection info with collection name None
method: call get_collection_info, param collection_name is None
expected: error raised
'''
with pytest.raises(Exception) as e:
connect.get_collection_info(None)

View File

@ -1,93 +0,0 @@
import pytest
from .utils import *
from .constants import *
uid = "has_collection"
class TestHasCollection:
"""
******************************************************************
The following cases are used to test `has_collection` function
******************************************************************
"""
def test_has_collection(self, connect, collection):
'''
target: test if the created collection existed
method: create collection, assert the value returned by has_collection method
expected: True
'''
assert connect.has_collection(collection)
@pytest.mark.level(2)
def test_has_collection_without_connection(self, collection, dis_connect):
'''
target: test has collection, without connection
method: calling has collection with correct params, with a disconnected instance
expected: has collection raise exception
'''
with pytest.raises(Exception) as e:
assert dis_connect.has_collection(collection)
def test_has_collection_not_existed(self, connect):
'''
target: test if collection not created
method: use a random collection name that does not exist in db,
assert the value returned by has_collection method
expected: False
'''
collection_name = gen_unique_str("test_collection")
assert not connect.has_collection(collection_name)
@pytest.mark.level(2)
def test_has_collection_multithread(self, connect):
'''
target: test create collection with multithread
method: create collection using multithread,
expected: collections are created
'''
threads_num = 4
threads = []
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
def has():
assert connect.has_collection(collection_name)
# assert not assert_collection(connect, collection_name)
for i in range(threads_num):
t = threading.Thread(target=has, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
class TestHasCollectionInvalid(object):
"""
Test has collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_has_collection_with_invalid_collectionname(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.level(2)
def test_has_collection_with_empty_collectionname(self, connect):
collection_name = ''
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.level(2)
def test_has_collection_with_none_collectionname(self, connect):
collection_name = None
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)

View File

@ -1,465 +0,0 @@
import pytest
from .utils import *
from .constants import *
ADD_TIMEOUT = 600
uid = "test_insert"
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
default_single_query = {
"bool": {
"must": [
{"vector": {field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "metric_type": "L2",
"params": {"nprobe": 10}}}}
]
}
}
class TestInsertBase:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")) == "CPU":
if request.param["index_type"] in index_cpu_not_support():
pytest.skip("CPU not support index_type: ivf_sq8h")
return request.param
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
def test_add_vector_with_empty_vector(self, connect, collection):
'''
target: test add vectors with empty vectors list
method: set empty vectors list as add method params
expected: raises an Exception
'''
vector = []
with pytest.raises(Exception) as e:
status, ids = connect.insert(collection, vector)
def test_add_vector_with_None(self, connect, collection):
'''
target: test add vectors with None
method: set None as add method params
expected: raises an Exception
'''
vector = None
with pytest.raises(Exception) as e:
status, ids = connect.insert(collection, vector)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_collection_not_existed(self, connect):
'''
target: test insert with a collection that does not exist
method: insert entity into a randomly named collection
expected: error raised
'''
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
connect.insert(collection_name, default_entities_rows)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_drop_collection(self, connect, collection):
'''
target: test delete collection after insert vector
method: insert vector and delete collection
expected: no error raised
'''
ids = connect.insert(collection, default_entity_row)
assert len(ids) == 1
connect.drop_collection(collection)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_sleep_drop_collection(self, connect, collection):
'''
target: test delete collection after insert vector for a while
method: insert vector, sleep, and delete collection
expected: no error raised
'''
ids = connect.insert(collection, default_entity_row)
assert len(ids) == 1
connect.flush([collection])
connect.drop_collection(collection)
@pytest.mark.skip
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_create_index(self, connect, collection, get_simple_index):
'''
target: test building index after inserting vectors
method: insert vectors and build index
expected: no error raised
'''
ids = connect.insert(collection, default_entities_rows)
assert len(ids) == default_nb
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
info = connect.get_collection_info(collection)
fields = info["fields"]
for field in fields:
if field["name"] == field_name:
assert field["indexes"][0] == get_simple_index
@pytest.mark.skip
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_after_create_index(self, connect, collection, get_simple_index):
'''
target: test inserting vectors after building index
method: build index and insert vectors
expected: no error raised
'''
connect.create_index(collection, field_name, get_simple_index)
ids = connect.insert(collection, default_entities_rows)
assert len(ids) == default_nb
info = connect.get_collection_info(collection)
fields = info["fields"]
for field in fields:
if field["name"] == field_name:
assert field["indexes"][0] == get_simple_index
@pytest.mark.skip(" todo fix search")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_search(self, connect, collection):
'''
target: test searching vectors after insert
method: insert vectors, flush, and search collection
expected: no error raised
'''
ids = connect.insert(collection, default_entities_rows)
connect.flush([collection])
res = connect.search(collection, default_single_query)
logging.getLogger().debug(res)
assert res
@pytest.mark.skip
def test_insert_segment_row_count(self, connect, collection):
nb = default_segment_row_limit + 1
res_ids = connect.insert(collection, gen_entities_rows(nb))
connect.flush([collection])
assert len(res_ids) == nb
stats = connect.get_collection_stats(collection)
assert len(stats['partitions'][0]['segments']) == 2
for segment in stats['partitions'][0]['segments']:
assert segment['row_count'] in [default_segment_row_limit, 1]
@pytest.fixture(
scope="function",
params=[
1,
2000
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_not_match(self, connect, id_collection, insert_count):
'''
target: test insert vectors in collection, using customized ids
method: create collection and insert vectors in it, check the ids returned and the collection length after vectors are inserted
expected: the length of ids equals the collection row count
'''
nb = insert_count
with pytest.raises(Exception) as e:
res_ids = connect.insert(id_collection, gen_entities_rows(nb))
@pytest.mark.skip("todo fix")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_twice_ids_no_ids(self, connect, collection):
'''
target: check the result of insert, with params ids and no ids
method: test insert vectors twice, use customized ids first, and then use no ids
expected: error raised
'''
with pytest.raises(Exception) as e:
res_ids = connect.insert(collection, gen_entities_rows(default_nb, _id=False))
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag(self, connect, collection):
'''
target: test insert entities in collection created before
method: create collection and insert entities in it, with the partition_tag param
expected: the collection row count equals to nq
'''
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities_rows, partition_tag=default_tag)
assert len(ids) == default_nb
assert connect.has_partition(collection, default_tag)
@pytest.mark.skip("todo support custom id")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag_with_ids(self, connect, id_collection):
'''
target: test insert entities in collection created before, insert with ids
method: create collection and insert entities in it, with the partition_tag param
expected: the collection row count equals to nq
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
res_ids = connect.insert(id_collection, gen_entities_rows(default_nb, _id=False), partition_tag=default_tag)
assert res_ids == ids
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag_not_existed(self, connect, collection):
'''
target: test insert entities in collection created before
method: create collection and insert entities in it, with a partition_tag that does not exist
expected: error raised
'''
tag = gen_unique_str()
with pytest.raises(Exception) as e:
ids = connect.insert(collection, default_entities_rows, partition_tag=tag)
@pytest.mark.skip("todo support count entites")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag_existed(self, connect, collection):
'''
target: test insert entities in collection created before
method: create collection and insert entities in it repeatedly, with the partition_tag param
expected: the collection row count equals to nq
'''
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities_rows, partition_tag=default_tag)
ids = connect.insert(collection, default_entities_rows, partition_tag=default_tag)
connect.flush([collection])
res_count = connect.count_entities(collection)
assert res_count == 2 * default_nb
@pytest.mark.level(2)
def test_insert_collection_not_existed(self, connect):
'''
target: test insert entities into a collection that does not exist
method: insert entities into a non-existent collection, check the status
expected: error raised
'''
with pytest.raises(Exception) as e:
ids = connect.insert(gen_unique_str("not_exist_collection"), default_entities_rows)
@pytest.mark.skip("todo support dim check")
def test_insert_dim_not_matched(self, connect, collection):
'''
target: test insert entities, the vector dimension is not equal to the collection dimension
method: the entities dimension is half of the collection dimension, check the status
expected: error raised
'''
vectors = gen_vectors(default_nb, int(default_dim) // 2)
insert_entities = copy.deepcopy(default_entities_rows)
insert_entities[-1][default_float_vec_field_name] = vectors
with pytest.raises(Exception) as e:
ids = connect.insert(collection, insert_entities)
class TestInsertBinary:
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request):
request.param["metric_type"] = "JACCARD"
return request.param
@pytest.mark.skip
def test_insert_binary_entities(self, connect, binary_collection):
'''
target: test insert entities in binary collection
method: create collection and insert binary entities in it
expected: the collection row count equals to nb
'''
ids = connect.insert(binary_collection, default_binary_entities_rows)
assert len(ids) == default_nb
connect.flush()
assert connect.count_entities(binary_collection) == default_nb
@pytest.mark.skip
def test_insert_binary_tag(self, connect, binary_collection):
'''
target: test insert entities and create partition tag
method: create collection and insert binary entities in it, with the partition_tag param
expected: the collection row count equals to nb
'''
connect.create_partition(binary_collection, default_tag)
ids = connect.insert(binary_collection, default_binary_entities_rows, partition_tag=default_tag)
assert len(ids) == default_nb
assert connect.has_partition(binary_collection, default_tag)
# TODO
@pytest.mark.skip
@pytest.mark.level(2)
def test_insert_binary_multi_times(self, connect, binary_collection):
'''
target: test insert entities multi times and final flush
method: create collection and insert binary entity multi and final flush
expected: the collection row count equals to nb
'''
for i in range(default_nb):
ids = connect.insert(binary_collection, default_binary_entity_row)
assert len(ids) == 1
connect.flush([binary_collection])
assert connect.count_entities(binary_collection) == default_nb
@pytest.mark.skip
def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
'''
target: test insert binary entities after build index
method: build index and insert entities
expected: no error raised
'''
connect.create_index(binary_collection, binary_field_name, get_binary_index)
ids = connect.insert(binary_collection, default_binary_entities_rows)
assert len(ids) == default_nb
connect.flush([binary_collection])
info = connect.get_collection_info(binary_collection)
fields = info["fields"]
for field in fields:
if field["name"] == binary_field_name:
assert field["indexes"][0] == get_binary_index
@pytest.mark.skip
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
'''
target: test build index insert after vector
method: insert vector and build index
expected: no error raised
'''
ids = connect.insert(binary_collection, default_binary_entities_rows)
assert len(ids) == default_nb
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_binary_index)
info = connect.get_collection_info(binary_collection)
fields = info["fields"]
for field in fields:
if field["name"] == binary_field_name:
assert field["indexes"][0] == get_binary_index
class TestInsertInvalid(object):
"""
Test inserting vectors with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_int_value(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_entity_id(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
yield request.param
@pytest.mark.skip("todo support field check")
def test_insert_field_name_not_match(self, connect, collection):
'''
target: test insert, with field name not matched
method: create collection and insert entities in it
expected: raise an exception
'''
tmp_entity = copy.deepcopy(default_entity_row)
tmp_entity[0]["string"] = "string"
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception):
connect.insert(collection_name, default_entity_row)
def test_insert_with_invalid_tag_name(self, connect, collection, get_tag_name):
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
if tag_name is not None:
with pytest.raises(Exception):
connect.insert(collection, default_entity_row, partition_tag=tag_name)
else:
connect.insert(collection, default_entity_row, partition_tag=tag_name)
@pytest.mark.skip("todo support field check")
def test_insert_with_less_field(self, connect, collection):
tmp_entity = copy.deepcopy(default_entity_row)
tmp_entity[0].pop(default_float_vec_field_name)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
def test_insert_with_less_field_id(self, connect, id_collection):
tmp_entity = copy.deepcopy(gen_entities_rows(default_nb, _id=False))
tmp_entity[0].pop("_id")
with pytest.raises(Exception):
connect.insert(id_collection, tmp_entity)
def test_insert_with_more_field(self, connect, collection):
tmp_entity = copy.deepcopy(default_entity_row)
tmp_entity[0]["new_field"] = 1
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
def test_insert_with_more_field_id(self, connect, collection):
tmp_entity = copy.deepcopy(default_entity_row)
tmp_entity[0]["_id"] = 1
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.skip("todo support field check")
def test_insert_with_invalid_field_vector_value(self, connect, collection, get_field_vectors_value):
tmp_entity = copy.deepcopy(default_entity_row)
tmp_entity[0][default_float_vec_field_name][1] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)

View File

@ -1,86 +0,0 @@
import pytest
from .utils import *
from .constants import *
uid = "list_collections"
class TestListCollections:
"""
******************************************************************
The following cases are used to test `list_collections` function
******************************************************************
"""
def test_list_collections(self, connect, collection):
'''
target: test list collections
method: create collection, assert the value returned by list_collections method
expected: True
'''
assert collection in connect.list_collections()
def test_list_collections_multi_collections(self, connect):
'''
target: test list collections
method: create collection, assert the value returned by list_collections method
expected: True
'''
collection_num = 50
for i in range(collection_num):
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
assert collection_name in connect.list_collections()
@pytest.mark.level(2)
def test_list_collections_without_connection(self, dis_connect):
'''
target: test list collections, without connection
method: calling list collections with correct params, with a disconnected instance
expected: list collections raise exception
'''
with pytest.raises(Exception) as e:
dis_connect.list_collections()
def test_list_collections_not_existed(self, connect):
'''
target: test if collection not created
method: use a random collection name that does not exist in db,
assert the value returned by list_collections method
expected: False
'''
collection_name = gen_unique_str(uid)
assert collection_name not in connect.list_collections()
@pytest.mark.level(2)
def test_list_collections_no_collection(self, connect):
'''
target: test that list_collections is correct when no collection is in db
method: delete all collections,
assert the value returned by list_collections method is equal to []
expected: the status is ok, and the result is equal to []
'''
result = connect.list_collections()
if result:
for collection_name in result:
assert connect.has_collection(collection_name)
@pytest.mark.level(2)
def test_list_collections_multithread(self, connect):
'''
target: test create collection with multithread
method: create collection using multithread,
expected: collections are created
'''
threads_num = 4
threads = []
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
def _list():
assert collection_name in connect.list_collections()
for i in range(threads_num):
t = threading.Thread(target=_list, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()

View File

@ -1,395 +0,0 @@
import pytest
from .utils import *
from .constants import *
TIMEOUT = 120
class TestCreateBase:
"""
******************************************************************
The following cases are used to test `create_partition` function
******************************************************************
"""
def test_create_partition(self, connect, collection):
'''
target: test create partition, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(collection, default_tag)
@pytest.mark.level(2)
@pytest.mark.timeout(600)
def test_create_partition_limit(self, connect, collection, args):
'''
target: test create partitions, check status returned
method: call function: create_partition for 4097 times
expected: exception raised
'''
threads_num = 8
threads = []
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
def create(connect, threads_num):
for i in range(max_partition_num // threads_num):
tag_tmp = gen_unique_str()
connect.create_partition(collection, tag_tmp)
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
t = threading.Thread(target=create, args=(m, threads_num, ))
threads.append(t)
t.start()
for t in threads:
t.join()
tag_tmp = gen_unique_str()
with pytest.raises(Exception) as e:
connect.create_partition(collection, tag_tmp)
def test_create_partition_repeat(self, connect, collection):
'''
target: test create partition, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.create_partition(collection, default_tag)
def test_create_partition_collection_not_existed(self, connect):
'''
target: test create partition whose owner collection does not exist in db, check status returned
method: call function: create_partition
expected: status not ok
'''
collection_name = gen_unique_str()
with pytest.raises(Exception) as e:
connect.create_partition(collection_name, default_tag)
def test_create_partition_tag_name_None(self, connect, collection):
'''
target: test create partition with tag name set to None
method: call create_partition with tag_name=None
expected: exception raised
'''
tag_name = None
with pytest.raises(Exception) as e:
connect.create_partition(collection, tag_name)
def test_create_different_partition_tags(self, connect, collection):
'''
target: test create two partitions with different tag names
method: call create_partition twice with different tags
expected: both tags, plus _default, appear in list_partitions
'''
connect.create_partition(collection, default_tag)
tag_name = gen_unique_str()
connect.create_partition(collection, tag_name)
tag_list = connect.list_partitions(collection)
assert default_tag in tag_list
assert tag_name in tag_list
assert "_default" in tag_list
@pytest.mark.skip("not support custom id")
def test_create_partition_insert_default(self, connect, id_collection):
'''
target: test insert with explicit ids after creating a partition
method: create a partition, then bulk_insert entities with explicit ids
expected: the returned ids match the input ids
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
insert_ids = connect.bulk_insert(id_collection, default_entities, ids)
assert len(insert_ids) == len(ids)
@pytest.mark.skip("not support custom id")
def test_create_partition_insert_with_tag(self, connect, id_collection):
'''
target: test insert into a partition with explicit ids
method: create a partition, then bulk_insert with partition_tag set
expected: the returned ids match the input ids
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
insert_ids = connect.bulk_insert(id_collection, default_entities, ids, partition_tag=default_tag)
assert len(insert_ids) == len(ids)
def test_create_partition_insert_with_tag_not_existed(self, connect, collection):
'''
target: test insert into a partition tag that was never created
method: create a partition, then bulk_insert with a different, non-existent tag
expected: status not ok
'''
tag_new = "tag_new"
connect.create_partition(collection, default_tag)
ids = [i for i in range(default_nb)]
with pytest.raises(Exception) as e:
insert_ids = connect.bulk_insert(collection, default_entities, ids, partition_tag=tag_new)
@pytest.mark.skip("not support custom id")
def test_create_partition_insert_same_tags(self, connect, id_collection):
'''
target: test two inserts into the same partition tag
method: bulk_insert twice with the same partition_tag, then flush
expected: count_entities equals default_nb * 2
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
insert_ids = connect.bulk_insert(id_collection, default_entities, ids, partition_tag=default_tag)
ids = [(i+default_nb) for i in range(default_nb)]
new_insert_ids = connect.bulk_insert(id_collection, default_entities, ids, partition_tag=default_tag)
connect.flush([id_collection])
res = connect.count_entities(id_collection)
assert res == default_nb * 2
@pytest.mark.level(2)
@pytest.mark.skip("not support count entities")
def test_create_partition_insert_same_tags_two_collections(self, connect, collection):
'''
target: test inserting with the same tag into partitions of two different collections
method: create the same partition tag in two collections, insert into each, then flush
expected: status ok, each collection contains default_nb entities
'''
connect.create_partition(collection, default_tag)
collection_new = gen_unique_str()
connect.create_collection(collection_new, default_fields)
connect.create_partition(collection_new, default_tag)
ids = connect.bulk_insert(collection, default_entities, partition_tag=default_tag)
ids = connect.bulk_insert(collection_new, default_entities, partition_tag=default_tag)
connect.flush([collection, collection_new])
res = connect.count_entities(collection)
assert res == default_nb
res = connect.count_entities(collection_new)
assert res == default_nb
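Before moving on to the show/has/drop cases, a condensed, hedged sketch of the tag-scoped insert flow this class exercises. It uses the same placeholder connection values as the earlier sketch, and assumes default_tag, default_entities, and default_nb come from the suite's constants.

```python
# Hedged sketch: insert into a named partition, then verify the count.
# host/port/handler are placeholders, as before.
from .utils import get_milvus, gen_unique_str
from .constants import default_fields, default_tag, default_entities, default_nb

client = get_milvus(host="127.0.0.1", port="19530", handler="GRPC")
name = gen_unique_str("partition_insert_")
client.create_collection(name, default_fields)
client.create_partition(name, default_tag)         # the tag must exist first
ids = client.bulk_insert(name, default_entities, partition_tag=default_tag)
client.flush([name])                               # make the insert visible
assert client.count_entities(name) == default_nb   # one batch of default_nb
```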
class TestShowBase:
"""
******************************************************************
The following cases are used to test `list_partitions` function
******************************************************************
"""
def test_list_partitions(self, connect, collection):
'''
target: test show partitions, check status and partitions returned
method: create partition first, then call function: list_partitions
expected: status ok, partition correct
'''
connect.create_partition(collection, default_tag)
res = connect.list_partitions(collection)
assert default_tag in res
def test_list_partitions_no_partition(self, connect, collection):
'''
target: test list partitions on a collection with no user-created partitions
method: call list_partitions on a fresh collection
expected: only the _default partition is returned
'''
res = connect.list_partitions(collection)
assert len(res) == 1
def test_show_multi_partitions(self, connect, collection):
'''
target: test show partitions, check status and partitions returned
method: create partitions first, then call function: list_partitions
expected: status ok, partitions correct
'''
tag_new = gen_unique_str()
connect.create_partition(collection, default_tag)
connect.create_partition(collection, tag_new)
res = connect.list_partitions(collection)
assert default_tag in res
assert tag_new in res
class TestHasBase:
"""
******************************************************************
The following cases are used to test `has_partition` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
def test_has_partition(self, connect, collection):
'''
target: test has_partition, check status and result
method: create partition first, then call function: has_partition
expected: status ok, result true
'''
connect.create_partition(collection, default_tag)
res = connect.has_partition(collection, default_tag)
logging.getLogger().info(res)
assert res
def test_has_partition_multi_partitions(self, connect, collection):
'''
target: test has_partition, check status and result
method: create partition first, then call function: has_partition
expected: status ok, result true
'''
for tag_name in [default_tag, "tag_new", "tag_new_new"]:
connect.create_partition(collection, tag_name)
for tag_name in [default_tag, "tag_new", "tag_new_new"]:
res = connect.has_partition(collection, tag_name)
assert res
def test_has_partition_tag_not_existed(self, connect, collection):
'''
target: test has_partition with a tag that does not exist
method: call has_partition without creating the tag first
expected: status ok, result is False
'''
res = connect.has_partition(collection, default_tag)
logging.getLogger().info(res)
assert not res
def test_has_partition_collection_not_existed(self, connect, collection):
'''
target: test has_partition with a collection that does not exist
method: call has_partition on a non-existent collection
expected: status not ok
'''
with pytest.raises(Exception) as e:
res = connect.has_partition("not_existed_collection", default_tag)
@pytest.mark.level(2)
def test_has_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
'''
target: test has_partition with an invalid tag name
method: call has_partition with an invalid tag
expected: exception raised
'''
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
res = connect.has_partition(collection, tag_name)
class TestDropBase:
"""
******************************************************************
The following cases are used to test `drop_partition` function
******************************************************************
"""
def test_drop_partition(self, connect, collection):
'''
target: test drop partition, then check that it no longer exists
method: create a partition, then call drop_partition
expected: status ok, the tag no longer appears in list_partitions
'''
connect.create_partition(collection, default_tag)
connect.drop_partition(collection, default_tag)
res = connect.list_partitions(collection)
assert default_tag not in res
def test_drop_partition_tag_not_existed(self, connect, collection):
'''
target: test drop partition, but tag not existed
method: create partitions first, then call function: drop_partition
expected: status not ok
'''
connect.create_partition(collection, default_tag)
new_tag = "new_tag"
with pytest.raises(Exception) as e:
connect.drop_partition(collection, new_tag)
def test_drop_partition_tag_not_existed_A(self, connect, collection):
'''
target: test drop partition, but collection not existed
method: create partitions first, then call function: drop_partition
expected: status not ok
'''
connect.create_partition(collection, default_tag)
new_collection = gen_unique_str()
with pytest.raises(Exception) as e:
connect.drop_partition(new_collection, default_tag)
@pytest.mark.level(2)
def test_drop_partition_repeatedly(self, connect, collection):
'''
target: test drop partition twice
method: create a partition, drop it, then drop it again
expected: the second drop raises an exception, and the tag is not in list_partitions
'''
connect.create_partition(collection, default_tag)
connect.drop_partition(collection, default_tag)
time.sleep(2)
with pytest.raises(Exception) as e:
connect.drop_partition(collection, default_tag)
tag_list = connect.list_partitions(collection)
assert default_tag not in tag_list
def test_drop_partition_create(self, connect, collection):
'''
target: test drop partition and create it again
method: create a partition, drop it, then create the same tag again
expected: status ok, the tag appears in list_partitions
'''
connect.create_partition(collection, default_tag)
connect.drop_partition(collection, default_tag)
time.sleep(2)
connect.create_partition(collection, default_tag)
tag_list = connect.list_partitions(collection)
assert default_tag in tag_list
class TestNameInvalid(object):
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_drop_partition_with_invalid_collection_name(self, connect, collection, get_collection_name):
'''
target: test drop partition, with invalid collection name, check status returned
method: call function: drop_partition
expected: status not ok
'''
collection_name = get_collection_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.drop_partition(collection_name, default_tag)
@pytest.mark.level(2)
def test_drop_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
'''
target: test drop partition, with invalid tag name, check status returned
method: call function: drop_partition
expected: status not ok
'''
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.drop_partition(collection, tag_name)
@pytest.mark.level(2)
def test_list_partitions_with_invalid_collection_name(self, connect, collection, get_collection_name):
'''
target: test show partitions, with invalid collection name, check status returned
method: call function: list_partitions
expected: status not ok
'''
collection_name = get_collection_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
res = connect.list_partitions(collection_name)
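Taken together, these classes walk the full partition lifecycle. A condensed, hedged sketch follows, using the same assumed suite helpers and placeholder connection values as the earlier sketches.

```python
# Hedged sketch of the create / has / list / drop partition lifecycle,
# built only from calls exercised by the tests above.
from .utils import get_milvus, gen_unique_str
from .constants import default_fields, default_tag

client = get_milvus(host="127.0.0.1", port="19530", handler="GRPC")
name = gen_unique_str("partition_lifecycle_")
client.create_collection(name, default_fields)

client.create_partition(name, default_tag)
assert client.has_partition(name, default_tag)       # present after create
assert default_tag in client.list_partitions(name)   # listed alongside _default

client.drop_partition(name, default_tag)
assert not client.has_partition(name, default_tag)   # absent after drop
```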

File diff suppressed because it is too large