Update registerNode in indexservice

Signed-off-by: cai.zhang <cai.zhang@zilliz.com>

Parent: 4588342fa1
Commit: c35079d7e7
@@ -20,7 +20,7 @@ Checks: >
   -*, clang-diagnostic-*, -clang-diagnostic-error,
   clang-analyzer-*, -clang-analyzer-alpha*,
   google-*, -google-runtime-references, -google-readability-todo,
-  modernize-*, -modernize-pass-by-value, -modernize-use-equals-default,
+  modernize-*, -modernize-pass-by-value, -modernize-use-equals-default, -modernize-use-trailing-return-type,
   performance-faster-string-find, performance-for-range-copy,
   performance-implicit-conversion-in-loop, performance-inefficient-algorithm,
   performance-trivially-destructible, performance-inefficient-vector-operation,
Makefile (+4)
@@ -125,6 +125,10 @@ build-go: build-cpp
 	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/binlog $(PWD)/cmd/binlog/main.go 1>/dev/null
 	@echo "Building singlenode ..."
 	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/singlenode $(PWD)/cmd/singlenode/main.go 1>/dev/null
+	@echo "Building distributed indexservice ..."
+	@mkdir -p $(INSTALL_PATH)/distributed && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/distributed/indexservice $(PWD)/cmd/distributed/indexservice/main.go 1>/dev/null
+	@echo "Building distributed indexnode ..."
+	@mkdir -p $(INSTALL_PATH)/distributed && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/distributed/indexnode $(PWD)/cmd/distributed/indexnode/main.go 1>/dev/null
 
 build-cpp:
 	@(env bash $(PWD)/scripts/core_build.sh -f "$(CUSTOM_THIRDPARTY_PATH)")
@@ -23,7 +23,6 @@ import (
 )
 
 func main() {
-	grpcindexnode.Init()
 	ctx, cancel := context.WithCancel(context.Background())
 	svr, err := grpcindexnode.CreateIndexNode(ctx)
 	if err != nil {
@@ -23,7 +23,6 @@ import (
 )
 
 func main() {
-	grpcindexserver.Init()
 	ctx, cancel := context.WithCancel(context.Background())
 	svr, err := grpcindexserver.CreateIndexServer(ctx)
 	if err != nil {
@@ -84,6 +84,13 @@ datatype_is_vector(DataType datatype) {
 
 struct FieldMeta {
  public:
+    FieldMeta(const FieldMeta&) = delete;
+    FieldMeta(FieldMeta&&) = default;
+    FieldMeta&
+    operator=(const FieldMeta&) = delete;
+    FieldMeta&
+    operator=(FieldMeta&&) = default;
+
     FieldMeta(const FieldName& name, FieldId id, DataType type) : name_(name), id_(id), type_(type) {
         Assert(!is_vector());
     }
@@ -26,6 +26,6 @@ struct LoadIndexInfo {
 // NOTE: Refer to common/SystemProperty.cpp for details
 struct LoadFieldDataInfo {
     int64_t field_id;
-    void* blob;
-    int64_t row_count;
+    void* blob = nullptr;
+    int64_t row_count = -1;
 };
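Note: the new in-class initializers give LoadFieldDataInfo a detectable "not filled in" state. A minimal standalone sketch of the idea (plain asserts stand in for the project's Assert; the row-count check mirrors the Assert(info.row_count > 0) in the new SegmentSealedImpl.cpp further down):

#include <cassert>
#include <cstdint>

struct LoadFieldDataInfo {
    int64_t field_id;
    void* blob = nullptr;    // an unset blob is now a well-defined nullptr
    int64_t row_count = -1;  // an unset row count fails the row_count > 0 check
};

void
LoadFieldDataSketch(const LoadFieldDataInfo& info) {
    assert(info.blob != nullptr);  // a caller that forgot to fill the struct is caught here
    assert(info.row_count > 0);    // instead of the loader reading garbage memory
}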
@@ -23,20 +23,24 @@ namespace milvus {
 
 class Schema {
  public:
-    void
+    FieldId
     AddDebugField(const std::string& name, DataType data_type) {
         static int64_t debug_id = 1000;
-        this->AddField(FieldName(name), FieldId(debug_id), data_type);
-        debug_id++;
+        auto field_id = FieldId(debug_id);
+        debug_id += 2;
+        this->AddField(FieldName(name), field_id, data_type);
+        return field_id;
     }
 
     // auto gen field_id for convenience
-    void
+    FieldId
     AddDebugField(const std::string& name, DataType data_type, int64_t dim, MetricType metric_type) {
-        static int64_t debug_id = 2000;
-        auto field_meta = FieldMeta(FieldName(name), FieldId(debug_id), data_type, dim, metric_type);
-        debug_id++;
+        static int64_t debug_id = 2001;
+        auto field_id = FieldId(debug_id);
+        debug_id += 2;
+        auto field_meta = FieldMeta(FieldName(name), field_id, data_type, dim, metric_type);
         this->AddField(std::move(field_meta));
+        return field_id;
     }
 
     // scalar type
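Note: AddDebugField now hands back the generated FieldId instead of returning void, so callers no longer need to know the debug-id scheme. A minimal sketch of the intended usage, modeled on the TEST(Sealed, LoadFieldData) case added later in this commit (Schema, DataType, LoadFieldDataInfo and CreateSealedSegment are the declarations from this diff; N and the counter data are placeholders):

static void
SketchLoadCounterColumn() {
    int64_t N = 1000;
    std::vector<int64_t> counter(N);  // scalar column to load

    auto schema = std::make_shared<Schema>();
    auto counter_id = schema->AddDebugField("counter", DataType::INT64);  // FieldId comes back

    auto segment = CreateSealedSegment(schema);
    LoadFieldDataInfo field_info;
    field_info.field_id = counter_id.get();  // raw int64 id via FieldId::get()
    field_info.row_count = N;
    field_info.blob = counter.data();
    segment->LoadFieldData(field_info);
}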
@@ -141,13 +145,14 @@ class Schema {
     void
     AddField(FieldMeta&& field_meta) {
         auto offset = fields_.size();
-        fields_.emplace_back(field_meta);
         AssertInfo(!name_offsets_.count(field_meta.get_name()), "duplicated field name");
         name_offsets_.emplace(field_meta.get_name(), offset);
         AssertInfo(!id_offsets_.count(field_meta.get_id()), "duplicated field id");
         id_offsets_.emplace(field_meta.get_id(), offset);
 
         auto field_sizeof = field_meta.get_sizeof();
         sizeof_infos_.push_back(std::move(field_sizeof));
+        fields_.emplace_back(std::move(field_meta));
         total_sizeof_ += field_sizeof;
     }
 
@@ -41,7 +41,7 @@ template <class...>
 constexpr std::false_type always_false{};
 
 template <typename T>
-using aligned_vector = std::vector<T, boost::alignment::aligned_allocator<T, 512>>;
+using aligned_vector = std::vector<T, boost::alignment::aligned_allocator<T, 64>>;
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////
 struct QueryResult {
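Note: the second template argument of boost::alignment::aligned_allocator is the alignment in bytes, so the change above only relaxes where element storage may start (64-byte, i.e. cache-line, boundaries instead of 512). A small self-contained sketch, independent of the Milvus sources:

#include <boost/align/aligned_allocator.hpp>
#include <cassert>
#include <cstdint>
#include <vector>

template <typename T>
using aligned_vector = std::vector<T, boost::alignment::aligned_allocator<T, 64>>;

int
main() {
    aligned_vector<float> v(1024);
    // The buffer returned by the allocator starts on a 64-byte boundary.
    assert(reinterpret_cast<std::uintptr_t>(v.data()) % 64 == 0);
    return 0;
}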
@@ -200,7 +200,7 @@ Parser::ParseVecNode(const Json& out_body) {
     auto field_offset = schema.get_offset(field_name);
 
     auto vec_node = [&]() -> std::unique_ptr<VectorPlanNode> {
-        auto field_meta = schema.operator[](field_name);
+        auto& field_meta = schema.operator[](field_name);
         auto data_type = field_meta.get_data_type();
         if (data_type == DataType::VECTOR_FLOAT) {
             return std::make_unique<FloatVectorANNS>();
@@ -252,7 +252,7 @@ template <typename T>
 ExprPtr
 Parser::ParseRangeNodeImpl(const FieldName& field_name, const Json& body) {
     auto expr = std::make_unique<RangeExprImpl<T>>();
-    auto field_meta = schema[field_name];
+    auto& field_meta = schema[field_name];
     auto data_type = field_meta.get_data_type();
     expr->data_type_ = data_type;
     expr->field_offset_ = schema.get_offset(field_name);
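Note: the auto -> auto& changes in this file follow from FieldMeta becoming move-only earlier in this commit; binding schema[field_name] to a plain auto would now try to call the deleted copy constructor (previously it silently copied the FieldMeta). A small standalone illustration of the rule, with Meta standing in for FieldMeta:

struct Meta {
    Meta() = default;
    Meta(const Meta&) = delete;             // copying disabled, as for FieldMeta above
    Meta& operator=(const Meta&) = delete;
    Meta(Meta&&) = default;
    Meta& operator=(Meta&&) = default;
};

inline const Meta&
lookup() {  // stands in for schema[field_name]
    static Meta m;
    return m;
}

inline void
use() {
    // auto by_value = lookup();  // ill-formed: would call the deleted copy constructor
    auto& by_ref = lookup();      // fine: just a reference to the stored object
    (void)by_ref;
}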
@@ -130,7 +130,7 @@ ExecExprVisitor::ExecRangeVisitorImpl(RangeExprImpl<T>& expr, IndexFunc index_fu
 
     // RetType results(vec.num_chunk());
     auto indexing_barrier = segment_.num_chunk_index_safe(field_offset);
-    auto chunk_size = segment_.chunk_size();
+    auto chunk_size = segment_.size_per_chunk();
     auto num_chunk = upper_div(row_count_, chunk_size);
     RetType results;
 
@@ -290,7 +290,7 @@ ExecExprVisitor::ExecTermVisitorImpl(TermExpr& expr_raw) -> RetType {
     auto& field_meta = schema[field_offset];
     // auto vec_ptr = records.get_entity<T>(field_offset);
     // auto& vec = *vec_ptr;
-    auto chunk_size = segment_.chunk_size();
+    auto chunk_size = segment_.size_per_chunk();
     auto num_chunk = upper_div(row_count_, chunk_size);
     RetType bitsets;
 
@@ -1,10 +1,11 @@
 
 set(SEGCORE_FILES
-        SegmentGrowingImpl.cpp
         Collection.cpp
         collection_c.cpp
         segment_c.cpp
         SegmentGrowing.cpp
+        SegmentGrowingImpl.cpp
+        SegmentSealedImpl.cpp
         IndexingEntry.cpp
         InsertRecord.cpp
         Reduce.cpp
@@ -31,7 +31,10 @@ using SealedIndexingEntryPtr = std::unique_ptr<SealedIndexingEntry>;
 
 struct SealedIndexingRecord {
     void
-    add_entry(FieldOffset field_offset, SealedIndexingEntryPtr&& ptr) {
+    add_entry(FieldOffset field_offset, MetricType metric_type, knowhere::VecIndexPtr indexing) {
+        auto ptr = std::make_unique<SealedIndexingEntry>();
+        ptr->indexing_ = indexing;
+        ptr->metric_type_ = metric_type;
         std::unique_lock lck(mutex_);
         entries_[field_offset] = std::move(ptr);
     }
@@ -270,7 +270,7 @@ SegmentGrowingImpl::FillTargetEntry(const query::Plan* plan, QueryResult& result
     auto key_offset_opt = schema_->get_primary_key_offset();
     Assert(key_offset_opt.has_value());
     auto key_offset = key_offset_opt.value();
-    auto field_meta = schema_->operator[](key_offset);
+    auto& field_meta = schema_->operator[](key_offset);
     Assert(field_meta.get_data_type() == DataType::INT64);
     auto uids = record_.get_entity<int64_t>(key_offset);
     for (int64_t i = 0; i < size; ++i) {
@@ -290,12 +290,8 @@ SegmentGrowingImpl::LoadIndexing(const LoadIndexInfo& info) {
 
     Assert(info.index_params.count("metric_type"));
     auto metric_type_str = info.index_params.at("metric_type");
-    auto entry = std::make_unique<SealedIndexingEntry>();
-
-    entry->metric_type_ = GetMetricType(metric_type_str);
-    entry->indexing_ = info.index;
-
-    sealed_indexing_record_.add_entry(field_offset, std::move(entry));
+
+    sealed_indexing_record_.add_entry(field_offset, GetMetricType(metric_type_str), info.index);
 
     return Status::OK();
 }
 
@@ -306,7 +302,7 @@ SegmentGrowingImpl::chunk_data_impl(FieldOffset field_offset, int64_t chunk_id)
 }
 
 int64_t
-SegmentGrowingImpl::get_safe_num_chunk() const {
+SegmentGrowingImpl::num_chunk_data() const {
     auto size = get_insert_record().ack_responder_.GetAck();
     return upper_div(size, chunk_size_);
 }
@@ -105,7 +105,7 @@ class SegmentGrowingImpl : public SegmentGrowing {
     }
 
     int64_t
-    chunk_size() const final {
+    size_per_chunk() const final {
         return chunk_size_;
     }
 
@@ -126,7 +126,7 @@ class SegmentGrowingImpl : public SegmentGrowing {
     }
 
     int64_t
-    get_safe_num_chunk() const override;
+    num_chunk_data() const override;
 
     Status
     LoadIndexing(const LoadIndexInfo& info) override;
@@ -36,27 +36,21 @@ class SegmentInterface {
     virtual int64_t
     get_row_count() const = 0;
 
+    virtual const Schema&
+    get_schema() const = 0;
+
     virtual ~SegmentInterface() = default;
 };
 
 // internal API for DSL calculation
 class SegmentInternalInterface : public SegmentInterface {
  public:
-    virtual const Schema&
-    get_schema() const = 0;
-
-    virtual int64_t
-    get_safe_num_chunk() const = 0;
-
     template <typename T>
     Span<T>
     chunk_data(FieldOffset field_offset, int64_t chunk_id) const {
         return static_cast<Span<T>>(chunk_data_impl(field_offset, chunk_id));
     }
 
-    virtual int64_t
-    num_chunk_index_safe(FieldOffset field_offset) const = 0;
-
     template <typename T>
     const knowhere::scalar::StructuredIndex<T>&
     chunk_scalar_index(FieldOffset field_offset, int64_t chunk_id) const {
@@ -68,8 +62,16 @@ class SegmentInternalInterface : public SegmentInterface {
         return *ptr;
     }
 
+ public:
     virtual int64_t
-    chunk_size() const = 0;
+    num_chunk_index_safe(FieldOffset field_offset) const = 0;
+
+    virtual int64_t
+    num_chunk_data() const = 0;
+
+    // return chunk_size for each chunk, renaming against confusion
+    virtual int64_t
+    size_per_chunk() const = 0;
 
  protected:
     // blob and row_count
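Note on the renamed accessors: size_per_chunk() is the per-chunk row capacity and num_chunk_data() is how many chunks currently hold data, so for a growing segment the two are tied together through the row count, as in upper_div(size, chunk_size_) in SegmentGrowingImpl::num_chunk_data above. A tiny self-contained sketch of that relationship (the local upper_div stands in for the project's helper):

#include <cassert>
#include <cstdint>

inline int64_t
upper_div(int64_t value, int64_t divisor) {  // ceiling division
    return (value + divisor - 1) / divisor;
}

int
main() {
    int64_t row_count = 100 * 1000;      // rows inserted so far
    int64_t size_per_chunk = 32 * 1024;  // what size_per_chunk() reports
    int64_t num_chunk = upper_div(row_count, size_per_chunk);
    assert(num_chunk == 4);  // 100000 / 32768, rounded up
    return 0;
}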
@@ -16,12 +16,8 @@
 
 namespace milvus::segcore {
 
-class SegmentSealed : public SegmentInterface {
+class SegmentSealed : public SegmentInternalInterface {
  public:
-    virtual const Schema&
-    get_schema() = 0;
-    virtual int64_t
-    get_row_count() = 0;
     virtual void
     LoadIndex(const LoadIndexInfo& info) = 0;
     virtual void
@@ -31,8 +27,6 @@ class SegmentSealed : public SegmentInterface {
 using SegmentSealedPtr = std::unique_ptr<SegmentSealed>;
 
 SegmentSealedPtr
-CreateSealedSegment(SchemaPtr schema, int64_t chunk_size = 32 * 1024) {
-    return nullptr;
-}
+CreateSealedSegment(SchemaPtr schema, int64_t chunk_size = 32 * 1024);
 
 }  // namespace milvus::segcore
internal/core/src/segcore/SegmentSealedImpl.cpp (new file, 119 lines)
@@ -0,0 +1,119 @@
+// Copyright (C) 2019-2020 Zilliz. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed under the License
+// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+// or implied. See the License for the specific language governing permissions and limitations under the License
+
+#include "segcore/SegmentSealedImpl.h"
+namespace milvus::segcore {
+void
+SegmentSealedImpl::LoadIndex(const LoadIndexInfo& info) {
+    auto field_id = FieldId(info.field_id);
+    auto field_offset = schema_->get_offset(field_id);
+
+    Assert(info.index_params.count("metric_type"));
+    auto metric_type_str = info.index_params.at("metric_type");
+    auto row_count = info.index->Count();
+    Assert(row_count > 0);
+
+    std::unique_lock lck(mutex_);
+    if (row_count_opt_.has_value()) {
+        AssertInfo(row_count_opt_.value() == row_count, "load data has different row count from other columns");
+    } else {
+        row_count_opt_ = row_count;
+    }
+    Assert(!vec_indexings_.is_ready(field_offset));
+    vec_indexings_.add_entry(field_offset, GetMetricType(metric_type_str), info.index);
+    ++ready_count_;
+}
+
+void
+SegmentSealedImpl::LoadFieldData(const LoadFieldDataInfo& info) {
+    // TODO
+    Assert(info.row_count > 0);
+    auto field_id = FieldId(info.field_id);
+    auto field_offset = schema_->get_offset(field_id);
+    auto& field_meta = schema_->operator[](field_offset);
+    Assert(!field_meta.is_vector());
+    auto element_sizeof = field_meta.get_sizeof();
+    auto length_in_bytes = element_sizeof * info.row_count;
+    aligned_vector<char> vecdata(length_in_bytes);
+    memcpy(vecdata.data(), info.blob, length_in_bytes);
+    std::unique_lock lck(mutex_);
+    if (row_count_opt_.has_value()) {
+        AssertInfo(row_count_opt_.value() == info.row_count, "load data has different row count from other columns");
+    } else {
+        row_count_opt_ = info.row_count;
+    }
+    AssertInfo(columns_data_[field_offset.get()].empty(), "already exists");
+    columns_data_[field_offset.get()] = std::move(vecdata);
+    ++ready_count_;
+}
+
+int64_t
+SegmentSealedImpl::num_chunk_index_safe(FieldOffset field_offset) const {
+    // TODO: support scalar index
+    return 0;
+}
+
+int64_t
+SegmentSealedImpl::num_chunk_data() const {
+    PanicInfo("unimplemented");
+}
+
+int64_t
+SegmentSealedImpl::size_per_chunk() const {
+    PanicInfo("unimplemented");
+}
+
+SpanBase
+SegmentSealedImpl::chunk_data_impl(FieldOffset field_offset, int64_t chunk_id) const {
+    PanicInfo("unimplemented");
+}
+
+const knowhere::Index*
+SegmentSealedImpl::chunk_index_impl(FieldOffset field_offset, int64_t chunk_id) const {
+    PanicInfo("unimplemented");
+}
+
+void
+SegmentSealedImpl::FillTargetEntry(const query::Plan* Plan, QueryResult& results) const {
+    PanicInfo("unimplemented");
+}
+
+QueryResult
+SegmentSealedImpl::Search(const query::Plan* Plan,
+                          const query::PlaceholderGroup** placeholder_groups,
+                          const Timestamp* timestamps,
+                          int64_t num_groups) const {
+    PanicInfo("unimplemented");
+}
+
+int64_t
+SegmentSealedImpl::GetMemoryUsageInBytes() const {
+    PanicInfo("unimplemented");
+}
+
+int64_t
+SegmentSealedImpl::get_row_count() const {
+    std::shared_lock lck(mutex_);
+    AssertInfo(row_count_opt_.has_value(), "Data not loaded");
+    return row_count_opt_.value();
+}
+
+const Schema&
+SegmentSealedImpl::get_schema() const {
+    return *schema_;
+}
+
+SegmentSealedPtr
+CreateSealedSegment(SchemaPtr schema, int64_t chunk_size) {
+    return std::make_unique<SegmentSealedImpl>(schema);
+}
+
+}  // namespace milvus::segcore
internal/core/src/segcore/SegmentSealedImpl.h (new file, 84 lines)
@@ -0,0 +1,84 @@
+// Copyright (C) 2019-2020 Zilliz. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed under the License
+// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+// or implied. See the License for the specific language governing permissions and limitations under the License
+
+#pragma once
+#include "segcore/SegmentSealed.h"
+#include "SealedIndexingRecord.h"
+#include <map>
+#include <vector>
+
+namespace milvus::segcore {
+class SegmentSealedImpl : public SegmentSealed {
+ public:
+    explicit SegmentSealedImpl(SchemaPtr schema) : schema_(schema), columns_data_(schema->size()) {
+    }
+    void
+    LoadIndex(const LoadIndexInfo& info) override;
+    void
+    LoadFieldData(const LoadFieldDataInfo& info) override;
+
+ public:
+    void
+    FillTargetEntry(const query::Plan* Plan, QueryResult& results) const override;
+
+    QueryResult
+    Search(const query::Plan* Plan,
+           const query::PlaceholderGroup* placeholder_groups[],
+           const Timestamp timestamps[],
+           int64_t num_groups) const override;
+
+    int64_t
+    GetMemoryUsageInBytes() const override;
+
+    int64_t
+    get_row_count() const override;
+
+    const Schema&
+    get_schema() const override;
+
+ public:
+    int64_t
+    num_chunk_index_safe(FieldOffset field_offset) const override;
+
+    int64_t
+    num_chunk_data() const override;
+
+    // return chunk_size for each chunk, renaming against confusion
+    int64_t
+    size_per_chunk() const override;
+
+ protected:
+    // blob and row_count
+    SpanBase
+    chunk_data_impl(FieldOffset field_offset, int64_t chunk_id) const override;
+
+    const knowhere::Index*
+    chunk_index_impl(FieldOffset field_offset, int64_t chunk_id) const override;
+
+ private:
+    bool
+    is_all_ready() const {
+        return ready_count_ == schema_->size();
+    }
+
+    mutable std::shared_mutex mutex_;
+    std::atomic_int ready_count_ = 0;
+
+ private:
+    // TOOD: generate index for scalar
+    std::optional<int64_t> row_count_opt_;
+    std::map<FieldOffset, knowhere::IndexPtr> scalar_indexings_;
+    SealedIndexingRecord vec_indexings_;
+    std::vector<aligned_vector<char>> columns_data_;
+    aligned_vector<idx_t> row_ids_;
+    SchemaPtr schema_;
+};
+}  // namespace milvus::segcore
@@ -208,7 +208,7 @@ PreDelete(CSegmentInterface c_segment, int64_t size) {
     return segment->PreDelete(size);
 }
 
-////////////////////////////// interfaces for growing segment //////////////////////////////
+////////////////////////////// interfaces for sealed segment //////////////////////////////
 CStatus
 LoadFieldData(CSegmentInterface c_segment, CLoadFieldDataInfo load_field_data_info) {
     auto segment = (milvus::segcore::SegmentSealed*)c_segment;
@@ -18,6 +18,7 @@
 #include <knowhere/index/vector_index/adapter/VectorAdapter.h>
 #include <knowhere/index/vector_index/VecIndexFactory.h>
 #include <knowhere/index/vector_index/IndexIVF.h>
+#include "segcore/SegmentSealedImpl.h"
 
 using namespace milvus;
 using namespace milvus::segcore;
@@ -213,4 +214,45 @@ TEST(Sealed, with_predicate) {
         ASSERT_EQ(post_qr.internal_seg_offsets_[offset], 420000 + i);
         ASSERT_EQ(post_qr.result_distances_[offset], 0.0);
     }
+}
+
+TEST(Sealed, LoadFieldData) {
+    auto dim = 16;
+    auto topK = 5;
+    int64_t N = 1000 * 1000;
+    auto metric_type = MetricType::METRIC_L2;
+    auto schema = std::make_shared<Schema>();
+    auto fakevec_id = schema->AddDebugField("fakevec", DataType::VECTOR_FLOAT, dim, metric_type);
+    auto counter_id = schema->AddDebugField("counter", DataType::INT64);
+    auto dataset = DataGen(schema, N);
+
+    auto fakevec = dataset.get_col<float>(0);
+
+    auto counter = dataset.get_col<int64_t>(1);
+    auto indexing = std::make_shared<knowhere::IVF>();
+
+    auto conf = knowhere::Config{{knowhere::meta::DIM, dim},
+                                 {knowhere::meta::TOPK, topK},
+                                 {knowhere::IndexParams::nlist, 100},
+                                 {knowhere::IndexParams::nprobe, 10},
+                                 {knowhere::Metric::TYPE, milvus::knowhere::Metric::L2},
+                                 {knowhere::meta::DEVICEID, 0}};
+    auto database = knowhere::GenDataset(N, dim, fakevec.data());
+    indexing->Train(database, conf);
+    indexing->AddWithoutIds(database, conf);
+
+    auto segment = CreateSealedSegment(schema);
+    LoadFieldDataInfo field_info;
+    field_info.field_id = counter_id.get();
+    field_info.row_count = N;
+    field_info.blob = counter.data();
+    segment->LoadFieldData(field_info);
+
+    LoadIndexInfo vec_info;
+    vec_info.field_id = fakevec_id.get();
+    vec_info.field_name = "fakevec";
+    vec_info.index = indexing;
+    vec_info.index_params["metric_type"] = milvus::knowhere::Metric::L2;
+    segment->LoadIndex(vec_info);
+    int i = 1 + 1;
 }
@@ -33,7 +33,7 @@ TEST(Span, Naive) {
     auto age_ptr = dataset.get_col<float>(1);
     auto float_ptr = dataset.get_col<float>(2);
     SegmentInternalInterface& interface = *segment;
-    auto num_chunk = interface.get_safe_num_chunk();
+    auto num_chunk = interface.num_chunk_data();
     ASSERT_EQ(num_chunk, upper_div(N, chunk_size));
     auto row_count = interface.get_row_count();
     ASSERT_EQ(N, row_count);
@@ -1,4 +1,4 @@
-package grpcindexnode
+package grpcindexnodeclient
 
 import (
 	"context"

@@ -7,7 +7,6 @@ import (
 
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
-	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
 	"google.golang.org/grpc"
 )
 

@@ -15,32 +14,6 @@ type Client struct {
 	grpcClient indexpb.IndexNodeClient
 }
 
-func (c Client) Init() {
-	//TODO:???
-	panic("implement me")
-}
-
-func (c Client) Start() {
-	//TODO:???
-	panic("implement me")
-}
-
-func (c Client) Stop() {
-	panic("implement me")
-}
-
-func (c Client) GetComponentStates() (*internalpb2.ComponentStates, error) {
-	panic("implement me")
-}
-
-func (c Client) GetTimeTickChannel() (string, error) {
-	panic("implement me")
-}
-
-func (c Client) GetStatisticsChannel() (string, error) {
-	panic("implement me")
-}
-
 func (c Client) BuildIndex(req *indexpb.BuildIndexCmd) (*commonpb.Status, error) {
 
 	ctx := context.TODO()

@@ -53,9 +26,9 @@ func NewClient(nodeAddress string) *Client {
 	defer cancel()
 	conn, err := grpc.DialContext(ctx1, nodeAddress, grpc.WithInsecure(), grpc.WithBlock())
 	if err != nil {
-		log.Printf("IndexNode connect to IndexService failed, error= %v", err)
+		log.Printf("Connect to IndexNode failed, error= %v", err)
 	}
-	log.Printf("IndexNode connected to IndexService, IndexService=%s", Params.Address)
+	log.Printf("Connected to IndexService, IndexService=%s", nodeAddress)
 	return &Client{
 		grpcClient: indexpb.NewIndexNodeClient(conn),
 	}
@@ -1,177 +0,0 @@
-package grpcindexnode
-
-import (
-	"net"
-	"strconv"
-
-	"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
-)
-
-type ParamTable struct {
-	paramtable.BaseTable
-
-	Address        string
-	Port           int
-	ServiceAddress string
-	ServicePort    int
-
-	NodeID int64
-
-	MasterAddress string
-
-	EtcdAddress  string
-	MetaRootPath string
-
-	MinIOAddress         string
-	MinIOAccessKeyID     string
-	MinIOSecretAccessKey string
-	MinIOUseSSL          bool
-	MinioBucketName      string
-}
-
-var Params ParamTable
-
-func (pt *ParamTable) Init() {
-	pt.BaseTable.Init()
-	pt.initAddress()
-	pt.initPort()
-	pt.initIndexServerAddr()
-	pt.initIndexServerPort()
-	pt.initEtcdAddress()
-	pt.initMasterAddress()
-	pt.initMetaRootPath()
-	pt.initMinIOAddress()
-	pt.initMinIOAccessKeyID()
-	pt.initMinIOSecretAccessKey()
-	pt.initMinIOUseSSL()
-	pt.initMinioBucketName()
-}
-
-func (pt *ParamTable) initAddress() {
-	addr, err := pt.Load("indexNode.address")
-	if err != nil {
-		panic(err)
-	}
-
-	hostName, _ := net.LookupHost(addr)
-	if len(hostName) <= 0 {
-		if ip := net.ParseIP(addr); ip == nil {
-			panic("invalid ip indexBuilder.address")
-		}
-	}
-
-	port, err := pt.Load("indexNode.port")
-	if err != nil {
-		panic(err)
-	}
-	_, err = strconv.Atoi(port)
-	if err != nil {
-		panic(err)
-	}
-
-	pt.Address = addr + ":" + port
-}
-
-func (pt *ParamTable) initPort() {
-	pt.Port = pt.ParseInt("indexNode.port")
-}
-
-func (pt *ParamTable) initIndexServerAddr() {
-	addr, err := pt.Load("indexServer.address")
-	if err != nil {
-		panic(err)
-	}
-
-	hostName, _ := net.LookupHost(addr)
-	if len(hostName) <= 0 {
-		if ip := net.ParseIP(addr); ip == nil {
-			panic("invalid ip indexBuilder.address")
-		}
-	}
-
-	port, err := pt.Load("indexServer.port")
-	if err != nil {
-		panic(err)
-	}
-	_, err = strconv.Atoi(port)
-	if err != nil {
-		panic(err)
-	}
-
-	pt.ServiceAddress = addr + ":" + port
-}
-
-func (pt ParamTable) initIndexServerPort() {
-	pt.ServicePort = pt.ParseInt("indexServer.port")
-}
-
-func (pt *ParamTable) initEtcdAddress() {
-	addr, err := pt.Load("_EtcdAddress")
-	if err != nil {
-		panic(err)
-	}
-	pt.EtcdAddress = addr
-}
-
-func (pt *ParamTable) initMetaRootPath() {
-	rootPath, err := pt.Load("etcd.rootPath")
-	if err != nil {
-		panic(err)
-	}
-	subPath, err := pt.Load("etcd.metaSubPath")
-	if err != nil {
-		panic(err)
-	}
-	pt.MetaRootPath = rootPath + "/" + subPath
-}
-
-func (pt *ParamTable) initMasterAddress() {
-	ret, err := pt.Load("_MasterAddress")
-	if err != nil {
-		panic(err)
-	}
-	pt.MasterAddress = ret
-}
-
-func (pt *ParamTable) initMinIOAddress() {
-	ret, err := pt.Load("_MinioAddress")
-	if err != nil {
-		panic(err)
-	}
-	pt.MinIOAddress = ret
-}
-
-func (pt *ParamTable) initMinIOAccessKeyID() {
-	ret, err := pt.Load("minio.accessKeyID")
-	if err != nil {
-		panic(err)
-	}
-	pt.MinIOAccessKeyID = ret
-}
-
-func (pt *ParamTable) initMinIOSecretAccessKey() {
-	ret, err := pt.Load("minio.secretAccessKey")
-	if err != nil {
-		panic(err)
-	}
-	pt.MinIOSecretAccessKey = ret
-}
-
-func (pt *ParamTable) initMinIOUseSSL() {
-	ret, err := pt.Load("minio.useSSL")
-	if err != nil {
-		panic(err)
-	}
-	pt.MinIOUseSSL, err = strconv.ParseBool(ret)
-	if err != nil {
-		panic(err)
-	}
-}
-
-func (pt *ParamTable) initMinioBucketName() {
-	bucketName, err := pt.Load("minio.bucketName")
-	if err != nil {
-		panic(err)
-	}
-	pt.MinioBucketName = bucketName
-}
@@ -7,7 +7,7 @@ import (
 	"strconv"
 	"sync"
 
-	grpcindexservice "github.com/zilliztech/milvus-distributed/internal/distributed/indexservice"
+	serviceclient "github.com/zilliztech/milvus-distributed/internal/distributed/indexservice/client"
 	"github.com/zilliztech/milvus-distributed/internal/indexnode"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"

@@ -19,27 +19,29 @@ type Server struct {
 
 	grpcServer *grpc.Server
 
-	indexNodeLoopCtx    context.Context
-	indexNodeLoopCancel func()
-	indexNodeLoopWg     sync.WaitGroup
+	loopCtx    context.Context
+	loopCancel func()
+	loopWg     sync.WaitGroup
 }
 
-func NewGrpcServer(ctx context.Context, indexID int64) *Server {
+func NewGrpcServer(ctx context.Context, nodeID int64) *Server {
+	ctx1, cancel := context.WithCancel(ctx)
 	return &Server{
-		node: indexnode.NewIndexNode(ctx, indexID),
+		loopCtx:    ctx1,
+		loopCancel: cancel,
+		node:       indexnode.NewIndexNode(ctx, nodeID),
 	}
 }
 
 func registerNode() error {
 
-	indexServiceClient := grpcindexservice.NewClient(Params.ServiceAddress)
+	indexServiceClient := serviceclient.NewClient(indexnode.Params.ServiceAddress)
 
 	request := &indexpb.RegisterNodeRequest{
 		Base: nil,
 		Address: &commonpb.Address{
-			Ip:   Params.Address,
-			Port: int64(Params.Port),
+			Ip:   indexnode.Params.NodeIP,
+			Port: int64(indexnode.Params.NodePort),
 		},
 	}
 	resp, err := indexServiceClient.RegisterNode(request)

@@ -48,17 +50,17 @@ func registerNode() error {
 		return err
 	}
 
-	Params.NodeID = resp.InitParams.NodeID
-	log.Println("Register indexNode successful with nodeID=", Params.NodeID)
+	indexnode.Params.NodeID = resp.InitParams.NodeID
+	log.Println("Register indexNode successful with nodeID=", indexnode.Params.NodeID)
 
-	err = Params.LoadFromKVPair(resp.InitParams.StartParams)
+	err = indexnode.Params.LoadFromKVPair(resp.InitParams.StartParams)
 	return err
 }
 
 func (s *Server) grpcLoop() {
-	defer s.indexNodeLoopWg.Done()
+	defer s.loopWg.Done()
 
-	lis, err := net.Listen("tcp", ":"+strconv.Itoa(Params.Port))
+	lis, err := net.Listen("tcp", ":"+strconv.Itoa(indexnode.Params.NodePort))
 	if err != nil {
 		log.Fatalf("IndexNode grpc server fatal error=%v", err)
 	}

@@ -68,45 +70,41 @@ func (s *Server) grpcLoop() {
 	if err = s.grpcServer.Serve(lis); err != nil {
 		log.Fatalf("IndexNode grpc server fatal error=%v", err)
 	}
-	log.Println("IndexNode grpc server starting...")
 }
 
 func (s *Server) startIndexNode() error {
-	s.indexNodeLoopWg.Add(1)
+	s.loopWg.Add(1)
 	//TODO: How to make sure that grpc server has started successfully
 	go s.grpcLoop()
 
+	log.Println("IndexNode grpc server start successfully")
+
 	err := registerNode()
 	if err != nil {
 		return err
 	}
 
-	Params.Init()
+	indexnode.Params.Init()
 	return nil
 }
 
-func Init() {
-	Params.Init()
+func (s *Server) Init() {
+	indexnode.Params.Init()
+	log.Println("IndexNode init successfully, nodeAddress=", indexnode.Params.NodeAddress)
 }
 
 func CreateIndexNode(ctx context.Context) (*Server, error) {
 
-	ctx1, cancel := context.WithCancel(ctx)
-	s := &Server{
-		indexNodeLoopCtx:    ctx1,
-		indexNodeLoopCancel: cancel,
-	}
-
-	return s, nil
+	return NewGrpcServer(ctx, indexnode.Params.NodeID), nil
}
 
 func (s *Server) Start() error {
+	s.Init()
 	return s.startIndexNode()
 }
 
 func (s *Server) Stop() {
-	s.indexNodeLoopWg.Wait()
+	s.loopWg.Wait()
 }
 
 func (s *Server) Close() {
@@ -1,4 +1,4 @@
-package grpcindexservice
+package grpcindexserviceclient
 
 import (
 	"context"

@@ -6,41 +6,14 @@ import (
 	"time"
 
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
-
-	"google.golang.org/grpc"
-
 	"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
-	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
+	"google.golang.org/grpc"
 )
 
 type Client struct {
 	grpcClient indexpb.IndexServiceClient
 }
 
-func (g Client) Init() {
-	panic("implement me")
-}
-
-func (g Client) Start() {
-	panic("implement me")
-}
-
-func (g Client) Stop() {
-	panic("implement me")
-}
-
-func (g Client) GetComponentStates() (*internalpb2.ComponentStates, error) {
-	panic("implement me")
-}
-
-func (g Client) GetTimeTickChannel() (string, error) {
-	panic("implement me")
-}
-
-func (g Client) GetStatisticsChannel() (string, error) {
-	panic("implement me")
-}
-
 func (g Client) RegisterNode(req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error) {
 
 	ctx := context.TODO()

@@ -80,9 +53,9 @@ func NewClient(address string) *Client {
 	defer cancel()
 	conn, err := grpc.DialContext(ctx1, address, grpc.WithInsecure(), grpc.WithBlock())
 	if err != nil {
-		log.Printf("IndexNode connect to IndexService failed, error= %v", err)
+		log.Printf("Connect to IndexService failed, error= %v", err)
 	}
-	log.Printf("IndexNode connected to IndexService, IndexService=%s", Params.Address)
+	log.Printf("Connected to IndexService, IndexService=%s", address)
 
 	return &Client{
 		grpcClient: indexpb.NewIndexServiceClient(conn),
@@ -1,144 +0,0 @@
-package grpcindexservice
-
-import (
-	"net"
-	"strconv"
-
-	"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
-)
-
-type ParamTable struct {
-	paramtable.BaseTable
-
-	Address string
-	Port    int
-
-	NodeID int64
-
-	MasterAddress string
-
-	EtcdAddress  string
-	MetaRootPath string
-
-	MinIOAddress         string
-	MinIOAccessKeyID     string
-	MinIOSecretAccessKey string
-	MinIOUseSSL          bool
-	MinioBucketName      string
-}
-
-var Params ParamTable
-
-func (pt *ParamTable) Init() {
-	pt.BaseTable.Init()
-	pt.initAddress()
-	pt.initPort()
-	pt.initEtcdAddress()
-	pt.initMasterAddress()
-	pt.initMetaRootPath()
-	pt.initMinIOAddress()
-	pt.initMinIOAccessKeyID()
-	pt.initMinIOSecretAccessKey()
-	pt.initMinIOUseSSL()
-	pt.initMinioBucketName()
-}
-
-func (pt *ParamTable) initAddress() {
-	addr, err := pt.Load("indexServer.address")
-	if err != nil {
-		panic(err)
-	}
-
-	hostName, _ := net.LookupHost(addr)
-	if len(hostName) <= 0 {
-		if ip := net.ParseIP(addr); ip == nil {
-			panic("invalid ip indexBuilder.address")
-		}
-	}
-
-	port, err := pt.Load("indexServer.port")
-	if err != nil {
-		panic(err)
-	}
-	_, err = strconv.Atoi(port)
-	if err != nil {
-		panic(err)
-	}
-
-	pt.Address = addr + ":" + port
-}
-
-func (pt *ParamTable) initPort() {
-	pt.Port = pt.ParseInt("indexServer.port")
-}
-
-func (pt *ParamTable) initEtcdAddress() {
-	addr, err := pt.Load("_EtcdAddress")
-	if err != nil {
-		panic(err)
-	}
-	pt.EtcdAddress = addr
-}
-
-func (pt *ParamTable) initMetaRootPath() {
-	rootPath, err := pt.Load("etcd.rootPath")
-	if err != nil {
-		panic(err)
-	}
-	subPath, err := pt.Load("etcd.metaSubPath")
-	if err != nil {
-		panic(err)
-	}
-	pt.MetaRootPath = rootPath + "/" + subPath
-}
-
-func (pt *ParamTable) initMasterAddress() {
-	ret, err := pt.Load("_MasterAddress")
-	if err != nil {
-		panic(err)
-	}
-	pt.MasterAddress = ret
-}
-
-func (pt *ParamTable) initMinIOAddress() {
-	ret, err := pt.Load("_MinioAddress")
-	if err != nil {
-		panic(err)
-	}
-	pt.MinIOAddress = ret
-}
-
-func (pt *ParamTable) initMinIOAccessKeyID() {
-	ret, err := pt.Load("minio.accessKeyID")
-	if err != nil {
-		panic(err)
-	}
-	pt.MinIOAccessKeyID = ret
-}
-
-func (pt *ParamTable) initMinIOSecretAccessKey() {
-	ret, err := pt.Load("minio.secretAccessKey")
-	if err != nil {
-		panic(err)
-	}
-	pt.MinIOSecretAccessKey = ret
-}
-
-func (pt *ParamTable) initMinIOUseSSL() {
-	ret, err := pt.Load("minio.useSSL")
-	if err != nil {
-		panic(err)
-	}
-	pt.MinIOUseSSL, err = strconv.ParseBool(ret)
-	if err != nil {
-		panic(err)
-	}
-}
-
-func (pt *ParamTable) initMinioBucketName() {
-	bucketName, err := pt.Load("minio.bucketName")
-	if err != nil {
-		panic(err)
-	}
-	pt.MinioBucketName = bucketName
-}
@@ -29,13 +29,11 @@ type Server struct {
 }
 
 func (s *Server) Init() {
-	log.Println("initing params ...")
-	Params.Init()
+	indexservice.Params.Init()
 }
 
 func (s *Server) Start() error {
 	s.Init()
-	log.Println("stringing indexserver ...")
 	return s.startIndexServer()
 }
 

@@ -64,20 +62,6 @@ func (s *Server) RegisterNode(ctx context.Context, req *indexpb.RegisterNodeRequ
 func (s *Server) BuildIndex(ctx context.Context, req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
 
 	return s.server.BuildIndex(req)
-	//indexID := int64(0)
-	//request := &indexpb.BuildIndexCmd{
-	//	IndexID: indexID,
-	//	Req:     req,
-	//}
-	//
-	//indexNodeClient := grpcindexnode.NewClient()
-	//
-	//status, err := indexNodeClient.BuildIndex(request)
-	//response := &indexpb.BuildIndexResponse{
-	//	Status:  status,
-	//	IndexID: indexID,
-	//}
-	//return response, err
 }
 
 func (s *Server) GetIndexStates(ctx context.Context, req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error) {

@@ -95,8 +79,6 @@ func (s *Server) NotifyBuildIndex(ctx context.Context, nty *indexpb.BuildIndexNo
 	return s.server.NotifyBuildIndex(nty)
 }
 
-//varindex
-
 func NewServer() *Server {
 
 	return &Server{

@@ -109,7 +91,7 @@ func (s *Server) grpcLoop() {
 	defer s.loopWg.Done()
 
 	log.Println("Starting start IndexServer")
-	lis, err := net.Listen("tcp", ":"+strconv.Itoa(Params.Port))
+	lis, err := net.Listen("tcp", ":"+strconv.Itoa(indexservice.Params.Port))
 	if err != nil {
 		log.Fatalf("IndexServer grpc server fatal error=%v", err)
 	}

@@ -121,20 +103,16 @@ func (s *Server) grpcLoop() {
 	if err = s.grpcServer.Serve(lis); err != nil {
 		log.Fatalf("IndexServer grpc server fatal error=%v", err)
 	}
-	log.Println("IndexServer grpc server starting...")
 }
 
 func (s *Server) startIndexServer() error {
 	s.loopWg.Add(1)
 	go s.grpcLoop()
+	log.Println("IndexServer grpc server start successfully")
 
 	return nil
 }
 
-func Init() {
-	Params.Init()
-}
-
 func CreateIndexServer(ctx context.Context) (*Server, error) {
 
 	ctx1, cancel := context.WithCancel(ctx)
@ -10,8 +10,6 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/zilliztech/milvus-distributed/internal/allocator"
|
"github.com/zilliztech/milvus-distributed/internal/allocator"
|
||||||
"github.com/zilliztech/milvus-distributed/internal/errors"
|
|
||||||
"github.com/zilliztech/milvus-distributed/internal/indexservice"
|
|
||||||
"github.com/zilliztech/milvus-distributed/internal/kv"
|
"github.com/zilliztech/milvus-distributed/internal/kv"
|
||||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||||
miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
|
miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
|
||||||
@ -43,8 +41,8 @@ type IndexNode struct {
|
|||||||
startCallbacks []func()
|
startCallbacks []func()
|
||||||
closeCallbacks []func()
|
closeCallbacks []func()
|
||||||
|
|
||||||
indexNodeID int64
|
indexNodeID int64
|
||||||
serviceClient indexservice.Interface // method factory
|
//serviceClient indexservice.Interface // method factory
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *IndexNode) Init() {
|
func (i *IndexNode) Init() {
|
||||||
@@ -72,58 +70,25 @@ func (i *IndexNode) GetStatisticsChannel() (string, error) {
 }
 
 func (i *IndexNode) BuildIndex(req *indexpb.BuildIndexCmd) (*commonpb.Status, error) {
-    //TODO: build index in index node
-    ctx := context.Background()
-    t := NewIndexAddTask()
-    t.req = req.Req
-    t.idAllocator = i.idAllocator
-    t.buildQueue = i.sched.IndexBuildQueue
-    t.table = i.metaTable
-    t.kv = i.kv
-    var cancel func()
-    t.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
-    defer cancel()
-
-    fn := func() error {
-        select {
-        case <-ctx.Done():
-            return errors.New("insert timeout")
-        default:
-            return i.sched.IndexAddQueue.Enqueue(t)
-        }
-    }
-    ret := &commonpb.Status{
+    log.Println("Create index with indexID=", req.IndexID)
+    return &commonpb.Status{
         ErrorCode: commonpb.ErrorCode_SUCCESS,
         Reason:    "",
-    }
+    }, nil
 
-    err := fn()
-    if err != nil {
-        ret.ErrorCode = commonpb.ErrorCode_UNEXPECTED_ERROR
-        ret.Reason = err.Error()
-        return ret, nil
-    }
-
-    err = t.WaitToFinish()
-    if err != nil {
-        ret.ErrorCode = commonpb.ErrorCode_UNEXPECTED_ERROR
-        ret.Reason = err.Error()
-        return ret, nil
-    }
-    return ret, nil
 }
 
 func CreateIndexNode(ctx context.Context) (*IndexNode, error) {
     return &IndexNode{}, nil
 }
 
-func NewIndexNode(ctx context.Context, indexID int64) *IndexNode {
+func NewIndexNode(ctx context.Context, nodeID int64) *IndexNode {
     ctx1, cancel := context.WithCancel(ctx)
     in := &IndexNode{
         loopCtx:    ctx1,
         loopCancel: cancel,
 
-        indexNodeID: indexID,
+        indexNodeID: nodeID,
     }
 
     return in
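With this hunk the node-side BuildIndex becomes a stub: it logs the incoming IndexID and immediately reports SUCCESS, while the old task-queue path (NewIndexAddTask, IndexAddQueue, WaitToFinish) is dropped. Below is a minimal sketch, not part of the commit, of driving the stub directly from inside the indexnode package; the indexpb import path is assumed by analogy with the commonpb import shown earlier, and the empty BuildIndexRequest is purely illustrative.

package indexnode

import (
    "context"
    "log"

    "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
    "github.com/zilliztech/milvus-distributed/internal/proto/indexpb" // assumed path
)

// buildIndexSmoke is a hypothetical helper that exercises the stubbed BuildIndex.
func buildIndexSmoke() {
    node := NewIndexNode(context.Background(), 1) // nodeID 1 is arbitrary
    cmd := &indexpb.BuildIndexCmd{
        IndexID: 0,
        Req:     &indexpb.BuildIndexRequest{},
    }
    status, err := node.BuildIndex(cmd)
    if err != nil || status.ErrorCode != commonpb.ErrorCode_SUCCESS {
        log.Fatal("unexpected result from BuildIndex stub")
    }
}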
@@ -13,6 +13,14 @@ type ParamTable struct {
     Address string
     Port    int
 
+    NodeAddress    string
+    NodeIP         string
+    NodePort       int
+    ServiceAddress string
+    ServicePort    int
+
+    NodeID int64
+
     MasterAddress string
 
     EtcdAddress string
@@ -31,6 +39,11 @@ func (pt *ParamTable) Init() {
     pt.BaseTable.Init()
     pt.initAddress()
     pt.initPort()
+    pt.initNodeAddress()
+    pt.initNodeIP()
+    pt.initNodePort()
+    pt.initIndexServerAddr()
+    pt.initIndexServerPort()
     pt.initEtcdAddress()
     pt.initMasterAddress()
     pt.initMetaRootPath()
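The five new initializers read the indexNode.address/port and indexServer.address/port keys and derive the composite addresses. A minimal sketch of what a caller would observe after Init(), assuming the package exposes a Params instance of ParamTable and that those keys are present in the loaded config; the printed values are illustrative only.

// Hypothetical check; key names come from the diff, values depend on the deployment config.
Params.Init()
log.Println("node address:", Params.NodeAddress)       // "<indexNode.address>:<indexNode.port>"
log.Println("service address:", Params.ServiceAddress) // "<indexServer.address>:<indexServer.port>"
log.Println("node port:", Params.NodePort, "service port:", Params.ServicePort)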
@@ -70,6 +83,72 @@ func (pt *ParamTable) initPort() {
     pt.Port = pt.ParseInt("indexBuilder.port")
 }
 
+func (pt *ParamTable) initNodeAddress() {
+    addr, err := pt.Load("indexNode.address")
+    if err != nil {
+        panic(err)
+    }
+
+    hostName, _ := net.LookupHost(addr)
+    if len(hostName) <= 0 {
+        if ip := net.ParseIP(addr); ip == nil {
+            panic("invalid ip indexBuilder.address")
+        }
+    }
+
+    port, err := pt.Load("indexNode.port")
+    if err != nil {
+        panic(err)
+    }
+    _, err = strconv.Atoi(port)
+    if err != nil {
+        panic(err)
+    }
+
+    pt.NodeAddress = addr + ":" + port
+}
+
+func (pt *ParamTable) initNodeIP() {
+    addr, err := pt.Load("indexNode.address")
+    if err != nil {
+        panic(err)
+    }
+    pt.NodeIP = addr
+}
+
+func (pt *ParamTable) initNodePort() {
+    pt.NodePort = pt.ParseInt("indexNode.port")
+}
+
+func (pt *ParamTable) initIndexServerAddr() {
+    addr, err := pt.Load("indexServer.address")
+    if err != nil {
+        panic(err)
+    }
+
+    hostName, _ := net.LookupHost(addr)
+    if len(hostName) <= 0 {
+        if ip := net.ParseIP(addr); ip == nil {
+            panic("invalid ip indexServer.address")
+        }
+    }
+
+    port, err := pt.Load("indexServer.port")
+    if err != nil {
+        panic(err)
+    }
+    _, err = strconv.Atoi(port)
+    if err != nil {
+        panic(err)
+    }
+
+    pt.ServiceAddress = addr + ":" + port
+}
+
+func (pt ParamTable) initIndexServerPort() {
+    pt.ServicePort = pt.ParseInt("indexServer.port")
+}
+
 func (pt *ParamTable) initEtcdAddress() {
     addr, err := pt.Load("_EtcdAddress")
     if err != nil {
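One detail worth flagging in the hunk above: initIndexServerPort is declared on a value receiver, so the assignment to pt.ServicePort mutates a copy of the ParamTable and is discarded when the method returns, unlike the other new initializers, which take *ParamTable. A sketch of the pointer-receiver form that would actually persist the value (an editorial suggestion, not part of the commit):

// Pointer receiver so the parsed port survives the call.
func (pt *ParamTable) initIndexServerPort() {
    pt.ServicePort = pt.ParseInt("indexServer.port")
}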
@@ -8,7 +8,9 @@ import (
     "time"
 
     "github.com/zilliztech/milvus-distributed/internal/allocator"
+    grpcindexnodeclient "github.com/zilliztech/milvus-distributed/internal/distributed/indexnode/client"
     "github.com/zilliztech/milvus-distributed/internal/errors"
+    "github.com/zilliztech/milvus-distributed/internal/indexnode"
     "github.com/zilliztech/milvus-distributed/internal/kv"
     etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
     "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
@@ -22,7 +24,7 @@ import (
 type IndexService struct {
     // implement Service
 
-    //nodeClients [] .Interface
+    nodeClients []indexnode.Interface
     // factory method
     loopCtx    context.Context
     loopCancel func()
@@ -99,6 +101,11 @@ func (i *IndexService) RegisterNode(req *indexpb.RegisterNodeRequest) (*indexpb.
 
     i.nodeNum++
 
+    nodeAddress := req.Address.Ip + ":" + strconv.FormatInt(req.Address.Port, 10)
+    log.Println(nodeAddress)
+    nodeClient := grpcindexnodeclient.NewClient(nodeAddress)
+    i.nodeClients = append(i.nodeClients, nodeClient)
+
     return &indexpb.RegisterNodeResponse{
         InitParams: &internalpb2.InitParams{
             NodeID: nodeID,
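On the service side, registration now turns the reported Ip and Port into a "host:port" string, dials a node client for that address, and keeps the client for later BuildIndex calls. For context, a hypothetical node-side call is sketched below; only req.Address.Ip, req.Address.Port, and InitParams.NodeID appear in the diff, so the concrete Address message type (written here as commonpb.Address) and the serviceClient handle are assumptions.

// Hypothetical node-side registration (message type and client handle assumed, see above).
req := &indexpb.RegisterNodeRequest{
    Address: &commonpb.Address{
        Ip:   Params.NodeIP,          // from the new indexNode.address param
        Port: int64(Params.NodePort), // from the new indexNode.port param
    },
}
resp, err := serviceClient.RegisterNode(req) // serviceClient: assumed IndexService gRPC client
if err != nil {
    log.Fatal(err)
}
nodeID := resp.InitParams.NodeID // the service assigns this node its ID
_ = nodeID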
@@ -108,28 +115,19 @@ func (i *IndexService) RegisterNode(req *indexpb.RegisterNodeRequest) (*indexpb.
 }
 
 func (i *IndexService) BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
-    //TODO: Multiple indexes will build at same time.
-    //ctx := context.Background()
-    //indexNodeClient := indexnode.NewIndexNode(ctx, rand.Int63n(i.nodeNum))
-    //
-    ////TODO: Allocator index ID
-    //indexID := int64(0)
-    //
-    //request := &indexpb.BuildIndexCmd{
-    //    IndexID: indexID,
-    //    Req: req,
-    //}
-    //
-    //status, err := indexNodeClient.BuildIndex(request)
-    //if err != nil {
-    //    return nil, err
-    //}
-    //
-    //return &indexpb.BuildIndexResponse{
-    //    Status: status,
-    //    IndexID: indexID,
-    //}, nil
-    return nil, nil
+    //TODO: Allocator ID
+    indexID := int64(0)
+    nodeClient := i.nodeClients[0]
+    request := &indexpb.BuildIndexCmd{
+        IndexID: indexID,
+        Req:     req,
+    }
+    status, err := nodeClient.BuildIndex(request)
+    return &indexpb.BuildIndexResponse{
+        Status:  status,
+        IndexID: indexID,
+    }, err
 }
 
 func (i *IndexService) GetIndexStates(req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error) {
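The service-side BuildIndex is now wired through, but two shortcuts remain and are marked TODO: the index ID is a fixed 0 and the request always goes to nodeClients[0]. Purely as an illustration of one possible follow-up (none of this is in the commit), a round-robin pick with an empty-slice guard could look like the sketch below; rrCounter is an assumed extra uint64 field on IndexService, sync/atomic would need to be imported, and errors is the repo's internal/errors package already imported in this file.

// Illustrative only: pick the next registered node in round-robin order.
func (i *IndexService) pickNodeClient() (indexnode.Interface, error) {
    if len(i.nodeClients) == 0 {
        return nil, errors.New("no index node registered")
    }
    idx := atomic.AddUint64(&i.rrCounter, 1) % uint64(len(i.nodeClients))
    return i.nodeClients[idx], nil
}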
@@ -13,8 +13,6 @@ type ParamTable struct {
     Address string
     Port    int
 
-    NodeID int64
-
     MasterAddress string
 
     EtcdAddress string