fix: Int64 overflow in JSON encoding (#37657)

issue: #36621

- For simple types in a struct, add "string" to the JSON tag for
automatic string conversion during JSON encoding.
- For complex types in a struct, replace "int64" with "string" (see
the sketch after the commit header below).

Signed-off-by: jaime <yun.zhang@zilliz.com>
jaime 2024-11-14 22:52:30 +08:00 committed by GitHub
parent c5485bb1b1
commit 1d06d4324b
48 changed files with 2125 additions and 1554 deletions
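
Background for the fix: JSON itself places no width limit on integers, but JavaScript consumers (the web UI, Grafana, browser tooling) parse numbers as IEEE 754 doubles, so int64 IDs above 2^53-1 silently lose precision. Serializing them as strings avoids that. A minimal sketch of both techniques from the commit message, using an illustrative type rather than one from the Milvus codebase:

package main

import (
	"encoding/json"
	"fmt"
)

// taskStats is a hypothetical example type, not one from the codebase.
type taskStats struct {
	// Simple type: the ",string" tag option tells encoding/json to
	// emit the int64 as a quoted decimal string.
	PlanID int64 `json:"plan_id,string"`
	// Complex type: ",string" does not apply to slices, so the field
	// type itself becomes []string and IDs are formatted explicitly.
	InputSegments []string `json:"input_segments"`
}

func main() {
	s := taskStats{PlanID: 9007199254740993, InputSegments: []string{"9007199254740993"}}
	b, _ := json.Marshal(s)
	fmt.Println(string(b))
	// {"plan_id":"9007199254740993","input_segments":["9007199254740993"]}
}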


@@ -251,21 +251,21 @@ func combineToSegmentIndexesMeta220(segmentIndexes SegmentIndexesMeta210, indexB
}
segmentIndexModel := &model.SegmentIndex{
SegmentID: segID,
CollectionID: record.GetCollectionID(),
PartitionID: record.GetPartitionID(),
NumRows: buildMeta.GetReq().GetNumRows(),
IndexID: indexID,
BuildID: record.GetBuildID(),
NodeID: buildMeta.GetNodeID(),
IndexVersion: buildMeta.GetIndexVersion(),
IndexState: buildMeta.GetState(),
FailReason: buildMeta.GetFailReason(),
IsDeleted: buildMeta.GetMarkDeleted(),
CreateTime: record.GetCreateTime(),
IndexFileKeys: fileKeys,
IndexSize: buildMeta.GetSerializeSize(),
WriteHandoff: buildMeta.GetState() == commonpb.IndexState_Finished,
SegmentID: segID,
CollectionID: record.GetCollectionID(),
PartitionID: record.GetPartitionID(),
NumRows: buildMeta.GetReq().GetNumRows(),
IndexID: indexID,
BuildID: record.GetBuildID(),
NodeID: buildMeta.GetNodeID(),
IndexVersion: buildMeta.GetIndexVersion(),
IndexState: buildMeta.GetState(),
FailReason: buildMeta.GetFailReason(),
IsDeleted: buildMeta.GetMarkDeleted(),
CreatedUTCTime: record.GetCreateTime(),
IndexFileKeys: fileKeys,
IndexSize: buildMeta.GetSerializeSize(),
WriteHandoff: buildMeta.GetState() == commonpb.IndexState_Finished,
}
segmentIndexModels.AddRecord(segID, indexID, segmentIndexModel)
}

go.mod

@@ -23,7 +23,7 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/klauspost/compress v1.17.9
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d
github.com/milvus-io/milvus-proto/go-api/v2 v2.3.4-0.20241108105827-266fb751b620
github.com/milvus-io/milvus-proto/go-api/v2 v2.3.4-0.20241111062829-6de3d96f664f
github.com/minio/minio-go/v7 v7.0.73
github.com/pingcap/log v1.1.1-0.20221015072633-39906604fb81
github.com/prometheus/client_golang v1.14.0

go.sum

@@ -628,8 +628,8 @@ github.com/milvus-io/cgosymbolizer v0.0.0-20240722103217-b7dee0e50119 h1:9VXijWu
github.com/milvus-io/cgosymbolizer v0.0.0-20240722103217-b7dee0e50119/go.mod h1:DvXTE/K/RtHehxU8/GtDs4vFtfw64jJ3PaCnFri8CRg=
github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b h1:TfeY0NxYxZzUfIfYe5qYDBzt4ZYRqzUjTR6CvUzjat8=
github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b/go.mod h1:iwW+9cWfIzzDseEBCCeDSN5SD16Tidvy8cwQ7ZY8Qj4=
github.com/milvus-io/milvus-proto/go-api/v2 v2.3.4-0.20241108105827-266fb751b620 h1:0IWUDtDloift7cQHalhdjuVkL/3qSeiXFqR7MofZBkg=
github.com/milvus-io/milvus-proto/go-api/v2 v2.3.4-0.20241108105827-266fb751b620/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/milvus-io/milvus-proto/go-api/v2 v2.3.4-0.20241111062829-6de3d96f664f h1:yLxT8NH0ixUOJMqJuk0xvGf0cKsr+N2xibyTat256PI=
github.com/milvus-io/milvus-proto/go-api/v2 v2.3.4-0.20241111062829-6de3d96f664f/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/milvus-io/pulsar-client-go v0.12.1 h1:O2JZp1tsYiO7C0MQ4hrUY/aJXnn2Gry6hpm7UodghmE=
github.com/milvus-io/pulsar-client-go v0.12.1/go.mod h1:dkutuH4oS2pXiGm+Ti7fQZ4MRjrMPZ8IJeEGAWMeckk=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=


@@ -19,10 +19,12 @@ package datacoord
import (
"context"
"encoding/json"
"strconv"
"sync"
"time"
"github.com/hashicorp/golang-lru/v2/expirable"
"github.com/samber/lo"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
@@ -31,20 +33,25 @@ import (
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/timerecord"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
func newCompactionTaskStats(task *datapb.CompactionTask) *metricsinfo.CompactionTask {
return &metricsinfo.CompactionTask{
PlanID: task.PlanID,
CollectionID: task.CollectionID,
Type: task.Type.String(),
State: task.State.String(),
FailReason: task.FailReason,
StartTime: task.StartTime,
EndTime: task.EndTime,
TotalRows: task.TotalRows,
InputSegments: task.InputSegments,
ResultSegments: task.ResultSegments,
PlanID: task.PlanID,
CollectionID: task.CollectionID,
Type: task.Type.String(),
State: task.State.String(),
FailReason: task.FailReason,
StartTime: typeutil.TimestampToString(uint64(task.StartTime)),
EndTime: typeutil.TimestampToString(uint64(task.EndTime)),
TotalRows: task.TotalRows,
InputSegments: lo.Map(task.InputSegments, func(t int64, i int) string {
return strconv.FormatInt(t, 10)
}),
ResultSegments: lo.Map(task.ResultSegments, func(t int64, i int) string {
return strconv.FormatInt(t, 10)
}),
}
}
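
The lo.Map calls above convert each int64 segment ID to its base-10 string via strconv.FormatInt, which is lossless across the full int64 range. For reference, the same conversion with only the standard library:

package main

import (
	"fmt"
	"strconv"
)

// formatInt64s mirrors the lo.Map calls above without the samber/lo
// dependency; strconv.FormatInt never loses precision.
func formatInt64s(ids []int64) []string {
	out := make([]string, 0, len(ids))
	for _, id := range ids {
		out = append(out, strconv.FormatInt(id, 10))
	}
	return out
}

func main() {
	fmt.Println(formatInt64s([]int64{9007199254740993})) // [9007199254740993]
}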


@@ -308,59 +308,59 @@ func Test_compactionTrigger_force(t *testing.T) {
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
1: {
indexID: {
SegmentID: 1,
CollectionID: 2,
PartitionID: 1,
NumRows: 100,
IndexID: indexID,
BuildID: 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: 1,
CollectionID: 2,
PartitionID: 1,
NumRows: 100,
IndexID: indexID,
BuildID: 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
},
2: {
indexID: {
SegmentID: 2,
CollectionID: 2,
PartitionID: 1,
NumRows: 100,
IndexID: indexID,
BuildID: 2,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: 2,
CollectionID: 2,
PartitionID: 1,
NumRows: 100,
IndexID: indexID,
BuildID: 2,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
},
3: {
indexID: {
SegmentID: 3,
CollectionID: 1111,
PartitionID: 1,
NumRows: 100,
IndexID: indexID,
BuildID: 3,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: 3,
CollectionID: 1111,
PartitionID: 1,
NumRows: 100,
IndexID: indexID,
BuildID: 3,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
},
},


@@ -487,40 +487,40 @@ func createMetaForRecycleUnusedSegIndexes(catalog metastore.DataCoordCatalog) *m
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 10,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 10,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
WriteHandoff: false,
},
},
segID + 1: {
indexID: {
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 10,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 10,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
WriteHandoff: false,
},
},
},
@@ -532,37 +532,37 @@ func createMetaForRecycleUnusedSegIndexes(catalog metastore.DataCoordCatalog) *m
}
meta.indexMeta.segmentBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 10,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 10,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
})
meta.indexMeta.segmentBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 10,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 10,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
})
for id, segment := range segments {
@@ -652,40 +652,40 @@ func createMetaTableForRecycleUnusedIndexFiles(catalog *datacoord.Catalog) *meta
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 10,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 10,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
WriteHandoff: false,
},
},
segID + 1: {
indexID: {
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 10,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 10,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
},
},
@@ -710,38 +710,38 @@ func createMetaTableForRecycleUnusedIndexFiles(catalog *datacoord.Catalog) *meta
},
}
meta.indexMeta.segmentBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 10,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 10,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
WriteHandoff: false,
})
meta.indexMeta.segmentBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 10,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 10,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
})
for id, segment := range segments {
meta.segments.SetSegment(id, segment)
@@ -1052,40 +1052,40 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 5000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 1024,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 5000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 1024,
WriteHandoff: false,
},
},
segID + 1: {
indexID: {
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 5000,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: []string{"file3", "file4"},
IndexSize: 1024,
WriteHandoff: false,
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 5000,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: []string{"file3", "file4"},
IndexSize: 1024,
WriteHandoff: false,
},
},
},
@@ -1138,39 +1138,39 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
}
m.indexMeta.segmentBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 5000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 1024,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 5000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 1024,
WriteHandoff: false,
})
m.indexMeta.segmentBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 5000,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: []string{"file3", "file4"},
IndexSize: 1024,
WriteHandoff: false,
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 5000,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: []string{"file3", "file4"},
IndexSize: 1024,
WriteHandoff: false,
})
for id, segment := range segments {


@@ -43,6 +43,7 @@ import (
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/metrics"
"github.com/milvus-io/milvus/pkg/util/indexparams"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/timerecord"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@@ -63,29 +64,17 @@ type indexMeta struct {
segmentIndexes map[UniqueID]map[UniqueID]*model.SegmentIndex
}
type indexTaskStats struct {
IndexID UniqueID `json:"index_id,omitempty"`
CollectionID UniqueID `json:"collection_id,omitempty"`
SegmentID UniqueID `json:"segment_id,omitempty"`
BuildID UniqueID `json:"build_id,omitempty"`
IndexState string `json:"index_state,omitempty"`
FailReason string `json:"fail_reason,omitempty"`
IndexSize uint64 `json:"index_size,omitempty"`
IndexVersion int64 `json:"index_version,omitempty"`
CreateTime uint64 `json:"create_time,omitempty"`
}
func newIndexTaskStats(s *model.SegmentIndex) *indexTaskStats {
return &indexTaskStats{
IndexID: s.IndexID,
CollectionID: s.CollectionID,
SegmentID: s.SegmentID,
BuildID: s.BuildID,
IndexState: s.IndexState.String(),
FailReason: s.FailReason,
IndexSize: s.IndexSize,
IndexVersion: s.IndexVersion,
CreateTime: s.CreateTime,
func newIndexTaskStats(s *model.SegmentIndex) *metricsinfo.IndexTaskStats {
return &metricsinfo.IndexTaskStats{
IndexID: s.IndexID,
CollectionID: s.CollectionID,
SegmentID: s.SegmentID,
BuildID: s.BuildID,
IndexState: s.IndexState.String(),
FailReason: s.FailReason,
IndexSize: s.IndexSize,
IndexVersion: s.IndexVersion,
CreatedUTCTime: typeutil.TimestampToString(s.CreatedUTCTime),
}
}
@@ -94,7 +83,7 @@ type segmentBuildInfo struct {
// buildID -> segmentIndex
buildID2SegmentIndex map[UniqueID]*model.SegmentIndex
// taskStats records the task stats of the segment
taskStats *expirable.LRU[UniqueID, *indexTaskStats]
taskStats *expirable.LRU[UniqueID, *metricsinfo.IndexTaskStats]
}
func newSegmentIndexBuildInfo() *segmentBuildInfo {
@@ -102,7 +91,7 @@ func newSegmentIndexBuildInfo() *segmentBuildInfo {
// build ID -> segment index
buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
// build ID -> task stats
taskStats: expirable.NewLRU[UniqueID, *indexTaskStats](64, nil, time.Minute*30),
taskStats: expirable.NewLRU[UniqueID, *metricsinfo.IndexTaskStats](64, nil, time.Minute*30),
}
}
@@ -124,7 +113,7 @@ func (m *segmentBuildInfo) List() map[UniqueID]*model.SegmentIndex {
return m.buildID2SegmentIndex
}
func (m *segmentBuildInfo) GetTaskStats() []*indexTaskStats {
func (m *segmentBuildInfo) GetTaskStats() []*metricsinfo.IndexTaskStats {
return m.taskStats.Values()
}
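
For context on the taskStats field above: the expirable LRU from hashicorp/golang-lru/v2 caps the entry count and drops entries after a TTL, and Values() returns the entries still alive, which is what GetTaskStats serves to the metrics endpoint. A minimal sketch with the same parameters the diff uses (capacity 64, no eviction callback, 30-minute TTL):

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/golang-lru/v2/expirable"
)

func main() {
	// Capacity 64, no eviction callback, entries expire after 30 minutes.
	stats := expirable.NewLRU[int64, string](64, nil, 30*time.Minute)
	stats.Add(1, "buildID 1: Finished")
	stats.Add(2, "buildID 2: InProgress")
	fmt.Println(stats.Values()) // entries that have not yet expired
}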


@@ -37,6 +37,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/workerpb"
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
)
func TestReloadFromKV(t *testing.T) {
@@ -516,20 +517,20 @@ func TestMeta_AddSegmentIndex(t *testing.T) {
}
segmentIndex := &model.SegmentIndex{
SegmentID: 1,
CollectionID: 2,
PartitionID: 3,
NumRows: 10240,
IndexID: 4,
BuildID: 5,
NodeID: 6,
IndexVersion: 0,
IndexState: 0,
FailReason: "",
IsDeleted: false,
CreateTime: 12,
IndexFileKeys: nil,
IndexSize: 0,
SegmentID: 1,
CollectionID: 2,
PartitionID: 3,
NumRows: 10240,
IndexID: 4,
BuildID: 5,
NodeID: 6,
IndexVersion: 0,
IndexState: 0,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 12,
IndexFileKeys: nil,
IndexSize: 0,
}
t.Run("save meta fail", func(t *testing.T) {
@@ -663,20 +664,20 @@ func TestMeta_GetSegmentIndexState(t *testing.T) {
t.Run("unissued", func(t *testing.T) {
m.updateSegmentIndex(&model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10250,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 12,
IndexFileKeys: nil,
IndexSize: 0,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10250,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 12,
IndexFileKeys: nil,
IndexSize: 0,
})
state := m.GetSegmentIndexState(collID, segID, indexID)
@@ -685,20 +686,20 @@ func TestMeta_GetSegmentIndexState(t *testing.T) {
t.Run("finish", func(t *testing.T) {
m.updateSegmentIndex(&model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10250,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 0,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 12,
IndexFileKeys: nil,
IndexSize: 0,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10250,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 0,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 12,
IndexFileKeys: nil,
IndexSize: 0,
})
state := m.GetSegmentIndexState(collID, segID, indexID)
@@ -733,20 +734,20 @@ func TestMeta_GetIndexedSegment(t *testing.T) {
m.segmentIndexes = map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 10,
IndexFileKeys: nil,
IndexSize: 0,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 10,
IndexFileKeys: nil,
IndexSize: 0,
},
},
}
@@ -769,20 +770,20 @@ }
}
m.segmentBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 10,
IndexFileKeys: nil,
IndexSize: 0,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 10,
IndexFileKeys: nil,
IndexSize: 0,
})
t.Run("success", func(t *testing.T) {
@@ -1091,20 +1092,20 @@ func TestMeta_GetIndexParams(t *testing.T) {
func TestMeta_GetIndexJob(t *testing.T) {
m := newSegmentIndexMeta(nil)
m.segmentBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
})
t.Run("exist", func(t *testing.T) {
@@ -1179,20 +1180,20 @@ func updateSegmentIndexMeta(t *testing.T) *indexMeta {
indexBuildInfo := newSegmentIndexBuildInfo()
indexBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
})
return &indexMeta{
@@ -1200,20 +1201,20 @@ func updateSegmentIndexMeta(t *testing.T) *indexMeta {
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
},
},
},
@@ -1520,26 +1521,26 @@ func TestIndexMeta_GetUnindexedSegments(t *testing.T) {
func TestBuildIndexTaskStatsJSON(t *testing.T) {
im := &indexMeta{segmentBuildInfo: newSegmentIndexBuildInfo()}
si1 := &model.SegmentIndex{
BuildID: 1,
CollectionID: 100,
SegmentID: 1000,
IndexID: 10,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexSize: 1024,
IndexVersion: 1,
CreateTime: uint64(time.Now().Unix()),
BuildID: 1,
CollectionID: 100,
SegmentID: 1000,
IndexID: 10,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexSize: 1024,
IndexVersion: 1,
CreatedUTCTime: uint64(time.Now().Unix()),
}
si2 := &model.SegmentIndex{
BuildID: 2,
CollectionID: 101,
SegmentID: 1001,
IndexID: 11,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexSize: 2048,
IndexVersion: 1,
CreateTime: uint64(time.Now().Unix()),
BuildID: 2,
CollectionID: 101,
SegmentID: 1001,
IndexID: 11,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexSize: 2048,
IndexVersion: 1,
CreatedUTCTime: uint64(time.Now().Unix()),
}
actualJSON := im.TaskStatsJSON()
@@ -1553,7 +1554,7 @@ func TestBuildIndexTaskStatsJSON(t *testing.T) {
assert.True(t, ok)
assert.EqualValues(t, si1, ret1)
expectedTasks := []*indexTaskStats{
expectedTasks := []*metricsinfo.IndexTaskStats{
newIndexTaskStats(si1),
newIndexTaskStats(si2),
}


@@ -64,14 +64,14 @@ func (s *Server) createIndexForSegment(segment *SegmentInfo, indexID UniqueID) e
return err
}
segIndex := &model.SegmentIndex{
SegmentID: segment.ID,
CollectionID: segment.CollectionID,
PartitionID: segment.PartitionID,
NumRows: segment.NumOfRows,
IndexID: indexID,
BuildID: buildID,
CreateTime: uint64(segment.ID),
WriteHandoff: false,
SegmentID: segment.ID,
CollectionID: segment.CollectionID,
PartitionID: segment.PartitionID,
NumRows: segment.NumOfRows,
IndexID: indexID,
BuildID: buildID,
CreatedUTCTime: uint64(time.Now().Unix()),
WriteHandoff: false,
}
if err = s.meta.indexMeta.AddSegmentIndex(segIndex); err != nil {
return err


@@ -400,152 +400,152 @@ func TestServer_AlterIndex(t *testing.T) {
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
indexID + 1: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 1,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 1,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
indexID + 3: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 3,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 3,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
indexID + 4: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 4,
BuildID: buildID + 4,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "mock failed",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 4,
BuildID: buildID + 4,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "mock failed",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
indexID + 5: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 5,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 5,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
},
segID - 1: {
indexID: {
SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
CreateTime: createTS,
SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
CreatedUTCTime: createTS,
},
indexID + 1: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 1,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
CreateTime: createTS,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 1,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
CreatedUTCTime: createTS,
},
indexID + 3: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 3,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
CreateTime: createTS,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 3,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
CreatedUTCTime: createTS,
},
indexID + 4: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 4,
BuildID: buildID + 4,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "mock failed",
CreateTime: createTS,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 4,
BuildID: buildID + 4,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "mock failed",
CreatedUTCTime: createTS,
},
indexID + 5: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 5,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
CreateTime: createTS,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 5,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
CreatedUTCTime: createTS,
},
},
},
@@ -808,21 +808,21 @@ func TestServer_GetIndexState(t *testing.T) {
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 3000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_IndexStateNone,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 3000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_IndexStateNone,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
},
},
@@ -933,21 +933,21 @@ func TestServer_GetSegmentIndexState(t *testing.T) {
},
}
s.meta.indexMeta.updateSegmentIndex(&model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10250,
IndexID: indexID,
BuildID: 10,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 1025,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10250,
IndexID: indexID,
BuildID: 10,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 1025,
WriteHandoff: false,
})
s.meta.segments.SetSegment(segID, &SegmentInfo{
SegmentInfo: &datapb.SegmentInfo{
@@ -970,21 +970,21 @@ func TestServer_GetSegmentIndexState(t *testing.T) {
t.Run("finish", func(t *testing.T) {
s.meta.indexMeta.updateSegmentIndex(&model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10250,
IndexID: indexID,
BuildID: 10,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 1025,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10250,
IndexID: indexID,
BuildID: 10,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 1025,
WriteHandoff: false,
})
resp, err := s.GetSegmentIndexState(ctx, req)
assert.NoError(t, err)
@@ -1092,21 +1092,21 @@ func TestServer_GetIndexBuildProgress(t *testing.T) {
t.Run("finish", func(t *testing.T) {
s.meta.indexMeta.updateSegmentIndex(&model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10250,
IndexID: indexID,
BuildID: 10,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10250,
IndexID: indexID,
BuildID: 10,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
WriteHandoff: false,
})
s.meta.segments = NewSegmentsInfo()
s.meta.segments.SetSegment(segID, &SegmentInfo{
@@ -1354,152 +1354,152 @@ func TestServer_DescribeIndex(t *testing.T) {
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
indexID + 1: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 1,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 1,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
indexID + 3: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 3,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 3,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
indexID + 4: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 4,
BuildID: buildID + 4,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "mock failed",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 4,
BuildID: buildID + 4,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "mock failed",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
indexID + 5: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 5,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 5,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
},
segID - 1: {
indexID: {
SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
CreateTime: createTS,
SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
CreatedUTCTime: createTS,
},
indexID + 1: {
SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 1,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
CreateTime: createTS,
SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 1,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
CreatedUTCTime: createTS,
},
indexID + 3: {
SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 3,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
CreateTime: createTS,
SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 3,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
CreatedUTCTime: createTS,
},
indexID + 4: {
SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 4,
BuildID: buildID + 4,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "mock failed",
CreateTime: createTS,
SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 4,
BuildID: buildID + 4,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "mock failed",
CreatedUTCTime: createTS,
},
indexID + 5: {
SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 5,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
CreateTime: createTS,
SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 5,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
CreatedUTCTime: createTS,
},
},
},
@@ -1859,89 +1859,89 @@ func TestServer_GetIndexStatistics(t *testing.T) {
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
indexID + 1: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 1,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 1,
BuildID: buildID + 1,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
indexID + 3: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 3,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 3,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
indexID + 4: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 4,
BuildID: buildID + 4,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "mock failed",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 4,
BuildID: buildID + 4,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "mock failed",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
indexID + 5: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 5,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID + 5,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
},
},
@@ -2251,21 +2251,21 @@ func TestServer_GetIndexInfos(t *testing.T) {
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: createTS,
IndexFileKeys: nil,
IndexSize: 0,
WriteHandoff: false,
},
},
},


@@ -37,6 +37,7 @@ import (
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/tsoutil"
"github.com/milvus-io/milvus/pkg/util/typeutil"
"github.com/milvus-io/milvus/pkg/util/uniquegenerator"
)
@@ -91,7 +92,7 @@ func (s *Server) getChannelsJSON(ctx context.Context, req *milvuspb.GetMetricsRe
channel2Checkpoints := s.meta.GetChannelCheckpoints()
for _, channel := range channels {
if cp, ok := channel2Checkpoints[channel.Name]; ok {
channel.CheckpointTS = typeutil.TimestampToString(cp.GetTimestamp())
channel.CheckpointTS = tsoutil.PhysicalTimeFormat(cp.GetTimestamp())
} else {
log.Warn("channel not found in meta cache", zap.String("channel", channel.Name))
}
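
tsoutil.PhysicalTimeFormat renders the wall-clock half of a hybrid TSO timestamp instead of the raw integer. A hedged sketch of the underlying conversion, assuming the conventional Milvus/TiDB TSO layout of an 18-bit logical counter below a physical millisecond clock (the real helper lives in pkg/util/tsoutil):

package main

import (
	"fmt"
	"time"
)

// logicalBits assumes the conventional TSO layout; illustrative,
// not copied from tsoutil.
const logicalBits = 18

func physicalTime(ts uint64) time.Time {
	return time.UnixMilli(int64(ts >> logicalBits))
}

func main() {
	ts := uint64(1731595950000) << logicalBits // synthetic TSO built from Unix millis
	fmt.Println(physicalTime(ts).UTC())        // 2024-11-14 14:52:30 +0000 UTC
}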
@@ -139,15 +140,11 @@ func (s *Server) getDistJSON(ctx context.Context, req *milvuspb.GetMetricsReques
dmChannel := metrics.NewDMChannelFrom(chInfo.GetVchan())
dmChannel.NodeID = nodeID
dmChannel.WatchState = chInfo.State.String()
dmChannel.StartWatchTS = chInfo.GetStartTs()
dmChannel.StartWatchTS = typeutil.TimestampToString(uint64(chInfo.GetStartTs()))
channels = append(channels, dmChannel)
}
}
if len(segments) == 0 && len(channels) == 0 {
return ""
}
dist := &metricsinfo.DataCoordDist{
Segments: segments,
DMChannels: channels,


@@ -34,6 +34,7 @@ import (
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/tsoutil"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@@ -216,8 +217,8 @@ func TestGetSyncTaskMetrics(t *testing.T) {
SegmentID: 1,
BatchRows: 100,
SegmentLevel: "L0",
TSFrom: 1000,
TSTo: 2000,
TSFrom: "t1",
TSTo: "t2",
DeltaRowCount: 10,
FlushSize: 1024,
RunningTime: "2h",
@@ -502,7 +503,7 @@ func TestGetChannelsJSON(t *testing.T) {
Name: "channel1",
CollectionID: 100,
NodeID: 1,
CheckpointTS: typeutil.TimestampToString(1000),
CheckpointTS: tsoutil.PhysicalTimeFormat(1000),
},
}
channelsBytes, err = json.Marshal(channels)
@@ -678,7 +679,7 @@ func TestGetDistJSON(t *testing.T) {
cm.EXPECT().GetChannelWatchInfos().Return(map[int64]map[string]*datapb.ChannelWatchInfo{})
svr.channelManager = cm
expectedJSON := ""
expectedJSON := "{}"
actualJSON := svr.getDistJSON(ctx, req)
assert.Equal(t, expectedJSON, actualJSON)
})
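
The expectation changes from "" to "{}" because getDistJSON now always marshals the distribution struct, even when it holds no segments or channels; with omitempty on both fields (which the "{}" result implies), nil slices drop out of the output. An illustration under that assumption:

package main

import (
	"encoding/json"
	"fmt"
)

// dist is an illustrative stand-in for metricsinfo.DataCoordDist,
// assuming both fields carry omitempty as the "{}" expectation implies.
type dist struct {
	Segments   []string `json:"segments,omitempty"`
	DMChannels []string `json:"dm_channels,omitempty"`
}

func main() {
	b, _ := json.Marshal(dist{})
	fmt.Println(string(b)) // {}
}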


@@ -1607,20 +1607,20 @@ func TestGetRecoveryInfo(t *testing.T) {
})
assert.NoError(t, err)
svr.meta.indexMeta.updateSegmentIndex(&model.SegmentIndex{
SegmentID: seg4.ID,
CollectionID: 0,
PartitionID: 0,
NumRows: 100,
IndexID: 0,
BuildID: 0,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
SegmentID: seg4.ID,
CollectionID: 0,
PartitionID: 0,
NumRows: 100,
IndexID: 0,
BuildID: 0,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
})
req := &datapb.GetRecoveryInfoRequest{


@@ -1258,20 +1258,20 @@ func TestGetRecoveryInfoV2(t *testing.T) {
})
assert.NoError(t, err)
svr.meta.indexMeta.updateSegmentIndex(&model.SegmentIndex{
SegmentID: seg4.ID,
CollectionID: 0,
PartitionID: 0,
NumRows: 100,
IndexID: 0,
BuildID: 0,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
SegmentID: seg4.ID,
CollectionID: 0,
PartitionID: 0,
NumRows: 100,
IndexID: 0,
BuildID: 0,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
})
ch := &channelMeta{Name: "vchan1", CollectionID: 0}


@@ -58,190 +58,190 @@ var (
func createIndexMeta(catalog metastore.DataCoordCatalog) *indexMeta {
indexBuildInfo := newSegmentIndexBuildInfo()
indexBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: nil,
IndexSize: 1,
})
indexBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
})
indexBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID + 2,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 2,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: true,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 2,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 2,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: true,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
})
indexBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID + 3,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 3,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
})
indexBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID + 4,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 4,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 4,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 4,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
})
indexBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID + 5,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 5,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
})
indexBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID + 6,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 6,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 6,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 6,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
})
indexBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID + 7,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 7,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "error",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 7,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 7,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "error",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
})
indexBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID + 8,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 8,
NodeID: nodeID + 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 8,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 8,
NodeID: nodeID + 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
})
indexBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID + 9,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 9,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 9,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 9,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
})
indexBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID + 10,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 10,
NodeID: nodeID,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 10,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 10,
NodeID: nodeID,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
})
return &indexMeta{
@@ -278,200 +278,200 @@ func createIndexMeta(catalog metastore.DataCoordCatalog) *indexMeta {
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: nil,
IndexSize: 1,
},
},
segID + 1: {
indexID: {
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 1,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
},
},
segID + 2: {
indexID: {
SegmentID: segID + 2,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 2,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: true,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 2,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 2,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: true,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
},
},
segID + 3: {
indexID: {
SegmentID: segID + 3,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 3,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 3,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
},
},
segID + 4: {
indexID: {
SegmentID: segID + 4,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 4,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 4,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 4,
NodeID: nodeID,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
},
},
segID + 5: {
indexID: {
SegmentID: segID + 5,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 5,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 5,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
},
},
segID + 6: {
indexID: {
SegmentID: segID + 6,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 6,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 6,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 6,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
},
},
segID + 7: {
indexID: {
SegmentID: segID + 7,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 7,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "error",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 7,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 7,
NodeID: 0,
IndexVersion: 1,
IndexState: commonpb.IndexState_Failed,
FailReason: "error",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
},
},
segID + 8: {
indexID: {
SegmentID: segID + 8,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 8,
NodeID: nodeID + 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 8,
CollectionID: collID,
PartitionID: partID,
NumRows: 1026,
IndexID: indexID,
BuildID: buildID + 8,
NodeID: nodeID + 1,
IndexVersion: 1,
IndexState: commonpb.IndexState_InProgress,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
},
},
segID + 9: {
indexID: {
SegmentID: segID + 9,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 9,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 9,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 9,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
},
},
segID + 10: {
indexID: {
SegmentID: segID + 10,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 10,
NodeID: nodeID,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
SegmentID: segID + 10,
CollectionID: collID,
PartitionID: partID,
NumRows: 500,
IndexID: indexID,
BuildID: buildID + 10,
NodeID: nodeID,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 1111,
IndexFileKeys: nil,
IndexSize: 1,
},
},
},
@ -1541,20 +1541,20 @@ func (s *taskSchedulerSuite) Test_indexTaskWithMvOptionalScalarField() {
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: minNumberOfRowsToBuild,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: minNumberOfRowsToBuild,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
},
},
},
@ -1583,20 +1583,20 @@ func (s *taskSchedulerSuite) Test_indexTaskWithMvOptionalScalarField() {
}
mt.indexMeta.segmentBuildInfo.Add(&model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: minNumberOfRowsToBuild,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
NumRows: minNumberOfRowsToBuild,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexVersion: 0,
IndexState: commonpb.IndexState_Unissued,
FailReason: "",
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
})
cm := mocks.NewChunkManager(s.T())
cm.EXPECT().RootPath().Return("ut-index")

View File

@ -27,9 +27,6 @@ func (h *Handlers) RegisterRoutesTo(router gin.IRouter) {
router.GET("/health", wrapHandler(h.handleGetHealth))
router.POST("/dummy", wrapHandler(h.handleDummy))
router.GET("/databases", wrapHandler(h.handleListDatabases))
router.GET("/database", wrapHandler(h.handleDescribeDatabases))
router.POST("/collection", wrapHandler(h.handleCreateCollection))
router.DELETE("/collection", wrapHandler(h.handleDropCollection))
router.GET("/collection/existence", wrapHandler(h.handleHasCollection))
@ -99,24 +96,6 @@ func (h *Handlers) handleDummy(c *gin.Context) (interface{}, error) {
return h.proxy.Dummy(c, &req)
}
func (h *Handlers) handleListDatabases(c *gin.Context) (interface{}, error) {
req := milvuspb.ListDatabasesRequest{}
err := shouldBind(c, &req)
if err != nil {
return nil, fmt.Errorf("%w: parse body failed: %v", errBadRequest, err)
}
return h.proxy.ListDatabases(c, &req)
}
func (h *Handlers) handleDescribeDatabases(c *gin.Context) (interface{}, error) {
req := milvuspb.DescribeDatabaseRequest{}
err := shouldBind(c, &req)
if err != nil {
return nil, fmt.Errorf("%w: parse body failed: %v", errBadRequest, err)
}
return h.proxy.DescribeDatabase(c, &req)
}
func (h *Handlers) handleCreateCollection(c *gin.Context) (interface{}, error) {
wrappedReq := WrappedCreateCollectionRequest{}
err := shouldBind(c, &wrappedReq)

View File

@ -456,13 +456,6 @@ func (s *Server) init() error {
}
}
if HTTPParams.Enabled.GetAsBool() {
registerHTTPHandlerOnce.Do(func() {
log.Info("register Proxy http server")
s.registerHTTPServer()
})
}
if s.rootCoordClient == nil {
var err error
log.Debug("create RootCoord client for Proxy")
@ -529,6 +522,13 @@ func (s *Server) init() error {
s.proxy.SetQueryCoordClient(s.queryCoordClient)
log.Debug("set QueryCoord client for Proxy done")
if HTTPParams.Enabled.GetAsBool() {
registerHTTPHandlerOnce.Do(func() {
log.Info("register Proxy http server")
s.registerHTTPServer()
})
}
log.Debug(fmt.Sprintf("update Proxy's state to %s", commonpb.StateCode_Initializing.String()))
s.proxy.UpdateStateCode(commonpb.StateCode_Initializing)

View File

@ -28,6 +28,7 @@ import (
"github.com/milvus-io/milvus/pkg/metrics"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/tsoutil"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -127,7 +128,7 @@ func (fm *fgManagerImpl) GetChannelsJSON() string {
channels = append(channels, &metricsinfo.Channel{
Name: ch,
WatchState: ds.fg.Status(),
LatestTimeTick: typeutil.TimestampToString(latestTimeTick),
LatestTimeTick: tsoutil.PhysicalTimeFormat(latestTimeTick),
NodeID: paramtable.GetNodeID(),
CollectionID: ds.metacache.Collection(),
})
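For context: tsoutil.PhysicalTimeFormat renders the physical component of a hybrid TSO timestamp as a human-readable string, which is why LatestTimeTick switches from a raw number to a formatted string here. A minimal self-contained sketch of the idea, assuming the usual 18-bit-logical/46-bit-physical TSO layout and a hypothetical output format (the real helper may differ):

package main

import (
	"fmt"
	"time"
)

// Assumed TSO layout: the low 18 bits carry a logical counter, the high
// bits carry physical time in milliseconds since the Unix epoch.
const logicalBits = 18

func physicalTimeFormat(ts uint64) string {
	ms := int64(ts >> logicalBits)
	return time.UnixMilli(ms).Format("2006-01-02 15:04:05") // hypothetical format
}

func main() {
	fmt.Println(physicalTimeFormat(0)) // the zero timestamp maps to the Unix epoch
}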

View File

@ -43,6 +43,7 @@ import (
"github.com/milvus-io/milvus/pkg/mq/msgstream"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/tsoutil"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -182,7 +183,7 @@ func TestGetChannelsJSON(t *testing.T) {
{
Name: "fake-ch-_1",
WatchState: "Healthy",
LatestTimeTick: typeutil.TimestampToString(0),
LatestTimeTick: tsoutil.PhysicalTimeFormat(0),
NodeID: paramtable.GetNodeID(),
CollectionID: 1,
},

View File

@ -42,6 +42,7 @@ import (
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/retry"
"github.com/milvus-io/milvus/pkg/util/timerecord"
"github.com/milvus-io/milvus/pkg/util/tsoutil"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -422,8 +423,8 @@ func (t *SyncTask) MarshalJSON() ([]byte, error) {
SegmentID: t.segmentID,
BatchRows: t.batchRows,
SegmentLevel: t.level.String(),
TSFrom: t.tsFrom,
TSTo: t.tsTo,
TSFrom: tsoutil.PhysicalTimeFormat(t.tsFrom),
TSTo: tsoutil.PhysicalTimeFormat(t.tsTo),
DeltaRowCount: t.deltaRowCount,
FlushSize: t.flushedSize,
RunningTime: t.execTime.String(),

View File

@ -400,8 +400,8 @@ func (s *SyncTaskSuite) TestSyncTask_MarshalJSON() {
SegmentID: t.segmentID,
BatchRows: t.batchRows,
SegmentLevel: t.level.String(),
TSFrom: t.tsFrom,
TSTo: t.tsTo,
TSFrom: tsoutil.PhysicalTimeFormat(t.tsFrom),
TSTo: tsoutil.PhysicalTimeFormat(t.tsTo),
DeltaRowCount: t.deltaRowCount,
FlushSize: t.flushedSize,
RunningTime: t.execTime.String(),

View File

@ -109,4 +109,14 @@ const (
DNSegmentsPath = "/_dn/segments"
// DNChannelsPath is the path to get channels in DataNode.
DNChannelsPath = "/_dn/channels"
// DatabaseListPath is the path to get all databases.
DatabaseListPath = "/_db/list"
// DatabaseDescPath is the path to get database description.
DatabaseDescPath = "/_db/desc"
// CollectionListPath is the path to get all collections.
CollectionListPath = "/_collection/list"
// CollectionDescPath is the path to get collection description.
CollectionDescPath = "/_collection/desc"
)
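These constants only name the routes; a quick way to exercise them once the proxy's management HTTP server is running is a plain GET. The host and port below (localhost:9091) are an assumption taken from the static page links elsewhere in this commit, not something the constants mandate:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// List all databases via the new management endpoint.
	resp, err := http.Get("http://localhost:9091/_db/list")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // JSON with db_names, db_ids and created_timestamps as strings
}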

View File

@ -99,7 +99,7 @@
// handleError(error);
// });
fetchData(MILVUS_URI + "/databases", databases)
fetchData(MILVUS_URI + "/_db/list", listDatabaseData)
.then(data => {
databaseData = data;
renderDatabases(startPage, paginationSize)

View File

@ -31,21 +31,11 @@ const handleError = (error) => {
// window.location.href = `5xx.html?error=${errorMessage}`;
};
const fetchData = (url, localData, kvParams) => {
const fetchData = (url, localData) => {
if (DEBUG_MODE) {
return new Promise((resolve) => {
resolve(JSON.parse(localData));
});
} else if (kvParams && kvParams.length !== 0) {
return fetch(url, {
method: 'POST',
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json',
},
mode: 'no-cors',
body: JSON.stringify(kvParams)
}).then(response => response.json())
} else {
return fetch(url).then(response => {
return response.json();
@ -62,18 +52,4 @@ function getQueryParams() {
params[decodeURIComponent(key)] = decodeURIComponent(value || '');
});
return params;
}
function formatTimestamp(timestamp) {
const date = new Date(timestamp); // Convert timestamp to a Date object
// Format the date components
const year = date.getFullYear();
const month = ('0' + (date.getMonth() + 1)).slice(-2); // Months are zero-indexed
const day = ('0' + date.getDate()).slice(-2);
const hours = ('0' + date.getHours()).slice(-2);
const minutes = ('0' + date.getMinutes()).slice(-2);
const seconds = ('0' + date.getSeconds()).slice(-2);
// Return formatted date string
return `${year}-${month}-${day} ${hours}:${minutes}:${seconds}`
}

View File

@ -445,38 +445,67 @@ const mconfigs = `
}
`;
const collections =`
const listCollectionData =`
{
"status": {
"error_code": "Success",
"reason": ""
},
"collection_names": [
"collection1",
"collection2",
"collection3",
"collection4",
"collection5",
"collection6",
"collection7",
"collection8",
"collection9",
"collection10"
"collection_1",
"collection_2",
"collection_3",
"collection_4",
"collection_5",
"collection_6",
"collection_7",
"collection_8",
"collection_9",
"collection_10"
],
"collection_ids": [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10
],
"created_timestamps": [
1633036800, 1633123200, 1633209600, 1633296000, 1633382400, 1633468800, 1633555200, 1633641600, 1633728000, 1633814400
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
],
"created_utc_timestamps": [
1633036800, 1633123200, 1633209600, 1633296000, 1633382400, 1633468800, 1633555200, 1633641600, 1633728000, 1633814400
"2021-10-01 00:00:00",
"2021-10-02 00:00:00",
"2021-10-03 00:00:00",
"2021-10-04 00:00:00",
"2021-10-05 00:00:00",
"2021-10-06 00:00:00",
"2021-10-07 00:00:00",
"2021-10-08 00:00:00",
"2021-10-09 00:00:00",
"2021-10-10 00:00:00"
],
"inMemory_percentages": [
100, 90, 80, 70, 60, 50, 40, 30, 20, 10
100,
90,
80,
70,
60,
50,
40,
30,
20,
10
],
"query_service_available": [
true, false, false, false, false, false, false, false, false, false
true,
true,
true,
true,
true,
false,
false,
false,
false,
false
]
}
`
@ -556,63 +585,57 @@ const collectionRequest = `
]
`
const describeCollectionResp = `
const describeCollectionData = `
{
"status": {
"error_code": 0,
"reason": "Success"
},
"schema": {
"name": "example_collection",
"description": "This is an example collection schema",
"fields": [
{
"name": "field1",
"data_type": "INT64",
"is_primary_key": true,
"auto_id": false
},
{
"name": "field2",
"data_type": "FLOAT",
"is_primary_key": false,
"auto_id": false
}
]
},
"collectionID": 12345,
"virtual_channel_names": ["vchan1", "vchan2"],
"physical_channel_names": ["pchan1", "pchan2"],
"created_timestamp": 1633036800,
"created_utc_timestamp": 1633036800,
"shards_num": 2,
"aliases": ["alias1", "alias2"],
"start_positions": [
{
"key": "start_key",
"data": "start_data"
}
],
"consistency_level": 0,
"collection_id": "1",
"collection_name": "example_collection",
"properties": [
"created_time": "2021-10-01 00:00:00",
"shards_num": 2,
"consistency_level": "Strong",
"aliases": ["alias1", "alias2"],
"properties": {
"property_key": "property_value"
},
"db_name": "example_db",
"num_partitions": 3,
"virtual_channel_names": ["v_channel1", "v_channel2"],
"physical_channel_names": ["p_channel1", "p_channel2"],
"partition_infos": [
{
"key": "property_key",
"value": "property_value"
"partition_name": "partition1",
"partition_id": "1",
"created_utc_timestamp": "2021-10-01 00:00:00"
}
],
"db_name": "example_db",
"num_partitions": 1,
"db_id": 1
"enable_dynamic_field": true,
"fields": [
{
"field_id": "1",
"name": "field1",
"is_primary_key": true,
"description": "description1",
"data_type": "int64",
"type_params": {
"param_key": "param_value"
},
"index_params": {
"index_key": "index_value"
},
"auto_id": false,
"element_type": "element_type1",
"default_value": "default_value1",
"is_dynamic": false,
"is_partition_key": false,
"is_clustering_key": false,
"nullable": true,
"is_function_output": false
}
]
}
`
const databases = `
const listDatabaseData = `
{
"status": {
"error_code": "Success",
"reason": ""
},
"db_names": [
"database_1",
"database_2",
@ -625,33 +648,38 @@ const databases = `
"database_9",
"database_10"
],
"created_timestamp": [
1633036800,
1633123200,
1633209600,
1633296000,
1633382400,
1633468800,
1633555200,
1633641600,
1633728000,
1633814400
],
"db_ids": [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
],
"created_timestamps": [
"2021-10-01 00:00:00",
"2021-10-02 00:00:00",
"2021-10-03 00:00:00",
"2021-10-04 00:00:00",
"2021-10-05 00:00:00",
"2021-10-06 00:00:00",
"2021-10-07 00:00:00",
"2021-10-08 00:00:00",
"2021-10-09 00:00:00",
"2021-10-10 00:00:00"
]
}
`
const describeDatabaseResp = `
const describeDatabaseData = `
{
"status": {
"error_code": 0,
"reason": "Success"
},
"db_name": "example_db",
"dbID": 1,
"created_timestamp": 1633036800,
"db_id": 1,
"created_timestamp": "2021-10-01 00:00:00",
"properties": [
{
"key": "property_key",
@ -679,7 +707,7 @@ const qcCurrentTargets = `
"is_sorted": true,
"node_id": 1,
"is_invisible": false,
"loaded_timestamp": 1633072800,
"loaded_timestamp": "2021-10-01 00:00:00",
"index": [
{
"field_id": 1,
@ -701,20 +729,17 @@ const qcCurrentTargets = `
"collection_id": 1,
"channel_name": "channel1",
"unflushed_segment_ids": [
1
"1"
],
"flushed_segment_ids": [
2
"2"
],
"dropped_segment_ids": [
3
"3"
],
"level_zero_segment_ids": [
4
],
"partition_stats_versions": {
"1": 1
}
"4"
]
}
]
}
@ -774,7 +799,7 @@ const qcDist = `
"is_sorted": true,
"node_id": 1,
"is_invisible": false,
"loaded_timestamp": 1633072800,
"loaded_timestamp": "2021-10-01 00:00:00",
"index": [
{
"field_id": 1,
@ -798,15 +823,15 @@ const qcDist = `
"version": 1,
"collection_id": 1,
"channel_name": "channel1",
"unflushed_segment_ids": [1],
"flushed_segment_ids": [2],
"dropped_segment_ids": [3],
"level_zero_segment_ids": [4],
"unflushed_segment_ids": ["1"],
"flushed_segment_ids": ["2"],
"dropped_segment_ids": ["3"],
"level_zero_segment_ids": ["4"],
"partition_stats_versions": {
"1": 1
},
"watch_state": "Healthy",
"start_watch_ts": 1633072800
"start_watch_ts": "2021-10-01 00:00:00"
}
],
"leader_views": [
@ -830,7 +855,7 @@ const qcDist = `
"is_sorted": true,
"node_id": 1,
"is_invisible": false,
"loaded_timestamp": 1633072800,
"loaded_timestamp": "2021-10-01 00:00:00",
"index": [
{
"field_id": 1,
@ -958,7 +983,7 @@ const qnSegments = `
"is_sorted": true,
"node_id": 1,
"is_invisible": false,
"loaded_timestamp": 1620000000,
"loaded_timestamp": "2021-10-01 00:00:00",
"index": [
{
"field_id": 1,
@ -985,7 +1010,7 @@ const qnSegments = `
"is_sorted": true,
"node_id": 2,
"is_invisible": false,
"loaded_timestamp": 1620000001,
"loaded_timestamp": "2021-10-01 00:00:00",
"index": [
{
"field_id": 2,
@ -1059,22 +1084,22 @@ const dc_dist = `
"version": 1,
"collection_id": 100,
"channel_name": "channel1",
"unflushed_segment_ids": [1, 2, 3],
"flushed_segment_ids": [4, 5, 6],
"dropped_segment_ids": [7, 8, 9],
"unflushed_segment_ids": ["1", "2", "3"],
"flushed_segment_ids": ["4", "5", "6"],
"dropped_segment_ids": ["7", "8", "9"],
"watch_state": "success",
"start_watch_ts": 123456789
"start_watch_ts": "2023-10-01 12:05:00"
},
{
"node_id": 1,
"version": 1,
"collection_id": 100,
"channel_name": "channel3",
"unflushed_segment_ids": [1, 2, 3],
"flushed_segment_ids": [4, 5, 6],
"dropped_segment_ids": [7, 8, 9],
"unflushed_segment_ids": ["1", "2", "3"],
"flushed_segment_ids": ["4", "5", "6"],
"dropped_segment_ids": ["7", "8", "9"],
"watch_state": "to_watch",
"start_watch_ts": 123456789
"start_watch_ts": "2023-10-01 12:05:00"
}
]
}
@ -1090,7 +1115,7 @@ const dc_build_index_task = `
"index_state": "Finished",
"index_size": 1024,
"index_version": 1,
"create_time": 1633036800
"create_time": "2023-10-01 12:05:00"
},
{
"index_id": 2,
@ -1101,7 +1126,7 @@ const dc_build_index_task = `
"fail_reason": "Disk full",
"index_size": 2048,
"index_version": 2,
"create_time": 1633123200
"create_time": "2023-10-01 12:05:00"
}
]`
@ -1113,11 +1138,11 @@ const dc_compaction_task = `
"type": "Merge",
"state": "Completed",
"fail_reason": "",
"start_time": 1620000000,
"end_time": 1620003600,
"start_time": "2023-10-01 12:05:00",
"end_time": "2023-10-01 12:06:00",
"total_rows": 10000,
"input_segments": [1, 2, 3],
"result_segments": [4]
"input_segments": ["1", "2", "3"],
"result_segments": ["4"]
},
{
"plan_id": 2,
@ -1125,10 +1150,10 @@ const dc_compaction_task = `
"type": "Merge",
"state": "Failed",
"fail_reason": "Disk full",
"start_time": 1620007200,
"end_time": 1620010800,
"start_time": "2023-10-01 12:05:00",
"end_time": "2023-10-01 12:06:00",
"total_rows": 20000,
"input_segments": [5, 6, 7],
"input_segments": ["5", "6", "7"],
"result_segments": []
}
]`
@ -1139,8 +1164,8 @@ const dn_sync_task = `
"segment_id": 1,
"batch_rows": 1000,
"segment_level": "L1",
"ts_from": 1633036800,
"ts_to": 1633040400,
"ts_from": "2023-10-01 12:05:00",
"ts_to": "2023-10-01 12:06:00",
"delta_row_count": 10,
"flush_size": 1024,
"running_time": "100000000",
@ -1150,8 +1175,8 @@ const dn_sync_task = `
"segment_id": 2,
"batch_rows": 2000,
"segment_level": "L2",
"ts_from": 1633123200,
"ts_to": 1633126800,
"ts_from": "2023-10-01 12:05:00",
"ts_to": "2023-10-01 12:06:00",
"delta_row_count": 20,
"flush_size": 2048,
"running_time": "200000000",

View File

@ -126,7 +126,7 @@ function renderDatabases(currentPage, rowsPerPage) {
tableHTML += '<tr>';
tableHTML += `<td><a href="#" onclick="describeDatabase('${databaseData.db_names[i]}', ${i}, 'list-db')">${databaseData.db_names[i]}</a></td>`;
tableHTML += `<td>${databaseData.db_ids? databaseData.db_ids[i] : 0}</td>`;
tableHTML += `<td>${databaseData.created_timestamp? formatTimestamp(databaseData.created_timestamp[i]) : ''}</td>`;
tableHTML += `<td>${databaseData.created_timestamps[i]}</td>`;
tableHTML += '</tr>';
// Hidden row for displaying collection details as JSON
@ -165,7 +165,7 @@ function renderDatabases(currentPage, rowsPerPage) {
}
function describeDatabase(databaseName, rowIndex, type) {
fetchData(`${MILVUS_URI}/database?db_name=${databaseName}`, describeDatabaseResp)
fetchData(`${MILVUS_URI}/_db/desc?db_name=${databaseName}`, describeDatabaseData)
.then(data => {
// Format data as JSON and insert into the designated row
const jsonFormattedData = JSON.stringify(data, null, 2);
@ -182,7 +182,7 @@ function describeDatabase(databaseName, rowIndex, type) {
}
function describeCollection(databaseName, collectionName, rowIndex, type) {
fetchData(`${MILVUS_URI}/collection?db_name${databaseName}&&collection_name=${collectionName}`, describeCollectionResp)
fetchData(`${MILVUS_URI}/_collection/desc?db_name=${databaseName}&collection_name=${collectionName}`, describeCollectionData)
.then(data => {
// Format data as JSON and insert into the designated row
const jsonFormattedData = JSON.stringify(data, null, 2);
@ -199,7 +199,7 @@ function describeCollection(databaseName, collectionName, rowIndex, type) {
}
function fetchCollections(databaseName) {
fetchData(MILVUS_URI + `/collections?db_name=${databaseName}`, collections )
fetchData(MILVUS_URI + `/_collection/list?db_name=${databaseName}`, listCollectionData )
.then(data => {
collectionsData = data;
renderCollections(databaseName, startPage, paginationSize)
@ -212,7 +212,7 @@ function fetchCollections(databaseName) {
let collectionsData = null; // Global variable to store fetched data
function renderCollections(databaseName, currentPage, rowsPerPage) {
let data = collectionsData;
if (!data) {
if (!data || !data.collection_names) {
console.error('No collections data available');
return;
}
@ -229,12 +229,11 @@ function renderCollections(databaseName, currentPage, rowsPerPage) {
const start = currentPage * rowsPerPage;
const end = start + rowsPerPage;
const totalCount = data.collection_names.length;
console.log(data)
for (let i = start; i < end && i < totalCount; i++) {
tableHTML += '<tr>';
tableHTML += `<td><a href="#" onclick="describeCollection('${databaseName}', '${data.collection_names[i]}', ${i}, 'list-coll')">${data.collection_names[i]}</a></td>`;
tableHTML += `<td>${data.collection_ids[i]}</td>`;
tableHTML += `<td>${formatTimestamp(data.created_utc_timestamps[i])}</td>`;
tableHTML += `<td>${data.created_utc_timestamps[i]}</td>`;
tableHTML += `<td>${data.inMemory_percentages? data.inMemory_percentages[i]: 'unknown'}</td>`;
tableHTML += `<td>${data.query_service_available? data.query_service_available[i] ? 'Yes' : 'No' : 'No'}</td>`;
tableHTML += '</tr>';
@ -509,7 +508,7 @@ function renderDependencies(data) {
const tr = `
<tr>
<td><strong>${key === 'metastore'? 'metastore [' + row['meta_type'] + ']' : 'mq [' + row['mq_type'] + ']'} </strong> </td>
<td>${row['health_status']? 'Healthy' : 'Unhealthy:' + row['unhealthy_reason']}</td>
<td>${row['health_status']? 'Healthy' : row['unhealthy_reason']}</td>
<td>${row['members_health']? row['members_health'].map(member => `
<ul>
<li>Endpoint: ${member.endpoint}, Health: ${member.health ? "Healthy" : "Unhealthy"}</li>
@ -593,7 +592,7 @@ function renderBuildIndexTasks(data) {
tableHTML += `<td>${indexState}</td>`;
tableHTML += `<td>${task.index_size}</td>`;
tableHTML += `<td>${task.index_version}</td>`;
tableHTML += `<td>${new Date(task.create_time * 1000).toLocaleString()}</td>`;
tableHTML += `<td>${task.create_time}</td>`;
tableHTML += '</tr>';
});
@ -636,8 +635,8 @@ function renderCompactionTasks(data) {
tableHTML += `<td>${new Date(task.start_time * 1000).toLocaleString()}</td>`;
tableHTML += `<td>${new Date(task.end_time * 1000).toLocaleString()}</td>`;
tableHTML += `<td>${task.total_rows}</td>`;
tableHTML += `<td>${task.input_segments.join(', ')}</td>`;
tableHTML += `<td>${task.result_segments.join(', ')}</td>`;
tableHTML += `<td>${task.input_segments? task.input_segments.join(', '): ''}</td>`;
tableHTML += `<td>${task.result_segments? task.result_segments.join(', '): ''}</td>`;
tableHTML += '</tr>';
});
@ -677,8 +676,8 @@ function renderImportTasks(data) {
tableHTML += `<td>${task.node_id}</td>`;
tableHTML += `<td>${state}</td>`;
tableHTML += `<td>${task.task_type}</td>`;
tableHTML += `<td>${new Date(task.created_time).toLocaleString()}</td>`;
tableHTML += `<td>${new Date(task.complete_time).toLocaleString()}</td>`;
tableHTML += `<td>${task.created_time}</td>`;
tableHTML += `<td>${task.complete_time}</td>`;
tableHTML += '</tr>';
});
@ -711,8 +710,8 @@ function renderSyncTasks(data) {
tableHTML += `<td>${task.segment_id}</td>`;
tableHTML += `<td>${task.batch_rows}</td>`;
tableHTML += `<td>${task.segment_level}</td>`;
tableHTML += `<td>${new Date(task.ts_from * 1000).toLocaleString()}</td>`;
tableHTML += `<td>${new Date(task.ts_to * 1000).toLocaleString()}</td>`;
tableHTML += `<td>${task.ts_from}</td>`;
tableHTML += `<td>${task.ts_to}</td>`;
tableHTML += `<td>${task.delta_row_count}</td>`;
tableHTML += `<td>${task.flush_size}</td>`;
tableHTML += `<td>${task.running_time}</td>`;
@ -797,7 +796,7 @@ function renderChannels(channels, currentPage, rowsPerPage) {
<td>${channel.watch_state || "N/A"}</td>
<td>${channel.node_id}</td>
<td>${channel.latest_time_tick || "N/A"}</td>
<td>${formatTimestamp(channel.start_watch_ts) || "N/A"}</td>
<td>${channel.start_watch_ts || "N/A"}</td>
<td>${channel.check_point_ts || "N/A"}</td>
`;
table.appendChild(row);
@ -1114,7 +1113,7 @@ function renderSlowQueries(data) {
tableHTML += `<td>${query.time}</td>`;
tableHTML += `<td>${query.trace_id}</td>`;
tableHTML += `<td>${query.type}</td>`;
tableHTML += `<td>${query.user}</td>`;
tableHTML += `<td>${query.user || 'unknown'}</td>`;
tableHTML += `<td>${query.database}</td>`;
tableHTML += `<td>${query.collection}</td>`;
tableHTML += `<td>${JSON.stringify(query.query_params)}</td>`;
@ -1406,7 +1405,7 @@ function renderQueryChannels(channels, currentPage, rowsPerPage) {
<td>${channel.collection_id}</td>
<td>${channel.leader_id || 'Not Found'}</td>
<td>${channel.node_id}</td>
<td>${channel.watch_state}</td>
<td>${channel.watch_state||''}</td>
<td>${channel.from}</td>
</tr>
`;

View File

@ -27,12 +27,12 @@
<!-- Centered Links Section -->
<div class="row text-center mb-3">
<div class="col">
<a href="#link1" class="btn btn-link" style="font-size: 1.5em;">Pprof</a>
<a href="http://localhost:9091/debug/pprof" class="btn btn-link" style="font-size: 1.5em;">Pprof</a>
</div>
</div>
<div class="row text-center mb-3">
<div class="col">
<a href="#link2" class="btn btn-link" style="font-size: 1.5em;">Memory Data Visualization</a>
<a href="http://localhost:9091/static" class="btn btn-link" style="font-size: 1.5em;">Memory Data Visualization</a>
</div>
</div>
</div>

View File

@ -1009,20 +1009,20 @@ func TestCatalog_DropIndex(t *testing.T) {
func TestCatalog_CreateSegmentIndex(t *testing.T) {
segIdx := &model.SegmentIndex{
SegmentID: 1,
CollectionID: 2,
PartitionID: 3,
NumRows: 1024,
IndexID: 4,
BuildID: 5,
NodeID: 6,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
SegmentID: 1,
CollectionID: 2,
PartitionID: 3,
NumRows: 1024,
IndexID: 4,
BuildID: 5,
NodeID: 6,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
}
t.Run("success", func(t *testing.T) {
@ -1105,20 +1105,20 @@ func TestCatalog_ListSegmentIndexes(t *testing.T) {
func TestCatalog_AlterSegmentIndexes(t *testing.T) {
segIdx := &model.SegmentIndex{
SegmentID: 0,
CollectionID: 0,
PartitionID: 0,
NumRows: 0,
IndexID: 0,
BuildID: 0,
NodeID: 0,
IndexState: 0,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
SegmentID: 0,
CollectionID: 0,
PartitionID: 0,
NumRows: 0,
IndexID: 0,
BuildID: 0,
NodeID: 0,
IndexState: 0,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreatedUTCTime: 0,
IndexFileKeys: nil,
IndexSize: 0,
}
t.Run("add", func(t *testing.T) {

View File

@ -102,7 +102,7 @@ func MarshalIndexModel(index *Index) *indexpb.FieldIndex {
// }
//
// newIdx.IsDeleted = b.IsDeleted
// newIdx.CreateTime = b.CreateTime
// newIdx.CreatedUTCTime = b.CreatedUTCTime
//
// if newIdx.Extra == nil && b.Extra != nil {
// newIdx.Extra = b.Extra

View File

@ -7,20 +7,20 @@ import (
)
type SegmentIndex struct {
SegmentID int64
CollectionID int64
PartitionID int64
NumRows int64
IndexID int64
BuildID int64
NodeID int64
IndexVersion int64
IndexState commonpb.IndexState
FailReason string
IsDeleted bool
CreateTime uint64
IndexFileKeys []string
IndexSize uint64
SegmentID int64
CollectionID int64
PartitionID int64
NumRows int64
IndexID int64
BuildID int64
NodeID int64
IndexVersion int64
IndexState commonpb.IndexState
FailReason string
IsDeleted bool
CreatedUTCTime uint64
IndexFileKeys []string
IndexSize uint64
// deprecated
WriteHandoff bool
CurrentIndexVersion int32
@ -44,7 +44,7 @@ func UnmarshalSegmentIndexModel(segIndex *indexpb.SegmentIndex) *SegmentIndex {
FailReason: segIndex.FailReason,
IndexVersion: segIndex.IndexVersion,
IsDeleted: segIndex.Deleted,
CreateTime: segIndex.CreateTime,
CreatedUTCTime: segIndex.CreateTime,
IndexFileKeys: common.CloneStringList(segIndex.IndexFileKeys),
IndexSize: segIndex.SerializeSize,
WriteHandoff: segIndex.WriteHandoff,
@ -70,7 +70,7 @@ func MarshalSegmentIndexModel(segIdx *SegmentIndex) *indexpb.SegmentIndex {
IndexVersion: segIdx.IndexVersion,
IndexFileKeys: common.CloneStringList(segIdx.IndexFileKeys),
Deleted: segIdx.IsDeleted,
CreateTime: segIdx.CreateTime,
CreateTime: segIdx.CreatedUTCTime,
SerializeSize: segIdx.IndexSize,
WriteHandoff: segIdx.WriteHandoff,
CurrentIndexVersion: segIdx.CurrentIndexVersion,
@ -90,7 +90,7 @@ func CloneSegmentIndex(segIndex *SegmentIndex) *SegmentIndex {
FailReason: segIndex.FailReason,
IndexVersion: segIndex.IndexVersion,
IsDeleted: segIndex.IsDeleted,
CreateTime: segIndex.CreateTime,
CreatedUTCTime: segIndex.CreatedUTCTime,
IndexFileKeys: common.CloneStringList(segIndex.IndexFileKeys),
IndexSize: segIndex.IndexSize,
WriteHandoff: segIndex.WriteHandoff,

View File

@ -31,20 +31,20 @@ var (
}
indexModel2 = &SegmentIndex{
CollectionID: colID,
PartitionID: partID,
SegmentID: segmentID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreateTime: 1,
IndexFileKeys: nil,
IndexSize: 0,
CollectionID: colID,
PartitionID: partID,
SegmentID: segmentID,
NumRows: 1025,
IndexID: indexID,
BuildID: buildID,
NodeID: 0,
IndexState: commonpb.IndexState_Finished,
FailReason: "",
IndexVersion: 0,
IsDeleted: false,
CreatedUTCTime: 1,
IndexFileKeys: nil,
IndexSize: 0,
}
)

View File

@ -19,20 +19,33 @@ package proxy
import (
"encoding/json"
"net/http"
"strconv"
"strings"
"github.com/gin-gonic/gin"
"github.com/samber/lo"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
mhttp "github.com/milvus-io/milvus/internal/http"
"github.com/milvus-io/milvus/internal/proxy/connection"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/pkg/util/etcd"
"github.com/milvus-io/milvus/pkg/util/funcutil"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/tsoutil"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
var contentType = "application/json"
var (
contentType = "application/json"
defaultDB = "default"
httpDBName = "db_name"
HTTPCollectionName = "collection_name"
)
func getConfigs(configs map[string]string) gin.HandlerFunc {
return func(c *gin.Context) {
@ -192,3 +205,196 @@ func getDataComponentMetrics(node *Proxy, metricsType string) gin.HandlerFunc {
c.Data(http.StatusOK, contentType, []byte(resp.GetResponse()))
}
}
// GET requests should carry their parameters in the query string rather than the request body;
// the JavaScript fetch API, for example, only supports query parameters on GET requests.
func listCollection(node types.ProxyComponent) gin.HandlerFunc {
return func(c *gin.Context) {
dbName := c.Query(httpDBName)
if len(dbName) == 0 {
dbName = defaultDB
}
showCollectionResp, err := node.ShowCollections(c, &milvuspb.ShowCollectionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_ShowCollections,
},
DbName: dbName,
})
if err := merr.CheckRPCCall(showCollectionResp, err); err != nil {
c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
mhttp.HTTPReturnMessage: err.Error(),
})
return
}
// Convert the response to Collections struct
collections := &metricsinfo.Collections{
CollectionIDs: lo.Map(showCollectionResp.CollectionIds, func(t int64, i int) string {
return strconv.FormatInt(t, 10)
}),
CollectionNames: showCollectionResp.CollectionNames,
CreatedUtcTimestamps: lo.Map(showCollectionResp.CreatedUtcTimestamps, func(t uint64, i int) string {
return typeutil.TimestampToString(t)
}),
InMemoryPercentages: lo.Map(showCollectionResp.InMemoryPercentages, func(t int64, i int) int {
return int(t)
}),
QueryServiceAvailable: showCollectionResp.QueryServiceAvailable,
}
// Marshal the collections struct to JSON
collectionsJSON, err := json.Marshal(collections)
if err != nil {
c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
mhttp.HTTPReturnMessage: err.Error(),
})
return
}
c.Data(http.StatusOK, contentType, collectionsJSON)
}
}
func describeCollection(node types.ProxyComponent, rootCoord types.RootCoordClient) gin.HandlerFunc {
return func(c *gin.Context) {
dbName := c.Query(httpDBName)
collectionName := c.Query(HTTPCollectionName)
if len(dbName) == 0 {
dbName = defaultDB
}
if len(collectionName) == 0 {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
mhttp.HTTPReturnMessage: HTTPCollectionName + " is required",
})
return
}
describeCollectionResp, err := node.DescribeCollection(c, &milvuspb.DescribeCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_DescribeCollection,
},
DbName: dbName,
CollectionName: collectionName,
})
if err := merr.CheckRPCCall(describeCollectionResp, err); err != nil {
c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
mhttp.HTTPReturnMessage: err.Error(),
})
return
}
describePartitionResp, err := rootCoord.ShowPartitions(c, &milvuspb.ShowPartitionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_ShowPartitions,
},
DbName: dbName,
CollectionName: collectionName,
})
if err := merr.CheckRPCCall(describePartitionResp, err); err != nil {
c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
mhttp.HTTPReturnMessage: err.Error(),
})
return
}
// Convert the response to Collection struct
collection := &metricsinfo.Collection{
CollectionID: strconv.FormatInt(describeCollectionResp.CollectionID, 10),
CollectionName: describeCollectionResp.CollectionName,
CreatedTime: tsoutil.PhysicalTimeFormat(describeCollectionResp.CreatedUtcTimestamp),
ShardsNum: int(describeCollectionResp.ShardsNum),
ConsistencyLevel: describeCollectionResp.ConsistencyLevel.String(),
Aliases: describeCollectionResp.Aliases,
Properties: funcutil.KeyValuePair2Map(describeCollectionResp.Properties),
DBName: dbName,
NumPartitions: int(describeCollectionResp.NumPartitions),
VirtualChannelNames: describeCollectionResp.VirtualChannelNames,
PhysicalChannelNames: describeCollectionResp.PhysicalChannelNames,
PartitionInfos: metricsinfo.NewPartitionInfos(describePartitionResp),
EnableDynamicField: describeCollectionResp.Schema.EnableDynamicField,
Fields: metricsinfo.NewFields(describeCollectionResp.GetSchema()),
}
// Marshal the collection struct to JSON
collectionJSON, err := json.Marshal(collection)
if err != nil {
c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
mhttp.HTTPReturnMessage: err.Error(),
})
return
}
c.Data(http.StatusOK, contentType, collectionJSON)
}
}
func listDatabase(node types.ProxyComponent) gin.HandlerFunc {
return func(c *gin.Context) {
showDatabaseResp, err := node.ListDatabases(c, &milvuspb.ListDatabasesRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_ListDatabases,
},
})
if err := merr.CheckRPCCall(showDatabaseResp, err); err != nil {
c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
mhttp.HTTPReturnMessage: err.Error(),
})
return
}
// Convert the response to Databases struct
databases := metricsinfo.NewDatabases(showDatabaseResp)
// Marshal the databases struct to JSON
databasesJSON, err := json.Marshal(databases)
if err != nil {
c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
mhttp.HTTPReturnMessage: err.Error(),
})
return
}
c.Data(http.StatusOK, contentType, databasesJSON)
}
}
func describeDatabase(node types.ProxyComponent) gin.HandlerFunc {
return func(c *gin.Context) {
dbName := c.Query(httpDBName)
if len(dbName) == 0 {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
mhttp.HTTPReturnMessage: httpDBName + " is required",
})
return
}
describeDatabaseResp, err := node.DescribeDatabase(c, &milvuspb.DescribeDatabaseRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_DescribeDatabase,
},
DbName: dbName,
})
if err := merr.CheckRPCCall(describeDatabaseResp, err); err != nil {
c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
mhttp.HTTPReturnMessage: err.Error(),
})
return
}
// Convert the response to Database struct
database := metricsinfo.NewDatabase(describeDatabaseResp)
// Marshal the database struct to JSON
databaseJSON, err := json.Marshal(database)
if err != nil {
c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
mhttp.HTTPReturnMessage: err.Error(),
})
return
}
c.Data(http.StatusOK, contentType, databaseJSON)
}
}

View File

@ -13,6 +13,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/internal/mocks"
"github.com/milvus-io/milvus/internal/proxy/connection"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
@ -151,3 +152,210 @@ func TestGetDataComponentMetrics(t *testing.T) {
assert.Contains(t, w.Body.String(), "test_response")
})
}
func TestListCollection(t *testing.T) {
t.Run("list collections successfully", func(t *testing.T) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request, _ = http.NewRequest("GET", "/?db_name=default", nil)
mockProxy := mocks.NewMockProxy(t)
mockProxy.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&milvuspb.ShowCollectionsResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
CollectionIds: []int64{1, 2},
CollectionNames: []string{"collection1", "collection2"},
CreatedUtcTimestamps: []uint64{1633046400000, 1633132800000},
InMemoryPercentages: []int64{100, 100},
QueryServiceAvailable: []bool{true, true},
}, nil)
handler := listCollection(mockProxy)
handler(c)
assert.Equal(t, http.StatusOK, w.Code)
assert.Contains(t, w.Body.String(), "collection1")
assert.Contains(t, w.Body.String(), "collection2")
})
t.Run("list collections with error", func(t *testing.T) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request, _ = http.NewRequest("GET", "/?db_name=default", nil)
mockProxy := mocks.NewMockProxy(t)
mockProxy.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(nil, errors.New("error"))
handler := listCollection(mockProxy)
handler(c)
assert.Equal(t, http.StatusInternalServerError, w.Code)
assert.Contains(t, w.Body.String(), "error")
})
}
func TestDescribeCollection(t *testing.T) {
t.Run("describe collection successfully", func(t *testing.T) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request, _ = http.NewRequest("GET", "/?db_name=default&collection_name=collection1", nil)
mockProxy := mocks.NewMockProxy(t)
mockRootCoord := mocks.NewMockRootCoordClient(t)
mockProxy.EXPECT().DescribeCollection(mock.Anything, mock.Anything).Return(&milvuspb.DescribeCollectionResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
CollectionID: 1,
CollectionName: "collection1",
CreatedUtcTimestamp: 1633046400000,
ShardsNum: 2,
ConsistencyLevel: commonpb.ConsistencyLevel_Strong,
Aliases: []string{"alias1"},
Properties: []*commonpb.KeyValuePair{{Key: "key", Value: "value"}},
VirtualChannelNames: []string{"vchan1"},
PhysicalChannelNames: []string{"pchan1"},
NumPartitions: 1,
Schema: &schemapb.CollectionSchema{
EnableDynamicField: true,
Fields: []*schemapb.FieldSchema{
{
FieldID: 1,
Name: "field1",
DataType: schemapb.DataType_Int32,
},
},
},
}, nil)
mockRootCoord.EXPECT().ShowPartitions(mock.Anything, mock.Anything).Return(&milvuspb.ShowPartitionsResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
}, nil)
handler := describeCollection(mockProxy, mockRootCoord)
handler(c)
assert.Equal(t, http.StatusOK, w.Code)
assert.Contains(t, w.Body.String(), "collection1")
assert.Contains(t, w.Body.String(), "alias1")
})
t.Run("describe collection with error", func(t *testing.T) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request, _ = http.NewRequest("GET", "/?db_name=default&collection_name=collection1", nil)
mockProxy := mocks.NewMockProxy(t)
mockRootCoord := mocks.NewMockRootCoordClient(t)
mockProxy.EXPECT().DescribeCollection(mock.Anything, mock.Anything).Return(nil, errors.New("error"))
handler := describeCollection(mockProxy, mockRootCoord)
handler(c)
assert.Equal(t, http.StatusInternalServerError, w.Code)
assert.Contains(t, w.Body.String(), "error")
})
t.Run("missing collection_name", func(t *testing.T) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request, _ = http.NewRequest("GET", "/?db_name=default", nil)
mockProxy := mocks.NewMockProxy(t)
mockRootCoord := mocks.NewMockRootCoordClient(t)
handler := describeCollection(mockProxy, mockRootCoord)
handler(c)
assert.Equal(t, http.StatusBadRequest, w.Code)
assert.Contains(t, w.Body.String(), "collection_name is required")
})
}
func TestListDatabase(t *testing.T) {
t.Run("list databases successfully", func(t *testing.T) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request, _ = http.NewRequest("GET", "/", nil)
mockProxy := mocks.NewMockProxy(t)
mockProxy.EXPECT().ListDatabases(mock.Anything, mock.Anything).Return(&milvuspb.ListDatabasesResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
DbNames: []string{"db1", "db2"},
CreatedTimestamp: []uint64{1633046400000, 1633132800000},
}, nil)
handler := listDatabase(mockProxy)
handler(c)
assert.Equal(t, http.StatusOK, w.Code)
assert.Contains(t, w.Body.String(), "db1")
assert.Contains(t, w.Body.String(), "db2")
})
t.Run("list databases with error", func(t *testing.T) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request, _ = http.NewRequest("GET", "/", nil)
mockProxy := mocks.NewMockProxy(t)
mockProxy.EXPECT().ListDatabases(mock.Anything, mock.Anything).Return(nil, errors.New("error"))
handler := listDatabase(mockProxy)
handler(c)
assert.Equal(t, http.StatusInternalServerError, w.Code)
assert.Contains(t, w.Body.String(), "error")
})
}
func TestDescribeDatabase(t *testing.T) {
t.Run("describe database successfully", func(t *testing.T) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request, _ = http.NewRequest("GET", "/?db_name=db1", nil)
mockProxy := mocks.NewMockProxy(t)
mockProxy.EXPECT().DescribeDatabase(mock.Anything, mock.Anything).Return(&milvuspb.DescribeDatabaseResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
DbName: "db1",
DbID: 1,
CreatedTimestamp: 1633046400000,
Properties: []*commonpb.KeyValuePair{{Key: "key", Value: "value"}},
}, nil)
handler := describeDatabase(mockProxy)
handler(c)
assert.Equal(t, http.StatusOK, w.Code)
assert.Contains(t, w.Body.String(), "db1")
assert.Contains(t, w.Body.String(), "key")
assert.Contains(t, w.Body.String(), "value")
})
t.Run("describe database with error", func(t *testing.T) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request, _ = http.NewRequest("GET", "/?db_name=db1", nil)
mockProxy := mocks.NewMockProxy(t)
mockProxy.EXPECT().DescribeDatabase(mock.Anything, mock.Anything).Return(nil, errors.New("error"))
handler := describeDatabase(mockProxy)
handler(c)
assert.Equal(t, http.StatusInternalServerError, w.Code)
assert.Contains(t, w.Body.String(), "error")
})
t.Run("missing db_name", func(t *testing.T) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request, _ = http.NewRequest("GET", "/", nil)
mockProxy := mocks.NewMockProxy(t)
handler := describeDatabase(mockProxy)
handler(c)
assert.Equal(t, http.StatusBadRequest, w.Code)
assert.Contains(t, w.Body.String(), "db_name is required")
})
}

View File

@ -6534,6 +6534,14 @@ func (node *Proxy) RegisterRestRouter(router gin.IRouter) {
router.GET(http.DNSyncTasksPath, getDataComponentMetrics(node, metricsinfo.SyncTasks))
router.GET(http.DNSegmentsPath, getDataComponentMetrics(node, metricsinfo.DataSegments))
router.GET(http.DNChannelsPath, getDataComponentMetrics(node, metricsinfo.DataChannels))
// Database requests
router.GET(http.DatabaseListPath, listDatabase(node))
router.GET(http.DatabaseDescPath, describeDatabase(node))
// Collection requests
router.GET(http.CollectionListPath, listCollection(node))
router.GET(http.CollectionDescPath, describeCollection(node, node.rootCoord))
}
func (node *Proxy) CreatePrivilegeGroup(ctx context.Context, req *milvuspb.CreatePrivilegeGroupRequest) (*commonpb.Status, error) {

View File

@ -187,7 +187,7 @@ func TestAlterDatabase(t *testing.T) {
assert.Nil(t, err)
}
func TestDescribeDatabase(t *testing.T) {
func TestDescribeDatabaseTask(t *testing.T) {
rc := mocks.NewMockRootCoordClient(t)
rc.EXPECT().DescribeDatabase(mock.Anything, mock.Anything).Return(&rootcoordpb.DescribeDatabaseResponse{}, nil)

View File

@ -26,6 +26,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/metrics"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/tsoutil"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -135,7 +136,7 @@ func SegmentFromInfo(info *datapb.SegmentInfo) *Segment {
func newSegmentMetricsFrom(segment *Segment) *metricsinfo.Segment {
convertedSegment := metrics.NewSegmentFrom(segment.SegmentInfo)
convertedSegment.NodeID = segment.Node
convertedSegment.LoadedTimestamp = segment.Version
convertedSegment.LoadedTimestamp = tsoutil.PhysicalTimeFormat(segment.LastDeltaTimestamp)
convertedSegment.Index = lo.Map(lo.Values(segment.IndexInfo), func(e *querypb.FieldIndexInfo, i int) *metricsinfo.SegmentIndex {
return &metricsinfo.SegmentIndex{
IndexFieldID: e.FieldID,

View File

@ -233,7 +233,6 @@ func TestGetSegmentDistJSON(t *testing.T) {
assert.Equal(t, int64(1000), s.NumOfRows)
assert.Equal(t, "Flushed", s.State)
assert.Equal(t, int64(1), s.NodeID)
assert.Equal(t, int64(1), s.LoadedTimestamp)
} else if s.SegmentID == 2 {
assert.Equal(t, int64(200), s.CollectionID)
assert.Equal(t, int64(20), s.PartitionID)
@ -241,7 +240,6 @@ func TestGetSegmentDistJSON(t *testing.T) {
assert.Equal(t, int64(2000), s.NumOfRows)
assert.Equal(t, "Flushed", s.State)
assert.Equal(t, int64(2), s.NodeID)
assert.Equal(t, int64(1), s.LoadedTimestamp)
} else {
assert.Failf(t, "unexpected segment id", "unexpected segment id %d", s.SegmentID)
}

View File

@ -31,6 +31,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/querycoordv2/meta"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -502,17 +503,7 @@ func (task *LeaderTask) MarshalJSON() ([]byte, error) {
}
func marshalJSON(task Task) ([]byte, error) {
return json.Marshal(&struct {
TaskName string `json:"task_name,omitempty"`
CollectionID int64 `json:"collection_id,omitempty"`
Replica int64 `json:"replica_id,omitempty"`
TaskType string `json:"task_type,omitempty"`
TaskStatus string `json:"task_status,omitempty"`
Priority string `json:"priority,omitempty"`
Actions []string `json:"actions,omitempty"`
Step int `json:"step,omitempty"`
Reason string `json:"reason,omitempty"`
}{
return json.Marshal(&metricsinfo.QueryCoordTask{
TaskName: task.Name(),
CollectionID: task.CollectionID(),
Replica: task.ReplicaID(),

View File

@ -18,6 +18,7 @@ package task
import (
"context"
"encoding/json"
"math/rand"
"strings"
"testing"
@ -45,6 +46,7 @@ import (
"github.com/milvus-io/milvus/pkg/kv"
"github.com/milvus-io/milvus/pkg/util/etcd"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/testutils"
"github.com/milvus-io/milvus/pkg/util/typeutil"
@ -1837,8 +1839,11 @@ func (suite *TaskSuite) TestGetTasksJSON() {
suite.NoError(err)
actualJSON := scheduler.GetTasksJSON()
suite.Contains(actualJSON, "SegmentTask")
suite.Contains(actualJSON, "ChannelTask")
var tasks []*metricsinfo.QueryCoordTask
err = json.Unmarshal([]byte(actualJSON), &tasks)
suite.NoError(err)
suite.Equal(2, len(tasks))
}
func TestTask(t *testing.T) {

View File

@ -32,6 +32,7 @@ import (
"github.com/milvus-io/milvus/pkg/mq/msgdispatcher"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/tsoutil"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -65,7 +66,7 @@ func TestGetPipelineJSON(t *testing.T) {
{
Name: ch,
WatchState: "Healthy",
LatestTimeTick: typeutil.TimestampToString(0),
LatestTimeTick: tsoutil.PhysicalTimeFormat(0),
NodeID: paramtable.GetNodeID(),
CollectionID: 1,
},

View File

@ -30,6 +30,7 @@ import (
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/timerecord"
"github.com/milvus-io/milvus/pkg/util/tsoutil"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -170,7 +171,7 @@ func (m *manager) GetChannelStats() []*metricsinfo.Channel {
ret = append(ret, &metricsinfo.Channel{
Name: ch,
WatchState: p.Status(),
LatestTimeTick: typeutil.TimestampToString(tt),
LatestTimeTick: tsoutil.PhysicalTimeFormat(tt),
NodeID: paramtable.GetNodeID(),
CollectionID: p.GetCollectionID(),
})

View File

@ -1,6 +1,10 @@
package metrics
import (
"strconv"
"github.com/samber/lo"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
)
@ -23,12 +27,19 @@ func NewSegmentFrom(segment *datapb.SegmentInfo) *metricsinfo.Segment {
func NewDMChannelFrom(channel *datapb.VchannelInfo) *metricsinfo.DmChannel {
return &metricsinfo.DmChannel{
CollectionID: channel.GetCollectionID(),
ChannelName: channel.GetChannelName(),
UnflushedSegmentIds: channel.GetUnflushedSegmentIds(),
FlushedSegmentIds: channel.GetFlushedSegmentIds(),
DroppedSegmentIds: channel.GetDroppedSegmentIds(),
LevelZeroSegmentIds: channel.GetLevelZeroSegmentIds(),
PartitionStatsVersions: channel.GetPartitionStatsVersions(),
CollectionID: channel.GetCollectionID(),
ChannelName: channel.GetChannelName(),
UnflushedSegmentIds: lo.Map(channel.GetUnflushedSegmentIds(), func(t int64, i int) string {
return strconv.FormatInt(t, 10)
}),
FlushedSegmentIds: lo.Map(channel.GetFlushedSegmentIds(), func(t int64, i int) string {
return strconv.FormatInt(t, 10)
}),
DroppedSegmentIds: lo.Map(channel.GetDroppedSegmentIds(), func(t int64, i int) string {
return strconv.FormatInt(t, 10)
}),
LevelZeroSegmentIds: lo.Map(channel.GetLevelZeroSegmentIds(), func(t int64, i int) string {
return strconv.FormatInt(t, 10)
}),
}
}

View File

@ -14,7 +14,7 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.7
github.com/milvus-io/milvus-proto/go-api/v2 v2.3.4-0.20241108105827-266fb751b620
github.com/milvus-io/milvus-proto/go-api/v2 v2.3.4-0.20241111062829-6de3d96f664f
github.com/nats-io/nats-server/v2 v2.10.12
github.com/nats-io/nats.go v1.34.1
github.com/panjf2000/ants/v2 v2.7.2

View File

@ -488,8 +488,8 @@ github.com/milvus-io/cgosymbolizer v0.0.0-20240722103217-b7dee0e50119 h1:9VXijWu
github.com/milvus-io/cgosymbolizer v0.0.0-20240722103217-b7dee0e50119/go.mod h1:DvXTE/K/RtHehxU8/GtDs4vFtfw64jJ3PaCnFri8CRg=
github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b h1:TfeY0NxYxZzUfIfYe5qYDBzt4ZYRqzUjTR6CvUzjat8=
github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b/go.mod h1:iwW+9cWfIzzDseEBCCeDSN5SD16Tidvy8cwQ7ZY8Qj4=
github.com/milvus-io/milvus-proto/go-api/v2 v2.3.4-0.20241108105827-266fb751b620 h1:0IWUDtDloift7cQHalhdjuVkL/3qSeiXFqR7MofZBkg=
github.com/milvus-io/milvus-proto/go-api/v2 v2.3.4-0.20241108105827-266fb751b620/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/milvus-io/milvus-proto/go-api/v2 v2.3.4-0.20241111062829-6de3d96f664f h1:yLxT8NH0ixUOJMqJuk0xvGf0cKsr+N2xibyTat256PI=
github.com/milvus-io/milvus-proto/go-api/v2 v2.3.4-0.20241111062829-6de3d96f664f/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/milvus-io/pulsar-client-go v0.12.1 h1:O2JZp1tsYiO7C0MQ4hrUY/aJXnn2Gry6hpm7UodghmE=
github.com/milvus-io/pulsar-client-go v0.12.1/go.mod h1:dkutuH4oS2pXiGm+Ti7fQZ4MRjrMPZ8IJeEGAWMeckk=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
@ -655,7 +655,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=

View File

@ -15,7 +15,6 @@ import (
"encoding/json"
"github.com/milvus-io/milvus-proto/go-api/v2/rgpb"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
// ComponentInfos defines the interface of all component infos
@ -90,7 +89,7 @@ type SlowQuery struct {
Partitions string `json:"partitions,omitempty"`
ConsistencyLevel string `json:"consistency_level,omitempty"`
UseDefaultConsistency bool `json:"use_default_consistency,omitempty"`
GuaranteeTimestamp uint64 `json:"guarantee_timestamp,omitempty"`
GuaranteeTimestamp uint64 `json:"guarantee_timestamp,omitempty,string"`
Duration string `json:"duration,omitempty"`
User string `json:"user,omitempty"`
QueryParams *QueryParams `json:"query_params,omitempty"`
@@ -99,25 +98,24 @@ type SlowQuery struct {
}
type DmChannel struct {
NodeID int64 `json:"node_id,omitempty"`
Version int64 `json:"version,omitempty"`
CollectionID int64 `json:"collection_id,omitempty"`
ChannelName string `json:"channel_name,omitempty"`
UnflushedSegmentIds []int64 `json:"unflushed_segment_ids,omitempty"`
FlushedSegmentIds []int64 `json:"flushed_segment_ids,omitempty"`
DroppedSegmentIds []int64 `json:"dropped_segment_ids,omitempty"`
LevelZeroSegmentIds []int64 `json:"level_zero_segment_ids,omitempty"`
PartitionStatsVersions map[int64]int64 `json:"partition_stats_versions,omitempty"`
WatchState string `json:"watch_state,omitempty"`
StartWatchTS int64 `json:"start_watch_ts,omitempty"`
NodeID int64 `json:"node_id,omitempty"`
Version int64 `json:"version,omitempty,string"`
CollectionID int64 `json:"collection_id,omitempty,string"`
ChannelName string `json:"channel_name,omitempty"`
UnflushedSegmentIds []string `json:"unflushed_segment_ids,omitempty"`
FlushedSegmentIds []string `json:"flushed_segment_ids,omitempty"`
DroppedSegmentIds []string `json:"dropped_segment_ids,omitempty"`
LevelZeroSegmentIds []string `json:"level_zero_segment_ids,omitempty"`
WatchState string `json:"watch_state,omitempty"`
StartWatchTS string `json:"start_watch_ts,omitempty"`
}
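The ",string" tag option added throughout this file is the standard encoding/json mechanism for quoting numeric fields. A self-contained sketch of the difference, using hypothetical struct names:

package main

import (
	"encoding/json"
	"fmt"
)

// tagged and untagged are hypothetical types for illustration only.
type tagged struct {
	Version int64 `json:"version,omitempty,string"`
}

type untagged struct {
	Version int64 `json:"version,omitempty"`
}

func main() {
	a, _ := json.Marshal(tagged{Version: 1 << 60})
	b, _ := json.Marshal(untagged{Version: 1 << 60})
	fmt.Println(string(a)) // {"version":"1152921504606846976"}
	fmt.Println(string(b)) // {"version":1152921504606846976}
}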
type Segment struct {
SegmentID int64 `json:"segment_id,omitempty"`
CollectionID int64 `json:"collection_id,omitempty"`
PartitionID int64 `json:"partition_id,omitempty"`
SegmentID int64 `json:"segment_id,omitempty,string"`
CollectionID int64 `json:"collection_id,omitempty,string"`
PartitionID int64 `json:"partition_id,omitempty,string"`
Channel string `json:"channel,omitempty"`
NumOfRows int64 `json:"num_of_rows,omitempty"`
NumOfRows int64 `json:"num_of_rows,omitempty,string"`
State string `json:"state,omitempty"`
IsImporting bool `json:"is_importing,omitempty"`
Compacted bool `json:"compacted,omitempty"`
@@ -127,42 +125,54 @@ type Segment struct {
// load related
IsInvisible bool `json:"is_invisible,omitempty"`
LoadedTimestamp int64 `json:"loaded_timestamp,omitempty"`
LoadedTimestamp string `json:"loaded_timestamp,omitempty"`
Index []*SegmentIndex `json:"index,omitempty"`
ResourceGroup string `json:"resource_group,omitempty"`
LoadedInsertRowCount int64 `json:"loaded_insert_row_count,omitempty"` // insert row count for growing segment that excludes the deleted row count in QueryNode
MemSize int64 `json:"mem_size,omitempty"` // memory size of segment in QueryNode
LoadedInsertRowCount int64 `json:"loaded_insert_row_count,omitempty,string"` // insert row count for growing segment that excludes the deleted row count in QueryNode
MemSize int64 `json:"mem_size,omitempty,string"` // memory size of segment in QueryNode
// flush related
FlushedRows int64 `json:"flushed_rows,omitempty"`
SyncBufferRows int64 `json:"sync_buffer_rows,omitempty"`
SyncingRows int64 `json:"syncing_rows,omitempty"`
FlushedRows int64 `json:"flushed_rows,omitempty,string"`
SyncBufferRows int64 `json:"sync_buffer_rows,omitempty,string"`
SyncingRows int64 `json:"syncing_rows,omitempty,string"`
}
type SegmentIndex struct {
IndexFieldID int64 `json:"field_id,omitempty"`
IndexID int64 `json:"index_id,omitempty"`
BuildID int64 `json:"build_id,omitempty"`
IndexSize int64 `json:"index_size,omitempty"`
IsLoaded bool `json:"is_loaded,omitempty"`
IndexFieldID int64 `json:"field_id,omitempty,string"`
IndexID int64 `json:"index_id,omitempty,string"`
BuildID int64 `json:"build_id,omitempty,string"`
IndexSize int64 `json:"index_size,omitempty,string"`
IsLoaded bool `json:"is_loaded,omitempty,string"`
}
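Note that encoding/json honors ",string" on booleans as well, so IsLoaded above serializes as a quoted "true". A small sketch:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	v := struct {
		IsLoaded bool `json:"is_loaded,omitempty,string"`
	}{IsLoaded: true}
	b, _ := json.Marshal(v)
	fmt.Println(string(b)) // {"is_loaded":"true"}
}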
type QueryCoordTarget struct {
CollectionID int64 `json:"collection_id,omitempty"`
CollectionID int64 `json:"collection_id,omitempty,string"`
Segments []*Segment `json:"segments,omitempty"`
DMChannels []*DmChannel `json:"dm_channels,omitempty"`
}
type QueryCoordTask struct {
TaskName string `json:"task_name,omitempty"`
CollectionID int64 `json:"collection_id,omitempty,string"`
Replica int64 `json:"replica_id,omitempty,string"`
TaskType string `json:"task_type,omitempty"`
TaskStatus string `json:"task_status,omitempty"`
Priority string `json:"priority,omitempty"`
Actions []string `json:"actions,omitempty"`
Step int `json:"step,omitempty"`
Reason string `json:"reason,omitempty"`
}
type LeaderView struct {
LeaderID int64 `json:"leader_id,omitempty"`
CollectionID int64 `json:"collection_id,omitempty"`
LeaderID int64 `json:"leader_id,omitempty,string"`
CollectionID int64 `json:"collection_id,omitempty,string"`
NodeID int64 `json:"node_id,omitempty"`
Channel string `json:"channel,omitempty"`
Version int64 `json:"version,omitempty"`
Version int64 `json:"version,omitempty,string"`
SealedSegments []*Segment `json:"sealed_segments,omitempty"`
GrowingSegments []*Segment `json:"growing_segments,omitempty"`
TargetVersion int64 `json:"target_version,omitempty"`
NumOfGrowingRows int64 `json:"num_of_growing_rows,omitempty"`
TargetVersion int64 `json:"target_version,omitempty,string"`
NumOfGrowingRows int64 `json:"num_of_growing_rows,omitempty,string"`
UnServiceableError string `json:"unserviceable_error,omitempty"`
}
@@ -179,8 +189,8 @@ type ResourceGroup struct {
}
type Replica struct {
ID int64 `json:"ID,omitempty"`
CollectionID int64 `json:"collectionID,omitempty"`
ID int64 `json:"ID,omitempty,string"`
CollectionID int64 `json:"collectionID,omitempty,string"`
RWNodes []int64 `json:"rw_nodes,omitempty"`
ResourceGroup string `json:"resource_group,omitempty"`
RONodes []int64 `json:"ro_nodes,omitempty"`
@@ -192,8 +202,8 @@ type Channel struct {
Name string `json:"name,omitempty"`
WatchState string `json:"watch_state,omitempty"`
LatestTimeTick string `json:"latest_time_tick,omitempty"` // a time string indicating when the latest time tick of the channel was received
NodeID int64 `json:"node_id,omitempty"`
CollectionID int64 `json:"collection_id,omitempty"`
NodeID int64 `json:"node_id,omitempty,string"`
CollectionID int64 `json:"collection_id,omitempty,string"`
CheckpointTS string `json:"check_point_ts,omitempty"` // a time string, format like "2006-01-02 15:04:05"
}
@@ -290,16 +300,28 @@ type DataNodeConfiguration struct {
FlushInsertBufferSize int64 `json:"flush_insert_buffer_size"`
}
type IndexTaskStats struct {
IndexID int64 `json:"index_id,omitempty,string"`
CollectionID int64 `json:"collection_id,omitempty,string"`
SegmentID int64 `json:"segment_id,omitempty,string"`
BuildID int64 `json:"build_id,omitempty,string"`
IndexState string `json:"index_state,omitempty"`
FailReason string `json:"fail_reason,omitempty"`
IndexSize uint64 `json:"index_size,omitempty,string"`
IndexVersion int64 `json:"index_version,omitempty,string"`
CreatedUTCTime string `json:"create_time,omitempty"`
}
type SyncTask struct {
SegmentID int64 `json:"segment_id,omitempty"`
BatchRows int64 `json:"batch_rows,omitempty"`
SegmentLevel string `json:"segment_level,omitempty"`
TSFrom typeutil.Timestamp `json:"ts_from,omitempty"`
TSTo typeutil.Timestamp `json:"ts_to,omitempty"`
DeltaRowCount int64 `json:"delta_row_count,omitempty"`
FlushSize int64 `json:"flush_size,omitempty"`
RunningTime string `json:"running_time,omitempty"`
NodeID int64 `json:"node_id,omitempty"`
SegmentID int64 `json:"segment_id,omitempty,string"`
BatchRows int64 `json:"batch_rows,omitempty,string"`
SegmentLevel string `json:"segment_level,omitempty"`
TSFrom string `json:"ts_from,omitempty"`
TSTo string `json:"ts_to,omitempty"`
DeltaRowCount int64 `json:"delta_row_count,omitempty,string"`
FlushSize int64 `json:"flush_size,omitempty,string"`
RunningTime string `json:"running_time,omitempty"`
NodeID int64 `json:"node_id,omitempty,string"`
}
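On the decode side, json.Unmarshal also honors ",string", so Go clients can round-trip these payloads without custom parsing. A sketch with a hypothetical mirror type:

package main

import (
	"encoding/json"
	"fmt"
)

// syncTaskView is a hypothetical client-side mirror of SyncTask above.
type syncTaskView struct {
	SegmentID int64 `json:"segment_id,omitempty,string"`
	BatchRows int64 `json:"batch_rows,omitempty,string"`
}

func main() {
	payload := []byte(`{"segment_id":"448985852966957569","batch_rows":"1024"}`)
	var v syncTaskView
	if err := json.Unmarshal(payload, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.SegmentID, v.BatchRows) // 448985852966957569 1024
}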
// DataNodeInfos implements ComponentInfos
@@ -343,10 +365,10 @@ type DataCoordInfos struct {
}
type ImportTask struct {
JobID int64 `json:"job_id,omitempty"`
TaskID int64 `json:"task_id,omitempty"`
CollectionID int64 `json:"collection_id,omitempty"`
NodeID int64 `json:"node_id,omitempty"`
JobID int64 `json:"job_id,omitempty,string"`
TaskID int64 `json:"task_id,omitempty,string"`
CollectionID int64 `json:"collection_id,omitempty,string"`
NodeID int64 `json:"node_id,omitempty,string"`
State string `json:"state,omitempty"`
Reason string `json:"reason,omitempty"`
TaskType string `json:"task_type,omitempty"`
@@ -355,16 +377,16 @@ type ImportTask struct {
}
type CompactionTask struct {
PlanID int64 `json:"plan_id,omitempty"`
CollectionID int64 `json:"collection_id,omitempty"`
Type string `json:"type,omitempty"`
State string `json:"state,omitempty"`
FailReason string `json:"fail_reason,omitempty"`
StartTime int64 `json:"start_time,omitempty"`
EndTime int64 `json:"end_time,omitempty"`
TotalRows int64 `json:"total_rows,omitempty"`
InputSegments []int64 `json:"input_segments,omitempty"`
ResultSegments []int64 `json:"result_segments,omitempty"`
PlanID int64 `json:"plan_id,omitempty"`
CollectionID int64 `json:"collection_id,omitempty"`
Type string `json:"type,omitempty"`
State string `json:"state,omitempty"`
FailReason string `json:"fail_reason,omitempty"`
StartTime string `json:"start_time,omitempty"`
EndTime string `json:"end_time,omitempty"`
TotalRows int64 `json:"total_rows,omitempty,string"`
InputSegments []string `json:"input_segments,omitempty"`
ResultSegments []string `json:"result_segments,omitempty"`
}
// RootCoordConfiguration records the configuration of RootCoord.
@@ -377,3 +399,67 @@ type RootCoordInfos struct {
BaseComponentInfos
SystemConfigurations RootCoordConfiguration `json:"system_configurations"`
}
type Collections struct {
CollectionNames []string `json:"collection_names,omitempty"`
CollectionIDs []string `json:"collection_ids,omitempty"`
CreatedUtcTimestamps []string `json:"created_utc_timestamps,omitempty"`
// Load percentage on querynode when type is InMemory
InMemoryPercentages []int `json:"inMemory_percentages,omitempty"`
// Indicate whether query service is available
QueryServiceAvailable []bool `json:"query_service_available,omitempty"`
}
type PartitionInfo struct {
PartitionName string `json:"partition_name,omitempty"`
PartitionID int64 `json:"partition_id,omitempty,string"`
CreatedUtcTimestamp string `json:"created_utc_timestamp,omitempty"`
}
type Field struct {
FieldID string `json:"field_id,omitempty"`
Name string `json:"name,omitempty"`
IsPrimaryKey bool `json:"is_primary_key,omitempty"`
Description string `json:"description,omitempty"`
DataType string `json:"data_type,omitempty"`
TypeParams map[string]string `json:"type_params,omitempty"`
IndexParams map[string]string `json:"index_params,omitempty"`
AutoID bool `json:"auto_id,omitempty"`
ElementType string `json:"element_type,omitempty"`
DefaultValue string `json:"default_value,omitempty"`
IsDynamic bool `json:"is_dynamic,omitempty"`
IsPartitionKey bool `json:"is_partition_key,omitempty"`
IsClusteringKey bool `json:"is_clustering_key,omitempty"`
Nullable bool `json:"nullable,omitempty"`
IsFunctionOutput bool `json:"is_function_output,omitempty"`
}
type Collection struct {
CollectionID string `json:"collection_id,omitempty"`
CollectionName string `json:"collection_name,omitempty"`
CreatedTime string `json:"created_time,omitempty"`
ShardsNum int `json:"shards_num,omitempty"`
ConsistencyLevel string `json:"consistency_level,omitempty"`
Aliases []string `json:"aliases,omitempty"`
Properties map[string]string `json:"properties,omitempty"`
DBName string `json:"db_name,omitempty"`
NumPartitions int `json:"num_partitions,omitempty,string"`
VirtualChannelNames []string `json:"virtual_channel_names,omitempty"`
PhysicalChannelNames []string `json:"physical_channel_names,omitempty"`
PartitionInfos []*PartitionInfo `json:"partition_infos,omitempty"`
EnableDynamicField bool `json:"enable_dynamic_field,omitempty"`
Fields []*Field `json:"fields,omitempty"`
}
type Database struct {
DBName string `json:"db_name,omitempty"`
DBID int64 `json:"dbID,omitempty,string"`
CreatedTimestamp string `json:"created_timestamp,omitempty"`
Properties map[string]string `json:"properties,omitempty"`
}
type Databases struct {
Names []string `json:"db_names,omitempty"`
IDs []string `json:"db_ids,omitempty"`
CreatedTimestamps []string `json:"created_timestamps,omitempty"`
}

View File

@@ -14,14 +14,18 @@ package metricsinfo
import (
"encoding/json"
"os"
"strconv"
"strings"
"time"
"github.com/samber/lo"
"go.uber.org/zap"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/util/funcutil"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@@ -125,3 +129,61 @@ func NewSlowQueryWithSearchRequest(request *milvuspb.SearchRequest, user string,
Time: time.Now().Format(time.DateTime),
}
}
func NewPartitionInfos(partitions *milvuspb.ShowPartitionsResponse) []*PartitionInfo {
partitionInfos := make([]*PartitionInfo, len(partitions.PartitionNames))
for i := range partitions.PartitionNames {
partitionInfos[i] = &PartitionInfo{
PartitionName: partitions.PartitionNames[i],
PartitionID: partitions.PartitionIDs[i],
CreatedUtcTimestamp: typeutil.TimestampToString(partitions.CreatedUtcTimestamps[i]),
}
}
return partitionInfos
}
func NewFields(fields *schemapb.CollectionSchema) []*Field {
fieldInfos := make([]*Field, len(fields.Fields))
for i, f := range fields.Fields {
fieldInfos[i] = &Field{
FieldID: strconv.FormatInt(f.FieldID, 10),
Name: f.Name,
IsPrimaryKey: f.IsPrimaryKey,
Description: f.Description,
DataType: f.DataType.String(),
TypeParams: funcutil.KeyValuePair2Map(f.TypeParams),
IndexParams: funcutil.KeyValuePair2Map(f.IndexParams),
AutoID: f.AutoID,
ElementType: f.ElementType.String(),
DefaultValue: f.DefaultValue.String(),
IsDynamic: f.IsDynamic,
IsPartitionKey: f.IsPartitionKey,
IsClusteringKey: f.IsClusteringKey,
Nullable: f.Nullable,
IsFunctionOutput: f.IsFunctionOutput,
}
}
return fieldInfos
}
func NewDatabase(resp *milvuspb.DescribeDatabaseResponse) *Database {
return &Database{
DBName: resp.GetDbName(),
DBID: resp.GetDbID(),
CreatedTimestamp: typeutil.TimestampToString(uint64(int64(resp.GetCreatedTimestamp()) / int64(time.Millisecond) / int64(time.Nanosecond))),
Properties: funcutil.KeyValuePair2Map(resp.GetProperties()),
}
}
func NewDatabases(resp *milvuspb.ListDatabasesResponse) *Databases {
createdTimestamps := make([]string, len(resp.GetCreatedTimestamp()))
for i, ts := range resp.GetCreatedTimestamp() {
createdTimestamps[i] = typeutil.TimestampToString(uint64(int64(ts) / int64(time.Millisecond) / int64(time.Nanosecond)))
}
return &Databases{
Names: resp.GetDbNames(),
IDs: lo.Map(resp.GetDbIds(), func(t int64, i int) string { return strconv.FormatInt(t, 10) }),
CreatedTimestamps: createdTimestamps,
}
}
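A hedged usage sketch of NewDatabases; the proto field names here are assumptions inferred from the getters above:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
	"github.com/milvus-io/milvus/pkg/util/metricsinfo"
)

func main() {
	// Proto field names are assumed from the getters used above
	// (GetDbNames, GetDbIds, GetCreatedTimestamp).
	resp := &milvuspb.ListDatabasesResponse{
		DbNames:          []string{"default"},
		DbIds:            []int64{1},
		CreatedTimestamp: []uint64{uint64(time.Now().UnixNano())},
	}
	out, _ := json.Marshal(metricsinfo.NewDatabases(resp))
	fmt.Println(string(out)) // e.g. {"db_names":["default"],"db_ids":["1"],"created_timestamps":["2024-11-14 22:52:30"]}
}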

View File

@@ -95,3 +95,7 @@ func SubByNow(ts uint64) int64 {
now := time.Now().UnixMilli()
return now - utcT
}
func PhysicalTimeFormat(ts uint64) string {
return PhysicalTime(ts).Format(time.DateTime)
}
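A usage sketch, under the assumption that PhysicalTime decodes the physical (millisecond) component of a Milvus hybrid timestamp and that ComposeTSByTime is available in the same package:

package main

import (
	"fmt"
	"time"

	"github.com/milvus-io/milvus/pkg/util/tsoutil"
)

func main() {
	// Assumes the usual Milvus hybrid-timestamp layout (physical
	// milliseconds above an 18-bit logical counter) and that
	// ComposeTSByTime lives in the same tsoutil package.
	ts := tsoutil.ComposeTSByTime(time.Now(), 0)
	fmt.Println(tsoutil.PhysicalTimeFormat(ts)) // e.g. "2024-11-14 22:52:30"
}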

View File

@@ -47,6 +47,9 @@ func SubTimeByWallClock(after, before time.Time) time.Duration {
}
func TimestampToString(ts uint64) string {
ut := time.Unix(int64(ts), 0)
if ts <= 0 {
return ""
}
ut := time.UnixMilli(int64(ts))
return ut.Format(time.DateTime)
}
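The revised helper treats its input as Unix milliseconds and maps a zero timestamp to the empty string. A standalone mirror of the logic for illustration:

package main

import (
	"fmt"
	"time"
)

// timestampToString mirrors the revised typeutil.TimestampToString above.
func timestampToString(ts uint64) string {
	if ts == 0 { // for an unsigned value this is what ts <= 0 reduces to
		return ""
	}
	return time.UnixMilli(int64(ts)).Format(time.DateTime)
}

func main() {
	fmt.Println(timestampToString(0) == "")                        // true
	fmt.Println(timestampToString(uint64(time.Now().UnixMilli()))) // e.g. "2024-11-14 22:52:30"
}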