Fix misuse of timerecord (#15868)

Signed-off-by: cai.zhang <cai.zhang@zilliz.com>
cai.zhang 2022-03-04 15:45:56 +08:00 committed by GitHub
parent 272a347ddd
commit 2512dff261
4 changed files with 17 additions and 12 deletions
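For reviewers, the pattern being corrected is the same in every hunk below: the Prometheus histogram should observe the time.Duration returned by the Record(...) call itself, converted to milliseconds, instead of a separate RecordSpan() call. Both calls advance the recorder's checkpoint, so calling them back to back means whichever runs second measures only the gap between the two calls, and either the metric or the logged span ends up near zero; in the IndexNode hunks the raw Duration was additionally cast straight to float64, which reports nanoseconds rather than the milliseconds the histogram expects. The sketch below is a minimal standalone illustration of that difference, not the Milvus timerecord package; miniRecorder and observeMs are hypothetical stand-ins with the same semantics.

package main

import (
	"fmt"
	"time"
)

// miniRecorder is a hypothetical stand-in for timerecord.TimeRecorder:
// Record returns the duration since the previous checkpoint and advances it,
// exactly like RecordSpan, but is also the call that produces the log line.
type miniRecorder struct{ last time.Time }

func newMiniRecorder() *miniRecorder { return &miniRecorder{last: time.Now()} }

func (r *miniRecorder) Record(msg string) time.Duration {
	d := time.Since(r.last)
	r.last = time.Now()
	fmt.Printf("%s: %v\n", msg, d)
	return d
}

func (r *miniRecorder) RecordSpan() time.Duration {
	d := time.Since(r.last)
	r.last = time.Now()
	return d
}

// observeMs stands in for a Prometheus histogram's Observe(float64) that
// expects a value in milliseconds.
func observeMs(v float64) { fmt.Printf("observed %.3f ms\n", v) }

func main() {
	tr := newMiniRecorder()
	time.Sleep(50 * time.Millisecond) // pretend this is the work being timed

	// Misuse (old code): Record already advanced the checkpoint, so RecordSpan
	// measures only the gap between the two calls, and float64(Duration) is
	// nanoseconds, not milliseconds.
	tr.Record("work done (old pattern)")
	observeMs(float64(tr.RecordSpan())) // near-zero span, reported in ns

	time.Sleep(50 * time.Millisecond) // more simulated work

	// Fix (new code): reuse the duration returned by Record and convert once.
	workDur := tr.Record("work done (new pattern)")
	observeMs(float64(workDur.Milliseconds()))
}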


@@ -477,8 +477,9 @@ func (it *IndexBuildTask) buildIndex(ctx context.Context) ([]*storage.Blob, erro
 if err != nil {
 return nil, err
 }
-it.tr.Record("index codec serialize done")
-metrics.IndexNodeEncodeIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.NodeID, 10), strconv.FormatInt(it.segmentID, 10)).Observe(float64(it.tr.RecordSpan()))
+encodeIndexFileDur := it.tr.Record("index codec serialize done")
+metrics.IndexNodeEncodeIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.NodeID, 10),
+strconv.FormatInt(it.segmentID, 10)).Observe(float64(encodeIndexFileDur.Milliseconds()))
 return serializedIndexBlobs, nil
 }
@@ -591,8 +592,9 @@ func (it *IndexBuildTask) Execute(ctx context.Context) error {
 it.SetState(TaskStateRetry)
 return err
 }
-metrics.IndexNodeSaveIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.NodeID, 10), strconv.FormatInt(it.segmentID, 10)).Observe(float64(it.tr.RecordSpan()))
-it.tr.Record("index file save done")
+saveIndexFileDur := it.tr.Record("index file save done")
+metrics.IndexNodeSaveIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.NodeID, 10),
+strconv.FormatInt(it.segmentID, 10)).Observe(float64(saveIndexFileDur.Milliseconds()))
 it.tr.Elapse("index building all done")
 log.Info("IndexNode CreateIndex successfully ", zap.Int64("collect", it.collectionID),
 zap.Int64("partition", it.partitionID), zap.Int64("segment", it.segmentID))


@@ -22,6 +22,8 @@ import (
 "sync"
 "time"
+"github.com/milvus-io/milvus/internal/util/timerecord"
 "github.com/milvus-io/milvus/internal/metrics"
 "go.uber.org/zap"
@@ -93,13 +95,13 @@ func (ticker *channelsTimeTickerImpl) initCurrents(current Timestamp) {
 }
 func (ticker *channelsTimeTickerImpl) tick() error {
-applyStart := time.Now()
+tr := timerecord.NewTimeRecorder("applyTimestamp")
 now, err := ticker.tso.AllocOne()
 if err != nil {
 log.Warn("Proxy channelsTimeTickerImpl failed to get ts from tso", zap.Error(err))
 return err
 }
-metrics.ProxyApplyTimestampLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10)).Observe(float64(time.Since(applyStart).Milliseconds()))
+metrics.ProxyApplyTimestampLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
 stats, err := ticker.getStatisticsFunc()
 if err != nil {


@@ -2535,10 +2535,11 @@ func (node *Proxy) Search(ctx context.Context, request *milvuspb.SearchRequest)
 strconv.FormatInt(qt.CollectionID, 10), metrics.SearchLabel, metrics.SuccessLabel).Inc()
 metrics.ProxySearchVectors.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10),
 strconv.FormatInt(qt.CollectionID, 10), metrics.SearchLabel).Set(float64(qt.result.Results.NumQueries))
+searchDur := tr.ElapseSpan().Milliseconds()
 metrics.ProxySearchLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10),
-strconv.FormatInt(qt.CollectionID, 10), metrics.SearchLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
+strconv.FormatInt(qt.CollectionID, 10), metrics.SearchLabel).Observe(float64(searchDur))
 metrics.ProxySearchLatencyPerNQ.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10),
-strconv.FormatInt(qt.CollectionID, 10)).Observe(float64(tr.ElapseSpan().Milliseconds()) / float64(qt.result.Results.NumQueries))
+strconv.FormatInt(qt.CollectionID, 10)).Observe(float64(searchDur) / float64(qt.result.Results.NumQueries))
 return qt.result, nil
 }

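The Search hunk above makes a related fix for a duration that is read more than once: the old code evaluated tr.ElapseSpan() independently for the total-latency and per-NQ-latency histograms, so the two observations came from two slightly different readings. Capturing the elapsed time once and reusing it keeps them consistent. Below is a minimal standalone sketch of the difference, using time.Since as a stand-in for the recorder's elapsed-time call; the variable names are hypothetical.

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(20 * time.Millisecond) // pretend this is the search being timed
	numQueries := 4

	// Old pattern: two independent readings of the elapsed time. The values
	// differ by however long the first observation took, so total latency and
	// per-query latency are no longer derived from the same measurement.
	totalA := float64(time.Since(start).Milliseconds())
	perQueryA := float64(time.Since(start).Milliseconds()) / float64(numQueries)

	// New pattern: read the elapsed time once and reuse it for both metrics.
	elapsedMs := time.Since(start).Milliseconds()
	totalB := float64(elapsedMs)
	perQueryB := float64(elapsedMs) / float64(numQueries)

	fmt.Println("old pattern:", totalA, perQueryA)
	fmt.Println("new pattern:", totalB, perQueryB)
}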

@@ -832,8 +832,8 @@ func (it *insertTask) Execute(ctx context.Context) error {
 it.result.Status.Reason = err.Error()
 return err
 }
-metrics.ProxySendInsertReqLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), collectionName).Observe(float64(tr.RecordSpan().Milliseconds()))
-tr.Record("send insert request to message stream")
+sendMsgDur := tr.Record("send insert request to message stream")
+metrics.ProxySendInsertReqLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), collectionName).Observe(float64(sendMsgDur.Milliseconds()))
 return nil
 }
@@ -1488,14 +1488,14 @@ func (st *searchTask) Execute(ctx context.Context) error {
 if err != nil {
 log.Debug("proxy", zap.String("send search request failed", err.Error()))
 }
-metrics.ProxySendMessageLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), collectionName, metrics.SearchLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
 st.tr.Record("send message done")
 log.Debug("proxy sent one searchMsg",
 zap.Int64("collectionID", st.CollectionID),
 zap.Int64("msgID", tsMsg.ID()),
 zap.Int("length of search msg", len(msgPack.Msgs)),
 zap.Uint64("timeoutTs", st.SearchRequest.TimeoutTimestamp))
-tr.Record("send search msg to message stream")
+sendMsgDur := tr.Record("send search msg to message stream")
+metrics.ProxySendMessageLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), collectionName, metrics.SearchLabel).Observe(float64(sendMsgDur.Milliseconds()))
 return err
 }