Mirror of https://gitee.com/milvus-io/milvus.git, synced 2024-11-29 18:38:44 +08:00
Use the same lint rules with golint in revive (#13324)
Signed-off-by: Xiangyu Wang <xiangyu.wang@zilliz.com>
parent 8476791492
commit 405b3cd932
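The Go changes in this commit all follow one pattern flagged by revive's var-declaration check (one of its golint-equivalent rules): drop initializers that just restate the zero value, and drop explicit types that the compiler already infers from the initializer. A minimal, self-contained sketch of the pattern (the variable names here are illustrative, not taken from the diff):

// Sketch only: hypothetical names showing the simplification applied throughout this commit.
package main

import "fmt"

type client struct{ addr string }

func main() {
	// Flagged forms: the zero value and the obvious type are stated redundantly.
	//   var count int64 = 0
	//   var c *client = nil
	//   var path string = "/tmp/data"

	// Preferred forms: zero values come for free, and the type is inferred.
	var count int64
	var c *client
	var path = "/tmp/data"

	fmt.Println(count, c, path)
}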
@@ -11,14 +11,6 @@ linters-settings:
   misspell:
     locale: US
-
-  revive:
-    ignore-generated-header: true
-    severity: warning
-    confidence: 0.8
-    rules:
-      - name: context-as-argument
-        severity: warning

 linters:
   disable-all: true
   enable:
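The removed block above had pinned revive to a single rule, context-as-argument, with custom severity and confidence settings; per the commit title, dropping it lets revive run with its golint-compatible defaults instead. For reference, context-as-argument is the check that context.Context should be the first parameter of a function. A minimal sketch (function and parameter names are illustrative):

// Sketch only: hypothetical functions showing what the context-as-argument rule enforces.
package main

import "context"

// A signature like this would be flagged, because ctx is not the first parameter:
//   func fetchRows(limit int, ctx context.Context) error

// Preferred: context.Context comes first.
func fetchRows(ctx context.Context, limit int) error {
	_ = ctx
	_ = limit
	return nil
}

func main() {
	_ = fetchRows(context.Background(), 10)
}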
@@ -35,7 +35,7 @@ const (
 	maxOperationsPerTxn = 128
 )

-var errUnknownOpType error = errors.New("unknown operation type")
+var errUnknownOpType = errors.New("unknown operation type")

 // ChannelOpType type alias uses int8 stands for Channel operation type
 type ChannelOpType int8
@@ -142,7 +142,7 @@ func (m *meta) GetSegmentsChanPart(selector SegmentInfoSelector) []*chanPartSegm
 func (m *meta) GetNumRowsOfCollection(collectionID UniqueID) int64 {
 	m.RLock()
 	defer m.RUnlock()
-	var ret int64 = 0
+	var ret int64
 	segments := m.segments.GetSegments()
 	for _, segment := range segments {
 		if isSegmentHealthy(segment) && segment.GetCollectionID() == collectionID {
@@ -595,7 +595,7 @@ func (m *meta) GetSegmentsIDOfPartition(collectionID, partitionID UniqueID) []Un
 func (m *meta) GetNumRowsOfPartition(collectionID UniqueID, partitionID UniqueID) int64 {
 	m.RLock()
 	defer m.RUnlock()
-	var ret int64 = 0
+	var ret int64
 	segments := m.segments.GetSegments()
 	for _, segment := range segments {
 		if isSegmentHealthy(segment) && segment.CollectionID == collectionID && segment.PartitionID == partitionID {
@@ -468,7 +468,7 @@ func TestAllocationPool(t *testing.T) {
 	})

 	t.Run("put nil", func(t *testing.T) {
-		var allo *Allocation = nil
+		var allo *Allocation
 		allocPool = sync.Pool{
 			New: func() interface{} {
 				return &Allocation{}
@@ -1997,7 +1997,7 @@ func TestOptions(t *testing.T) {
 	})
 	t.Run("SetDataNodeCreator", func(t *testing.T) {
 		var target int64
-		var val int64 = rand.Int63()
+		var val = rand.Int63()
		opt := SetDataNodeCreator(func(context.Context, string) (types.DataNode, error) {
			target = val
			return nil, nil
@@ -342,12 +342,12 @@ func TestBytesReader(t *testing.T) {
 	// Bytes Reader is able to recording the position
 	rawDataReader := bytes.NewReader(rawData)

-	var fvector []float32 = make([]float32, 2)
+	var fvector = make([]float32, 2)
 	err := binary.Read(rawDataReader, common.Endian, &fvector)
 	assert.Nil(t, err)
 	assert.ElementsMatch(t, fvector, []float32{1, 2})

-	var bvector []byte = make([]byte, 4)
+	var bvector = make([]byte, 4)
 	err = binary.Read(rawDataReader, common.Endian, &bvector)
 	assert.Nil(t, err)
 	assert.ElementsMatch(t, bvector, []byte{255, 255, 255, 0})
@@ -503,7 +503,7 @@ func (ibNode *insertBufferNode) bufferInsertMsg(msg *msgstream.InsertMsg, endPos

 			fieldData := idata.Data[field.FieldID].(*storage.FloatVectorFieldData)
 			for _, r := range blobReaders {
-				var v []float32 = make([]float32, dim)
+				var v = make([]float32, dim)

 				readBinary(r, &v, field.DataType)

@@ -535,7 +535,7 @@ func (ibNode *insertBufferNode) bufferInsertMsg(msg *msgstream.InsertMsg, endPos
 			fieldData := idata.Data[field.FieldID].(*storage.BinaryVectorFieldData)

 			for _, r := range blobReaders {
-				var v []byte = make([]byte, dim/8)
+				var v = make([]byte, dim/8)
 				readBinary(r, &v, field.DataType)

 				fieldData.Data = append(fieldData.Data, v...)
@@ -168,7 +168,7 @@ func (t *flushTaskRunner) waitFinish(notifyFunc notifyMetaFunc, postFunc taskPos
 	<-t.startSignal

 	pack := t.getFlushPack()
-	var postInjection postInjectionFunc = nil
+	var postInjection postInjectionFunc
 	select {
 	case injection := <-t.injectSignal:
 		// notify injected
@@ -170,7 +170,7 @@ func (replica *SegmentReplica) segmentFlushed(segID UniqueID) {
 }

 func (replica *SegmentReplica) new2NormalSegment(segID UniqueID) {
-	var seg Segment = *replica.newSegments[segID]
+	var seg = *replica.newSegments[segID]

 	seg.isNew.Store(false)
 	replica.normalSegments[segID] = &seg
@@ -179,7 +179,7 @@ func (replica *SegmentReplica) new2NormalSegment(segID UniqueID) {
 }

 func (replica *SegmentReplica) new2FlushedSegment(segID UniqueID) {
-	var seg Segment = *replica.newSegments[segID]
+	var seg = *replica.newSegments[segID]

 	seg.isNew.Store(false)
 	seg.isFlushed.Store(true)
@@ -191,7 +191,7 @@ func (replica *SegmentReplica) new2FlushedSegment(segID UniqueID) {
 // normal2FlushedSegment transfers a segment from *normal* to *flushed* by changing *isFlushed*
 // flag into true, and mv the segment from normalSegments map to flushedSegments map.
 func (replica *SegmentReplica) normal2FlushedSegment(segID UniqueID) {
-	var seg Segment = *replica.normalSegments[segID]
+	var seg = *replica.normalSegments[segID]

 	seg.isFlushed.Store(true)
 	replica.flushedSegments[segID] = &seg
@@ -131,7 +131,7 @@ func (i *IndexCoord) Register() error {

 // Init initializes the IndexCoord component.
 func (i *IndexCoord) Init() error {
-	var initErr error = nil
+	var initErr error
 	Params.InitOnce()
 	i.initOnce.Do(func() {
 		log.Debug("IndexCoord", zap.Strings("etcd endpoints", Params.EtcdEndpoints))
@@ -233,7 +233,7 @@ func (i *IndexCoord) Init() error {

 // Start starts the IndexCoord component.
 func (i *IndexCoord) Start() error {
-	var startErr error = nil
+	var startErr error
 	i.startOnce.Do(func() {
 		i.loopWg.Add(1)
 		go i.tsLoop()
@@ -532,7 +532,7 @@ func (i *IndexCoord) GetIndexFilePaths(ctx context.Context, req *indexpb.GetInde
 	log.Debug("IndexCoord GetIndexFilePaths", zap.Int64s("IndexBuildIds", req.IndexBuildIDs))
 	sp, _ := trace.StartSpanFromContextWithOperationName(ctx, "IndexCoord-BuildIndex")
 	defer sp.Finish()
-	var indexPaths []*indexpb.IndexFilePathInfo = nil
+	var indexPaths []*indexpb.IndexFilePathInfo

 	for _, indexID := range req.IndexBuildIDs {
 		indexPathInfo, err := i.metaTable.GetIndexFilePathInfo(indexID)
@@ -97,7 +97,7 @@ func (pq *PriorityQueue) CheckExist(nodeID UniqueID) bool {
 }

 func (pq *PriorityQueue) getItemByKey(key UniqueID) interface{} {
-	var ret interface{} = nil
+	var ret interface{}
 	for _, item := range pq.items {
 		if item.key == key {
 			ret = item
@@ -915,7 +915,7 @@ func (ms *MqTtMsgStream) consumeToTtMsg(consumer mqclient.Consumer) {
 // return true only when all channels reach same timetick
 func (ms *MqTtMsgStream) allChanReachSameTtMsg(chanTtMsgSync map[mqclient.Consumer]bool) (Timestamp, bool) {
 	tsMap := make(map[Timestamp]int)
-	var maxTime Timestamp = 0
+	var maxTime Timestamp
 	for _, t := range ms.chanTtMsgTime {
 		tsMap[t]++
 		if t > maxTime {
@@ -1147,7 +1147,7 @@ func (cct *createCollectionTask) PreExecute(ctx context.Context) error {
 		}
 		if field.DataType == schemapb.DataType_FloatVector || field.DataType == schemapb.DataType_BinaryVector {
 			exist := false
-			var dim int64 = 0
+			var dim int64
 			for _, param := range field.TypeParams {
 				if param.Key == "dim" {
 					exist = true
@@ -1798,7 +1798,7 @@ func reduceSearchResultData(searchResultData []*schemapb.SearchResultData, nq in
 		//printSearchResultData(sData, strconv.FormatInt(int64(i), 10))
 	}

-	var skipDupCnt int64 = 0
+	var skipDupCnt int64
 	var realTopK int64 = -1
 	for i := int64(0); i < nq; i++ {
 		offsets := make([]int64, len(searchResultData))
@@ -2283,7 +2283,7 @@ func (qt *queryTask) Execute(ctx context.Context) error {

 func mergeRetrieveResults(retrieveResults []*internalpb.RetrieveResults) (*milvuspb.QueryResults, error) {
 	var ret *milvuspb.QueryResults
-	var skipDupCnt int64 = 0
+	var skipDupCnt int64
 	var idSet = make(map[int64]struct{})

 	// merge results and remove duplicates
@@ -115,7 +115,7 @@ func (qc *QueryCoord) Init() error {
 		qc.kvClient = etcdKV
 		return nil
 	}
-	var initError error = nil
+	var initError error
 	qc.initOnce.Do(func() {
 		log.Debug("query coordinator try to connect etcd")
 		initError = retry.Do(qc.loopCtx, connectEtcdFn, retry.Attempts(300))
@@ -493,7 +493,7 @@ func (qc *QueryCoord) loadBalanceSegmentLoop() {
 			memoryInsufficient := false
 			loadBalanceTasks := make([]*loadBalanceTask, 0)
 			for {
-				var selectedSegmentInfo *querypb.SegmentInfo = nil
+				var selectedSegmentInfo *querypb.SegmentInfo
 				sort.Slice(onlineNodeIDs, func(i, j int) bool {
 					return nodeID2MemUsageRate[onlineNodeIDs[i]] > nodeID2MemUsageRate[onlineNodeIDs[j]]
 				})
@@ -579,7 +579,7 @@ func chooseSegmentToBalance(sourceNodeID int64, dstNodeID int64,
 	nodeID2MemUsageRate map[int64]float64) (*querypb.SegmentInfo, error) {
 	memoryInsufficient := true
 	minMemDiffPercentage := 1.0
-	var selectedSegmentInfo *querypb.SegmentInfo = nil
+	var selectedSegmentInfo *querypb.SegmentInfo
 	for _, info := range segmentInfos {
 		dstNodeMemUsageAfterBalance := nodeID2MemUsage[dstNodeID] + uint64(info.MemSize)
 		dstNodeMemUsageRateAfterBalance := float64(dstNodeMemUsageAfterBalance) / float64(nodeID2TotalMem[dstNodeID])
@@ -238,7 +238,7 @@ func (scheduler *TaskScheduler) reloadFromKV() error {
 		triggerTasks[taskID].setState(state)
 	}

-	var doneTriggerTask task = nil
+	var doneTriggerTask task
 	for _, t := range triggerTasks {
 		if t.getState() == taskDone {
 			doneTriggerTask = t
@@ -1087,7 +1087,7 @@ func (q *queryCollection) search(msg queryMsg) error {
 	}

 	numSegment := int64(len(searchResults))
-	var marshaledHits *MarshaledHits = nil
+	var marshaledHits *MarshaledHits
 	err = reduceSearchResultsAndFillData(plan, searchResults, numSegment)
 	sp.LogFields(oplog.String("statistical time", "reduceSearchResults end"))
 	if err != nil {
@@ -1106,7 +1106,7 @@ func (q *queryCollection) search(msg queryMsg) error {
 	}
 	tr.Record(fmt.Sprintf("reduce result done, msgID = %d", searchMsg.ID()))

-	var offset int64 = 0
+	var offset int64
 	for index := range searchRequests {
 		hitBlobSizePeerQuery, err := marshaledHits.hitBlobSizeInGroup(int64(index))
 		if err != nil {
@@ -1309,7 +1309,7 @@ func (q *queryCollection) retrieve(msg queryMsg) error {

 func mergeRetrieveResults(retrieveResults []*segcorepb.RetrieveResults) (*segcorepb.RetrieveResults, error) {
 	var ret *segcorepb.RetrieveResults
-	var skipDupCnt int64 = 0
+	var skipDupCnt int64
 	var idSet = make(map[int64]struct{})

 	// merge results and remove duplicates
@@ -90,7 +90,7 @@ func TestReduce_AllFunc(t *testing.T) {
 	hitsBlob, err := marshaledHits.getHitsBlob()
 	assert.Nil(t, err)

-	var offset int64 = 0
+	var offset int64
 	for index := range placeholderGroups {
 		hitBolbSizePeerQuery, err := marshaledHits.hitBlobSizeInGroup(int64(index))
 		assert.Nil(t, err)
@@ -540,7 +540,7 @@ func TestSegment_segmentSearch(t *testing.T) {
 	hitsBlob, err := marshaledHits.getHitsBlob()
 	assert.NoError(t, err)

-	var placeHolderOffset int64 = 0
+	var placeHolderOffset int64
 	for index := range placeholderGroups {
 		hitBlobSizePeerQuery, err := marshaledHits.hitBlobSizeInGroup(int64(index))
 		assert.NoError(t, err)
@@ -1052,7 +1052,7 @@ func (mt *MetaTable) GetNotIndexedSegments(collName string, fieldName string, id
 		return nil, fieldSchema, err
 	}

-	var dupIdx typeutil.UniqueID = 0
+	var dupIdx typeutil.UniqueID
 	for _, f := range collMeta.FieldIndexes {
 		if info, ok := mt.indexID2Meta[f.IndexID]; ok {
 			if info.IndexName == idxInfo.IndexName {
@@ -923,7 +923,7 @@ func (c *Core) Register() error {

 // Init initialize routine
 func (c *Core) Init() error {
-	var initError error = nil
+	var initError error
 	if c.kvBaseCreate == nil {
 		c.kvBaseCreate = func(root string) (kv.TxnKV, error) {
 			return etcdkv.NewEtcdKV(Params.EtcdEndpoints, root)
@@ -612,7 +612,7 @@ func TestRootCoord(t *testing.T) {
 	err = core.Init()
 	assert.Nil(t, err)

-	var localTSO uint64 = 0
+	var localTSO uint64
 	localTSOLock := sync.RWMutex{}
 	core.TSOAllocator = func(c uint32) (uint64, error) {
 		localTSOLock.Lock()
@@ -95,7 +95,7 @@ func (writer *baseBinlogWriter) Finish() error {
 		return fmt.Errorf("invalid start/end timestamp")
 	}

-	var offset int32 = 0
+	var offset int32
 	writer.buffer = new(bytes.Buffer)
 	if err := binary.Write(writer.buffer, common.Endian, int32(MagicNumber)); err != nil {
 		return err
@@ -155,7 +155,7 @@ func buildVectorChunkManager(t *testing.T, localPath string, localCacheEnable bo
 }

 var Params paramtable.BaseTable
-var localPath string = "/tmp/milvus/test_data"
+var localPath = "/tmp/milvus/test_data"

 func TestMain(m *testing.M) {
 	Params.Init()
@@ -62,7 +62,7 @@ func ValidateFloatArrayLength(dim int64, length int) error {

 // CalcL2 returns the Euclidean distance of input vectors
 func CalcL2(dim int64, left []float32, lIndex int64, right []float32, rIndex int64) float32 {
-	var sum float32 = 0.0
+	var sum float32
 	lFrom := lIndex * dim
 	rFrom := rIndex * dim
 	for i := int64(0); i < dim; i++ {
@@ -75,7 +75,7 @@ func CalcL2(dim int64, left []float32, lIndex int64, right []float32, rIndex int

 // CalcIP returns the inner product distance of input vectors
 func CalcIP(dim int64, left []float32, lIndex int64, right []float32, rIndex int64) float32 {
-	var sum float32 = 0.0
+	var sum float32
 	lFrom := lIndex * dim
 	rFrom := rIndex * dim
 	for i := int64(0); i < dim; i++ {
@@ -187,9 +187,9 @@ func CalcHamming(dim int64, left []byte, lIndex int64, right []byte, rIndex int6
 	lFrom := lIndex * numBytes
 	rFrom := rIndex * numBytes

-	var hamming int32 = 0
+	var hamming int32
 	for i := int64(0); i < numBytes; i++ {
-		var xor uint8 = left[lFrom+i] ^ right[rFrom+i]
+		var xor = left[lFrom+i] ^ right[rFrom+i]

 		// The dimension "dim" may not be an integer multiple of 8
 		// For example:
@@ -62,7 +62,7 @@ func DistanceL2(left, right []float32) float32 {
 	if len(left) != len(right) {
 		panic("array dimension not equal")
 	}
-	var sum float32 = 0.0
+	var sum float32
 	for i := 0; i < len(left); i++ {
 		gap := left[i] - right[i]
 		sum += gap * gap
@@ -75,7 +75,7 @@ func DistanceIP(left, right []float32) float32 {
 	if len(left) != len(right) {
 		panic("array dimension not equal")
 	}
-	var sum float32 = 0.0
+	var sum float32
 	for i := 0; i < len(left); i++ {
 		sum += left[i] * right[i]
 	}
@@ -60,7 +60,7 @@ func TestInputNode(t *testing.T) {

 func Test_NewInputNode(t *testing.T) {
 	nodeName := "input_node"
-	var maxQueueLength int32 = 0
+	var maxQueueLength int32
 	var maxParallelism int32 = 100
 	node := NewInputNode(nil, nodeName, maxQueueLength, maxParallelism)
 	assert.NotNil(t, node)
@@ -95,7 +95,7 @@ func TestMsgStreamMsg(t *testing.T) {
 		Ctx: context.TODO(),
 	}

-	var timestampMin Timestamp = 0
+	var timestampMin Timestamp
 	var timestampMax Timestamp = 100
 	streamMsg := &MsgStreamMsg{
 		tsMessages: messages,
@@ -143,7 +143,7 @@ func GetPulsarConfig(protocol, ip, port, url string, args ...int64) (map[string]
 	}

 	var attempt uint = 10
-	var interval time.Duration = time.Second
+	var interval = time.Second
 	if len(args) > 0 && args[0] > 0 {
 		attempt = uint(args[0])
 	}
@@ -17,7 +17,7 @@ import (
 	"time"
 )

-var r *rand.Rand = nil
+var r *rand.Rand

 func init() {
 	r = rand.New(rand.NewSource(time.Now().UnixNano()))
@@ -144,7 +144,7 @@ func getNowTs(idAllocator allocator.GIDAllocator) (int64, error) {
 	return nowTs.Unix(), err
 }

-var topicMu sync.Map = sync.Map{}
+var topicMu = sync.Map{}

 type rocksmq struct {
 	store *gorocksdb.DB
@@ -32,10 +32,10 @@ import (
 )

 var Params paramtable.BaseTable
-var rmqPath string = "/tmp/rocksmq"
-var kvPathSuffix string = "_kv"
-var dbPathSuffix string = "_db"
-var metaPathSuffix string = "_meta"
+var rmqPath = "/tmp/rocksmq"
+var kvPathSuffix = "_kv"
+var dbPathSuffix = "_db"
+var metaPathSuffix = "_meta"

 func InitIDAllocator(kvPath string) *allocator.GlobalIDAllocator {
 	rocksdbKV, err := rocksdbkv.NewRocksdbKV(kvPath)
@@ -135,10 +135,10 @@ func (ri *retentionInfo) Stop() {
 // 4. delete message by range of page id;
 func (ri *retentionInfo) expiredCleanUp(topic string) error {
 	log.Debug("Timeticker triggers an expiredCleanUp task for topic: " + topic)
-	var deletedAckedSize int64 = 0
-	var startID UniqueID = 0
-	var pageStartID UniqueID = 0
-	var pageEndID UniqueID = 0
+	var deletedAckedSize int64
+	var startID UniqueID
+	var pageStartID UniqueID
+	var pageEndID UniqueID
 	var err error

 	fixedAckedTsKey, _ := constructKey(AckedTsTitle, topic)
@@ -25,7 +25,7 @@ import (
 	"go.uber.org/zap"
 )

-var retentionPath string = "/tmp/rmq_retention/"
+var retentionPath = "/tmp/rmq_retention/"

 func TestMain(m *testing.M) {
 	err := os.MkdirAll(retentionPath, os.ModePerm)
@@ -43,7 +43,7 @@ func TestGetServerIDConcurrently(t *testing.T) {
 	defer etcdKV.RemoveWithPrefix("")

 	var wg sync.WaitGroup
-	var muList sync.Mutex = sync.Mutex{}
+	var muList = sync.Mutex{}

 	s := NewSession(ctx, metaRoot, etcdEndpoints)
 	res := make([]int64, 0)
@@ -114,7 +114,7 @@ func TestUpdateSessions(t *testing.T) {
 	defer etcdKV.RemoveWithPrefix("")

 	var wg sync.WaitGroup
-	var muList sync.Mutex = sync.Mutex{}
+	var muList = sync.Mutex{}

 	s := NewSession(ctx, metaRoot, etcdEndpoints)

@@ -21,7 +21,7 @@ import (

 func TestUint64(t *testing.T) {
 	var i int64 = -1
-	var u uint64 = uint64(i)
+	var u = uint64(i)
 	t.Log(i)
 	t.Log(u)
 }
@@ -48,7 +48,7 @@ func TestHash32_Uint64(t *testing.T) {
 }

 func TestHash32_String(t *testing.T) {
-	var u string = "ok"
+	var u = "ok"
 	h, err := Hash32String(u)
 	assert.Nil(t, err)
