Fix pkg codecov & static-check script (#26720)
Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>
This commit is contained in:
parent a8e5dc3517
commit d343888f3c

Makefile
@@ -72,6 +72,7 @@ else
	@GO111MODULE=on env bash $(PWD)/scripts/gofmt.sh internal/
	@GO111MODULE=on env bash $(PWD)/scripts/gofmt.sh tests/integration/
	@GO111MODULE=on env bash $(PWD)/scripts/gofmt.sh tests/go/
	@GO111MODULE=on env bash $(PWD)/scripts/gofmt.sh pkg/
endif

lint: tools/bin/revive
@@ -85,7 +86,7 @@ static-check: getdeps
	@source $(PWD)/scripts/setenv.sh && GO111MODULE=on $(INSTALL_PATH)/golangci-lint run --timeout=30m --config ./.golangci.yml ./internal/...
	@source $(PWD)/scripts/setenv.sh && GO111MODULE=on $(INSTALL_PATH)/golangci-lint run --timeout=30m --config ./.golangci.yml ./cmd/...
	@source $(PWD)/scripts/setenv.sh && GO111MODULE=on $(INSTALL_PATH)/golangci-lint run --timeout=30m --config ./.golangci.yml ./tests/integration/...
	@source $(PWD)/scripts/setenv.sh && GO111MODULE=on $(INSTALL_PATH)/golangci-lint run --timeout=30m --config ./.golangci.yml ./...
	@source $(PWD)/scripts/setenv.sh && cd pkg && GO111MODULE=on $(INSTALL_PATH)/golangci-lint run --timeout=30m --config ../.golangci.yml ./...

verifiers: build-cpp getdeps cppcheck fmt static-check
@@ -18,19 +18,24 @@ package metrics

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/stretchr/testify/assert"
)

func TestRegisterMetrics(t *testing.T) {
	r := NewMilvusRegistry()
	assert.NotPanics(t, func() {
		r := prometheus.NewRegistry()
		// Make sure it doesn't panic.
		RegisterRootCoord(r.GoRegistry)
		RegisterDataNode(r.GoRegistry)
		RegisterDataCoord(r.GoRegistry)
		RegisterIndexNode(r.GoRegistry)
		RegisterProxy(r.GoRegistry)
		RegisterQueryNode(r.GoRegistry)
		RegisterQueryCoord(r.GoRegistry)
		RegisterMetaMetrics(r.GoRegistry)
		RegisterStorageMetrics(r.GoRegistry)
		RegisterMsgStreamMetrics(r.GoRegistry)
		RegisterRootCoord(r)
		RegisterDataNode(r)
		RegisterDataCoord(r)
		RegisterIndexNode(r)
		RegisterProxy(r)
		RegisterQueryNode(r)
		RegisterQueryCoord(r)
		RegisterMetaMetrics(r)
		RegisterStorageMetrics(r)
		RegisterMsgStreamMetrics(r)
	})
}
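For context, the pattern this test exercises is registering collectors against a dedicated registry inside assert.NotPanics, so duplicate registration surfaces as a test failure instead of a crash. A minimal sketch of that pattern follows; the helper and metric names (registerNodeMetrics, example_requests_total) are hypothetical, not Milvus APIs:

```go
package metrics_test

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/stretchr/testify/assert"
)

// registerNodeMetrics is a stand-in for a component-specific Register* helper.
func registerNodeMetrics(r prometheus.Registerer) {
	r.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_requests_total",
		Help: "Total requests handled by the node.",
	}))
}

func TestRegisterNodeMetrics(t *testing.T) {
	// A fresh registry per test keeps collectors from clashing with the
	// global default registry; MustRegister panics on duplicates, which
	// assert.NotPanics reports as a failed assertion.
	r := prometheus.NewRegistry()
	assert.NotPanics(t, func() { registerNodeMetrics(r) })
}
```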
@@ -307,25 +307,66 @@ func TestKafkaClient_MsgSerializAndDeserialize(t *testing.T) {
	assert.Nil(t, msgID)
}

/*
func createParamItem(v string) paramtable.ParamItem {
	item := paramtable.ParamItem{
		Formatter: func(originValue string) string { return v },
	}
	item.Init(&config.Manager{})
	return item
}*/

func initParamItem(item *paramtable.ParamItem, v string) {
	item.Formatter = func(originValue string) string { return v }
	item.Init(&config.Manager{})
}

type kafkaCfgOption func(cfg *paramtable.KafkaConfig)

func withAddr(v string) kafkaCfgOption {
	return func(cfg *paramtable.KafkaConfig) {
		initParamItem(&cfg.Address, v)
	}
}

func withUsername(v string) kafkaCfgOption {
	return func(cfg *paramtable.KafkaConfig) {
		initParamItem(&cfg.SaslUsername, v)
	}
}

func withPasswd(v string) kafkaCfgOption {
	return func(cfg *paramtable.KafkaConfig) {
		initParamItem(&cfg.SaslPassword, v)
	}
}

func withMechanism(v string) kafkaCfgOption {
	return func(cfg *paramtable.KafkaConfig) {
		initParamItem(&cfg.SaslMechanisms, v)
	}
}

func withProtocol(v string) kafkaCfgOption {
	return func(cfg *paramtable.KafkaConfig) {
		initParamItem(&cfg.SecurityProtocol, v)
	}
}

func createKafkaConfig(opts ...kafkaCfgOption) *paramtable.KafkaConfig {
	cfg := &paramtable.KafkaConfig{}
	for _, opt := range opts {
		opt(cfg)
	}
	return cfg
}

func TestKafkaClient_NewKafkaClientInstanceWithConfig(t *testing.T) {
	config1 := &paramtable.KafkaConfig{
		Address:      createParamItem("addr"),
		SaslPassword: createParamItem("password"),
	}
	config1 := createKafkaConfig(withAddr("addr"), withPasswd("password"))

	assert.Panics(t, func() { NewKafkaClientInstanceWithConfig(config1) })

	config2 := &paramtable.KafkaConfig{
		Address:      createParamItem("addr"),
		SaslUsername: createParamItem("username"),
	}
	config2 := createKafkaConfig(withAddr("addr"), withUsername("username"))
	assert.Panics(t, func() { NewKafkaClientInstanceWithConfig(config2) })

	producerConfig := make(map[string]string)
@@ -333,15 +374,10 @@ func TestKafkaClient_NewKafkaClientInstanceWithConfig(t *testing.T) {
	consumerConfig := make(map[string]string)
	consumerConfig["client.id"] = "dc"

	config := &paramtable.KafkaConfig{
		Address:             createParamItem("addr"),
		SaslUsername:        createParamItem("username"),
		SaslPassword:        createParamItem("password"),
		SaslMechanisms:      createParamItem("sasl"),
		SecurityProtocol:    createParamItem("plain"),
		ConsumerExtraConfig: paramtable.ParamGroup{GetFunc: func() map[string]string { return consumerConfig }},
		ProducerExtraConfig: paramtable.ParamGroup{GetFunc: func() map[string]string { return producerConfig }},
	}
	config := createKafkaConfig(withAddr("addr"), withUsername("username"), withPasswd("password"), withMechanism("sasl"), withProtocol("plain"))
	config.ConsumerExtraConfig = paramtable.ParamGroup{GetFunc: func() map[string]string { return consumerConfig }}
	config.ProducerExtraConfig = paramtable.ParamGroup{GetFunc: func() map[string]string { return producerConfig }}

	client := NewKafkaClientInstanceWithConfig(config)
	assert.NotNil(t, client)
	assert.NotNil(t, client.basicConfig)
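The new helpers in this file follow Go's functional-options pattern: each with* function mutates one field of the config, and createKafkaConfig applies the options in order. A minimal, self-contained sketch of that pattern, using placeholder types (kafkaTestConfig, plain string fields) instead of the real paramtable.KafkaConfig and ParamItem:

```go
package main

import "fmt"

// kafkaTestConfig stands in for a Kafka config struct; fields are plain
// strings here rather than paramtable ParamItem values.
type kafkaTestConfig struct {
	Address      string
	SaslUsername string
	SaslPassword string
}

// option mirrors the kafkaCfgOption type above: a mutator over the config.
type option func(*kafkaTestConfig)

func withAddr(v string) option     { return func(c *kafkaTestConfig) { c.Address = v } }
func withUsername(v string) option { return func(c *kafkaTestConfig) { c.SaslUsername = v } }
func withPasswd(v string) option   { return func(c *kafkaTestConfig) { c.SaslPassword = v } }

// newConfig applies each option in order, like createKafkaConfig above.
func newConfig(opts ...option) *kafkaTestConfig {
	cfg := &kafkaTestConfig{}
	for _, opt := range opts {
		opt(cfg)
	}
	return cfg
}

func main() {
	cfg := newConfig(withAddr("addr"), withUsername("username"))
	fmt.Printf("%+v\n", cfg)
}
```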
@@ -234,30 +234,30 @@ func TestComponentParam(t *testing.T) {
	// })

	t.Run("test queryCoordConfig", func(t *testing.T) {
		Params := params.QueryCoordCfg
		Params := &params.QueryCoordCfg
		assert.Equal(t, Params.EnableActiveStandby.GetAsBool(), false)
		t.Logf("queryCoord EnableActiveStandby = %t", Params.EnableActiveStandby.GetAsBool())

		params.Save("queryCoord.NextTargetSurviveTime", "100")
		NextTargetSurviveTime := Params.NextTargetSurviveTime
		NextTargetSurviveTime := &Params.NextTargetSurviveTime
		assert.Equal(t, int64(100), NextTargetSurviveTime.GetAsInt64())

		params.Save("queryCoord.UpdateNextTargetInterval", "100")
		UpdateNextTargetInterval := Params.UpdateNextTargetInterval
		UpdateNextTargetInterval := &Params.UpdateNextTargetInterval
		assert.Equal(t, int64(100), UpdateNextTargetInterval.GetAsInt64())

		params.Save("queryCoord.checkNodeInReplicaInterval", "100")
		checkNodeInReplicaInterval := Params.CheckNodeInReplicaInterval
		checkNodeInReplicaInterval := &Params.CheckNodeInReplicaInterval
		assert.Equal(t, 100, checkNodeInReplicaInterval.GetAsInt())

		params.Save("queryCoord.checkResourceGroupInterval", "10")
		checkResourceGroupInterval := Params.CheckResourceGroupInterval
		checkResourceGroupInterval := &Params.CheckResourceGroupInterval
		assert.Equal(t, 10, checkResourceGroupInterval.GetAsInt())

		enableResourceGroupAutoRecover := Params.EnableRGAutoRecover
		enableResourceGroupAutoRecover := &Params.EnableRGAutoRecover
		assert.Equal(t, true, enableResourceGroupAutoRecover.GetAsBool())
		params.Save("queryCoord.enableRGAutoRecover", "false")
		enableResourceGroupAutoRecover = Params.EnableRGAutoRecover
		enableResourceGroupAutoRecover = &Params.EnableRGAutoRecover
		assert.Equal(t, false, enableResourceGroupAutoRecover.GetAsBool())

		checkHealthInterval := Params.CheckHealthInterval.GetAsInt()
@@ -284,7 +284,7 @@ func TestComponentParam(t *testing.T) {
	})

	t.Run("test queryNodeConfig", func(t *testing.T) {
		Params := params.QueryNodeCfg
		Params := &params.QueryNodeCfg

		interval := Params.StatsPublishInterval.GetAsInt()
		assert.Equal(t, 1000, interval)
@@ -338,14 +338,14 @@ func TestComponentParam(t *testing.T) {
		assert.Equal(t, int64(1024), chunkRows)

		params.Save("queryNode.gracefulStopTimeout", "100")
		gracefulStopTimeout := Params.GracefulStopTimeout
		gracefulStopTimeout := &Params.GracefulStopTimeout
		assert.Equal(t, int64(100), gracefulStopTimeout.GetAsInt64())

		assert.Equal(t, false, Params.EnableWorkerSQCostMetrics.GetAsBool())
	})

	t.Run("test dataCoordConfig", func(t *testing.T) {
		Params := params.DataCoordCfg
		Params := &params.DataCoordCfg
		assert.Equal(t, 24*60*60*time.Second, Params.SegmentMaxLifetime.GetAsDuration(time.Second))
		assert.True(t, Params.EnableGarbageCollection.GetAsBool())
		assert.Equal(t, Params.EnableActiveStandby.GetAsBool(), false)
@@ -353,7 +353,7 @@ func TestComponentParam(t *testing.T) {
	})

	t.Run("test dataNodeConfig", func(t *testing.T) {
		Params := params.DataNodeCfg
		Params := &params.DataNodeCfg

		SetNodeID(2)

@@ -372,23 +372,23 @@ func TestComponentParam(t *testing.T) {
		size := Params.FlushInsertBufferSize.GetAsInt()
		t.Logf("FlushInsertBufferSize: %d", size)

		period := Params.SyncPeriod
		period := &Params.SyncPeriod
		t.Logf("SyncPeriod: %v", period)
		assert.Equal(t, 10*time.Minute, Params.SyncPeriod.GetAsDuration(time.Second))

		bulkinsertTimeout := Params.BulkInsertTimeoutSeconds
		bulkinsertTimeout := &Params.BulkInsertTimeoutSeconds
		t.Logf("BulkInsertTimeoutSeconds: %v", bulkinsertTimeout)
		assert.Equal(t, "18000", Params.BulkInsertTimeoutSeconds.GetValue())
	})

	t.Run("test indexNodeConfig", func(t *testing.T) {
		Params := params.IndexNodeCfg
		Params := &params.IndexNodeCfg
		params.Save(Params.GracefulStopTimeout.Key, "50")
		assert.Equal(t, Params.GracefulStopTimeout.GetAsInt64(), int64(50))
	})

	t.Run("channel config priority", func(t *testing.T) {
		Params := params.CommonCfg
		Params := &params.CommonCfg
		params.Save(Params.RootCoordDml.Key, "dml1")
		params.Save(Params.RootCoordDml.FallbackKeys[0], "dml2")
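The recurring change in this file is taking a pointer (&params.XxxCfg, &Params.SomeItem) instead of copying the value, presumably to satisfy the static checks newly enabled for pkg/ about copying structs that should be shared. The sketch below uses a made-up config type (nodeConfig) with an embedded mutex to show the kind of by-value copy that go vet's copylocks analyzer flags:

```go
package main

import (
	"fmt"
	"sync"
)

// nodeConfig is a made-up stand-in for a component config that embeds a lock.
type nodeConfig struct {
	mu      sync.Mutex
	Timeout int
}

func main() {
	cfg := nodeConfig{Timeout: 100}

	// copied := cfg // copying the value also copies cfg.mu;
	//               // `go vet` reports this via its copylocks check.

	shared := &cfg // a pointer refers to the same struct and the same lock
	shared.Timeout = 200

	fmt.Println(cfg.Timeout) // 200: updates through the pointer are visible
}
```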
@@ -50,7 +50,7 @@ func TestGrpcServerParams(t *testing.T) {
	base.Save("grpc.serverMaxRecvSize", "a")
	assert.Equal(t, serverConfig.ServerMaxSendSize.GetAsInt(), DefaultServerMaxRecvSize)

	assert.NotZero(t, serverConfig.ServerMaxSendSize)
	assert.NotZero(t, serverConfig.ServerMaxSendSize.GetAsInt())
	t.Logf("ServerMaxSendSize = %d", serverConfig.ServerMaxSendSize.GetAsInt())

	base.Remove(role + ".grpc.serverMaxSendSize")
@@ -9,8 +9,8 @@ import (
func TestHTTPConfig_Init(t *testing.T) {
	params := ComponentParam{}
	params.Init()
	cf := params.HTTPCfg
	assert.Equal(t, cf.Enabled.GetAsBool(), true)
	assert.Equal(t, cf.DebugMode.GetAsBool(), false)
	assert.Equal(t, cf.Port.GetValue(), "")
	cfg := &params.HTTPCfg
	assert.Equal(t, cfg.Enabled.GetAsBool(), true)
	assert.Equal(t, cfg.DebugMode.GetAsBool(), false)
	assert.Equal(t, cfg.Port.GetValue(), "")
}
@@ -37,13 +37,15 @@ for d in $(go list ./internal/... | grep -v -e vendor -e kafka -e planparserv2/g
        rm profile.out
    fi
done
for d in $(go list ./pkg/... | grep -v -e vendor -e kafka -e planparserv2/generated -e mocks); do
pushd pkg
for d in $(go list ./... | grep -v -e vendor -e kafka -e planparserv2/generated -e mocks); do
    go test -race -tags dynamic -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic "$d"
    if [ -f profile.out ]; then
        grep -v kafka profile.out | grep -v planparserv2/generated | grep -v mocks | sed '1d' >> ${FILE_COVERAGE_INFO}
        grep -v kafka profile.out | grep -v planparserv2/generated | grep -v mocks | sed '1d' >> ../${FILE_COVERAGE_INFO}
        rm profile.out
    fi
done
popd
endTime=`date +%s`

echo "Total time for go unittest:" $(($endTime-$beginTime)) "s"