Merge branch 'V5.1' into multi

黄润豪 2019-04-11 22:20:34 +08:00 committed by GitHub
commit 189d1e0eb8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
68 changed files with 4315 additions and 216 deletions

View File

@ -96,12 +96,9 @@ func (t *TaskManager) Do() {
time.Sleep(time.Second * 2)
continue
}
logrus.Debugf("Receive a task: %s", data.String())
err = t.exec.AddTask(data)
if err != nil {
logrus.Error("add task error:", err.Error())
//TODO:
//rate limiting
}
}
}

View File

@ -23,8 +23,9 @@ import (
"fmt"
"runtime/debug"
"github.com/Sirupsen/logrus"
"github.com/ghodss/yaml"
"github.com/Sirupsen/logrus"
"github.com/goodrain/rainbond/builder/parser"
"github.com/goodrain/rainbond/event"
"github.com/goodrain/rainbond/mq/api/grpc/pb"
@ -104,14 +105,17 @@ func (e *exectorManager) serviceCheck(task *pb.TaskMessage) {
case "docker-run":
pr = parser.CreateDockerRunOrImageParse(input.Username, input.Password, input.SourceBody, e.DockerClient, logger)
case "docker-compose":
logrus.Debugf("source body is \n%v", input.SourceBody)
y, err := yaml.JSONToYAML([]byte(input.SourceBody))
if err != nil {
logrus.Errorf("json bytes format is error, %s", input.SourceBody)
logger.Error("dockercompose文件格式不正确。", map[string]string{"step": "callback", "status": "failure"})
return
var yamlbody = input.SourceBody
if input.SourceBody[0] == '{' {
yamlbyte, err := yaml.JSONToYAML([]byte(input.SourceBody))
if err != nil {
logrus.Errorf("json bytes format is error, %s", input.SourceBody)
logger.Error("The dockercompose file is not in the correct format", map[string]string{"step": "callback", "status": "failure"})
return
}
yamlbody = string(yamlbyte)
}
pr = parser.CreateDockerComposeParse(string(y), e.DockerClient, logger)
pr = parser.CreateDockerComposeParse(yamlbody, e.DockerClient, input.Username, input.Password, logger)
case "sourcecode":
pr = parser.CreateSourceCodeParse(input.SourceBody, logger)
case "third-party-service":

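For reference, the change above lets serviceCheck accept either JSON or YAML compose input: a body starting with '{' is treated as JSON and converted with yaml.JSONToYAML, anything else is passed through as YAML. A minimal standalone sketch of that check (the toYAML helper and the main wrapper are illustrative names, not part of the commit; it uses the same github.com/ghodss/yaml package):

package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

// toYAML passes YAML input through unchanged and converts JSON
// (detected by a leading '{') with yaml.JSONToYAML.
func toYAML(sourceBody string) (string, error) {
	if len(sourceBody) > 0 && sourceBody[0] == '{' {
		out, err := yaml.JSONToYAML([]byte(sourceBody))
		if err != nil {
			return "", err
		}
		return string(out), nil
	}
	return sourceBody, nil
}

func main() {
	body, err := toYAML(`{"version":"3","services":{"redis":{"image":"redis"}}}`)
	if err != nil {
		fmt.Println("convert error:", err)
		return
	}
	fmt.Println(body)
}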
View File

@ -22,6 +22,8 @@ import (
"fmt"
"strings"
"github.com/goodrain/rainbond/builder/sources"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/client"
"github.com/goodrain/rainbond/builder/parser/compose"
@ -33,22 +35,27 @@ import (
//DockerComposeParse docker compose file parser
type DockerComposeParse struct {
services map[string]*serviceInfoFromDC
services map[string]*ServiceInfoFromDC
errors []ParseError
dockerclient *client.Client
logger event.Logger
source string
user string
password string
}
type serviceInfoFromDC struct {
ports map[int]*types.Port
volumes map[string]*types.Volume
envs map[string]*types.Env
//ServiceInfoFromDC service info from dockercompose
type ServiceInfoFromDC struct {
ports map[int]*Port
volumes map[string]*Volume
envs map[string]*Env
source string
memory int
image Image
args []string
depends []string
imageAlias string
deployType string
}
//GetPorts get port list
@ -76,12 +83,14 @@ func (d *serviceInfoFromDC) GetEnvs() (envs []types.Env) {
}
//CreateDockerComposeParse create parser
func CreateDockerComposeParse(source string, dockerclient *client.Client, logger event.Logger) Parser {
func CreateDockerComposeParse(source string, dockerclient *client.Client, user, pass string, logger event.Logger) Parser {
return &DockerComposeParse{
source: source,
dockerclient: dockerclient,
logger: logger,
services: make(map[string]*serviceInfoFromDC),
services: make(map[string]*ServiceInfoFromDC),
user: user,
password: pass,
}
}
@ -95,13 +104,13 @@ func (d *DockerComposeParse) Parse() ParseErrorList {
co, err := comp.LoadBytes([][]byte{[]byte(d.source)})
if err != nil {
logrus.Warning("parse compose file error,", err.Error())
d.logger.Error(fmt.Sprintf("解析ComposeFile失败 %s", err.Error()), map[string]string{"step": "compose-parse"})
d.errappend(ErrorAndSolve(FatalError, fmt.Sprintf("ComposeFile解析错误"), SolveAdvice("modify_compose", "请确认ComposeFile输入是否语法正确")))
return d.errors
}
for kev, sc := range co.ServiceConfigs {
logrus.Debugf("service config is %v, container name is %s", sc, sc.ContainerName)
ports := make(map[int]*types.Port)
for _, p := range sc.Port {
pro := string(p.Protocol)
if pro != "udp" {
@ -114,9 +123,19 @@ func (d *DockerComposeParse) Parse() ParseErrorList {
}
volumes := make(map[string]*types.Volume)
for _, v := range sc.Volumes {
volumes[v.MountPath] = &types.Volume{
VolumePath: v.MountPath,
VolumeType: model.ShareFileVolumeType.String(),
if strings.Contains(v.MountPath, ":") {
infos := strings.Split(v.MountPath, ":")
if len(infos) > 1 {
volumes[v.MountPath] = &types.Volume{
VolumePath: infos[1],
VolumeType: model.ShareFileVolumeType.String(),
}
}
} else {
volumes[v.MountPath] = &types.Volume{
VolumePath: v.MountPath,
VolumeType: model.ShareFileVolumeType.String(),
}
}
}
envs := make(map[string]*types.Env)
@ -126,12 +145,12 @@ func (d *DockerComposeParse) Parse() ParseErrorList {
Value: e.Value,
}
}
service := serviceInfoFromDC{
service := ServiceInfoFromDC{
ports: ports,
volumes: volumes,
envs: envs,
memory: int(sc.MemLimit / 1024 / 1024),
image: parseImageName(sc.Image),
image: ParseImageName(sc.Image),
args: sc.Args,
depends: sc.Links,
imageAlias: sc.ContainerName,
@ -139,6 +158,7 @@ func (d *DockerComposeParse) Parse() ParseErrorList {
if sc.DependsON != nil {
service.depends = sc.DependsON
}
service.deployType = DetermineDeployType(service.image)
d.services[kev] = &service
}
for serviceName, service := range d.services {
@ -152,15 +172,10 @@ func (d *DockerComposeParse) Parse() ParseErrorList {
return d.errors
}
}
//pull the image to verify that it exists
imageInspect, err := sources.ImagePull(d.dockerclient, service.image.String(), "", "", d.logger, 10)
//do not pull image, but check image exist
exist, err := sources.ImageExist(service.image.String(), d.user, d.password)
if err != nil {
if strings.Contains(err.Error(), "No such image") {
d.errappend(ErrorAndSolve(FatalError, fmt.Sprintf("镜像(%s)不存在", service.image.String()), SolveAdvice("modify_compose", "请确认ComposeFile输入镜像名是否正确")))
} else {
d.errappend(ErrorAndSolve(FatalError, fmt.Sprintf("镜像(%s)获取失败", service.image.String()), SolveAdvice("modify_compose", "请确认ComposeFile输入镜像可以正常获取")))
}
return d.errors
logrus.Errorf("check image exist failure %s", err.Error())
}
if imageInspect != nil && imageInspect.ContainerConfig != nil {
for _, env := range imageInspect.ContainerConfig.Env {
@ -188,6 +203,8 @@ func (d *DockerComposeParse) Parse() ParseErrorList {
service.ports[port] = &types.Port{Protocol: proto, ContainerPort: port}
}
}
if !exist {
d.errappend(ErrorAndSolve(FatalError, fmt.Sprintf("服务%s镜像%s不存在", serviceName, service.image.String()), SolveAdvice("modify_compose", fmt.Sprintf("请确认ComposeFile中%s服务的依赖服务是否正确", serviceName))))
}
}
return d.errors
@ -202,18 +219,19 @@ func (d *DockerComposeParse) GetServiceInfo() []ServiceInfo {
var sis []ServiceInfo
for _, service := range d.services {
si := ServiceInfo{
Ports: service.GetPorts(),
Envs: service.GetEnvs(),
Volumes: service.GetVolumes(),
Image: service.image,
Args: service.args,
DependServices: service.depends,
ImageAlias: service.imageAlias,
Ports: service.GetPorts(),
Envs: service.GetEnvs(),
Volumes: service.GetVolumes(),
Image: service.image,
Args: service.args,
DependServices: service.depends,
ImageAlias: service.imageAlias,
ServiceDeployType: service.deployType,
}
if service.memory != 0 {
si.Memory = service.memory
} else {
si.Memory = 128
si.Memory = 512
}
sis = append(sis, si)
}

View File

@ -22,8 +22,9 @@ import (
"fmt"
"testing"
"github.com/goodrain/rainbond/event"
"github.com/Sirupsen/logrus"
//"github.com/docker/docker/client"
"github.com/docker/docker/client"
"github.com/ghodss/yaml"
)
@ -188,98 +189,53 @@ services:
`
var dockercompose3 = `
version: "3"
version: '3'
services:
redis:
image: redis:alpine
ports:
- "6379"
networks:
- frontend
deploy:
replicas: 2
update_config:
parallelism: 2
delay: 10s
restart_policy:
condition: on-failure
image: redis
restart: always
db:
image: postgres:9.4
mongo:
image: mongo
restart: always
ports:
- "27017:27017"
volumes:
- db-data:/var/lib/postgresql/data
networks:
- backend
deploy:
placement:
constraints: [node.role == manager]
- ~/.container/data/mongo/db:/data/db
- ~/.container/data/mongo/configdb:/data/configdb
environment:
- MONGO_INITDB_ROOT_USERNAME=$MONGO_INITDB_ROOT_USERNAME
- MONGO_INITDB_ROOT_PASSWORD=$MONGO_INITDB_ROOT_PASSWORD
vote:
image: dockersamples/examplevotingapp_vote:before
ports:
- 5000:80
networks:
- frontend
treasure-island:
depends_on:
- mongo
- redis
deploy:
replicas: 2
update_config:
parallelism: 2
restart_policy:
condition: on-failure
result:
image: dockersamples/examplevotingapp_result:before
image: di94sh/treasure-island
restart: always
ports:
- 5001:80
networks:
- backend
- "4000:4000"
links:
- "mongo"
- "redis"
environment:
- MONGO_INITDB_ROOT_USERNAME=$MONGO_INITDB_ROOT_USERNAME
- MONGO_INITDB_ROOT_PASSWORD=$MONGO_INITDB_ROOT_PASSWORD
celery:
depends_on:
- db
deploy:
replicas: 1
update_config:
parallelism: 2
delay: 10s
restart_policy:
condition: on-failure
worker:
image: dockersamples/examplevotingapp_worker
networks:
- frontend
- backend
deploy:
mode: replicated
replicas: 1
labels: [APP=VOTING]
restart_policy:
condition: on-failure
delay: 10s
max_attempts: 3
window: 120s
placement:
constraints: [node.role == manager]
visualizer:
image: dockersamples/visualizer:stable
ports:
- "8080:8080"
stop_grace_period: 1m30s
volumes:
- "/var/run/docker.sock:/var/run/docker.sock"
deploy:
placement:
constraints: [node.role == manager]
networks:
frontend:
backend:
volumes:
db-data:
- mongo
- redis
image: di94sh/treasure-island
restart: always
links:
- "mongo"
- "redis"
environment:
- MONGO_INITDB_ROOT_USERNAME=$MONGO_INITDB_ROOT_USERNAME
- MONGO_INITDB_ROOT_PASSWORD=$MONGO_INITDB_ROOT_PASSWORD
command: celery -B -A app.tasks worker
`
var dockercompose20 = `
@ -318,7 +274,20 @@ func TestDockerComposeParse(t *testing.T) {
fmt.Printf("yaml error, %v", err.Error())
}
fmt.Printf("yaml is %s", string(y))
p := CreateDockerComposeParse(string(y), dockerclient, nil)
p := CreateDockerComposeParse(string(y), dockerclient, "", "", nil)
if err := p.Parse(); err != nil {
logrus.Errorf(err.Error())
return
}
fmt.Printf("ServiceInfo:%+v \n", p.GetServiceInfo())
}
func TestDockerCompose30Parse(t *testing.T) {
dockerclient, err := client.NewEnvClient()
if err != nil {
t.Fatal(err)
}
p := CreateDockerComposeParse(dockercompose3, dockerclient, "", "", event.GetTestLogger())
if err := p.Parse(); err != nil {
logrus.Errorf(err.Error())
return

View File

@ -38,6 +38,7 @@ type DockerRunOrImageParse struct {
volumes map[string]*types.Volume
envs map[string]*types.Env
source string
deployType string
memory int
image Image
args []string
@ -89,7 +90,7 @@ func (d *DockerRunOrImageParse) Parse() ParseErrorList {
d.errappend(ErrorAndSolve(FatalError, fmt.Sprintf("镜像名称(%s)不合法", d.image.String()), SolveAdvice("modify_image", "请确认输入镜像名是否正确")))
return d.errors
}
d.image = parseImageName(d.source)
d.image = ParseImageName(d.source)
}
//pull the image to verify that it exists
imageInspect, err := sources.ImagePull(d.dockerclient, d.image.String(), d.user, d.pass, d.logger, 10)
@ -128,6 +129,7 @@ func (d *DockerRunOrImageParse) Parse() ParseErrorList {
}
}
}
d.deployType = DetermineDeployType(d.image)
return d.errors
}
@ -191,7 +193,7 @@ func (d *DockerRunOrImageParse) dockerun(source []string) {
case "memory", "m":
d.memory = readmemory(s)
case "", "d", "i", "t", "it", "P":
d.image = parseImageName(s)
d.image = ParseImageName(s)
if len(source) > i+1 {
d.args = source[i+1:]
}
@ -260,13 +262,14 @@ func (d *DockerRunOrImageParse) GetMemory() int {
//GetServiceInfo get service info
func (d *DockerRunOrImageParse) GetServiceInfo() []ServiceInfo {
serviceInfo := ServiceInfo{
Ports: d.GetPorts(),
Envs: d.GetEnvs(),
Volumes: d.GetVolumes(),
Image: d.GetImage(),
Args: d.GetArgs(),
Branchs: d.GetBranchs(),
Memory: d.memory,
Ports: d.GetPorts(),
Envs: d.GetEnvs(),
Volumes: d.GetVolumes(),
Image: d.GetImage(),
Args: d.GetArgs(),
Branchs: d.GetBranchs(),
Memory: d.memory,
ServiceDeployType: d.deployType,
}
if serviceInfo.Memory == 0 {
serviceInfo.Memory = 256

View File

@ -23,6 +23,13 @@ import (
"strconv"
"strings"
"github.com/Sirupsen/logrus"
"github.com/goodrain/rainbond/builder/sources"
"github.com/docker/distribution/reference"
"github.com/goodrain/rainbond/util"
"github.com/goodrain/rainbond/builder/parser/code"
"github.com/goodrain/rainbond/builder/parser/discovery"
"github.com/goodrain/rainbond/builder/parser/types"
@ -94,12 +101,41 @@ func (ps ParseErrorList) IsFatalError() bool {
//Image image
type Image struct {
name reference.Named
Name string `json:"name"`
Tag string `json:"tag"`
}
//String -
func (i Image) String() string {
return fmt.Sprintf("%s:%s", i.Name, i.Tag)
return i.Name
}
//GetTag get tag
func (i Image) GetTag() string {
return i.Tag
}
//GetRepostory get repostory
func (i Image) GetRepostory() string {
return reference.Path(i.name)
}
//GetDomain get image registry domain
func (i Image) GetDomain() string {
domain := reference.Domain(i.name)
if domain == "docker.io" {
domain = "registry-1.docker.io"
}
return domain
}
//GetSimpleName get image name without tag and organizations
func (i Image) GetSimpleName() string {
if strings.Contains(i.GetRepostory(), "/") {
return strings.Split(i.GetRepostory(), "/")[1]
}
return i.GetRepostory()
}
//Parser parser
@ -179,6 +215,25 @@ func GetPortProtocol(port int) string {
return "http"
}
var dbImageKey = []string{
"mysql", "mariadb", "mongo", "redis", "tidb",
"zookeeper", "kafka", "mysqldb", "mongodb",
"memcached", "cockroachdb", "cockroach", "etcd",
"postgres", "postgresql", "elasticsearch", "consul",
"percona", "mysql-server", "mysql-cluster",
}
//DetermineDeployType Determine the deployment type
// if the image looks like a db image, return the stateful type
func DetermineDeployType(imageName Image) string {
for _, key := range dbImageKey {
if strings.ToLower(imageName.GetSimpleName()) == key {
return util.StatefulServiceType
}
}
return util.StatelessServiceType
}
//readmemory
//10m 10
//10g 10*1024
@ -202,16 +257,20 @@ func readmemory(s string) int {
return 128
}
func parseImageName(s string) Image {
index := strings.LastIndex(s, ":")
if index > -1 {
return Image{
Name: s[0:index],
Tag: s[index+1:],
}
//ParseImageName parse image name
func ParseImageName(s string) (i Image) {
ref, err := reference.ParseAnyReference(s)
if err != nil {
logrus.Errorf("parse image failure %s", err.Error())
return i
}
return Image{
Name: s,
Tag: "latest",
name, err := reference.ParseNamed(ref.String())
if err != nil {
logrus.Errorf("parse image failure %s", err.Error())
return i
}
i.name = name
i.Tag = sources.GetTagFromNamedRef(name)
i.Name = name.String()
return
}

View File

@ -19,11 +19,31 @@
package parser
import (
"fmt"
"testing"
)
func TestParseImageName(t *testing.T) {
image := parseImageName("192.168.0.1:9090/asdasd/asdasd:asdad")
fmt.Println(image.Name, image.Tag)
image := ParseImageName("192.168.0.1:9090/asdasd/asdasd:asdad")
t.Logf("string %s", image.String())
t.Logf("domain %s", image.GetDomain())
t.Logf("repostory %s", image.GetRepostory())
t.Logf("name %s", image.GetSimpleName())
t.Logf("tag %s", image.GetTag())
image2 := ParseImageName("192.168.0.1/asdasd/name")
t.Logf("string %s", image2.String())
t.Logf("domain %s", image2.GetDomain())
t.Logf("repostory %s", image2.GetRepostory())
t.Logf("name %s", image2.GetSimpleName())
t.Logf("tag %s", image2.GetTag())
image3 := ParseImageName("barnett/name:tag")
t.Logf("string %s", image3.String())
t.Logf("domain %s", image3.GetDomain())
t.Logf("repostory %s", image3.GetRepostory())
t.Logf("name %s", image3.GetSimpleName())
t.Logf("tag %s", image3.GetTag())
}
func TestDetermineDeployType(t *testing.T) {
t.Log(DetermineDeployType(ParseImageName("barnett/zookeeper:3.2")))
t.Log(DetermineDeployType(ParseImageName("elcolio/etcd:2.0.10")))
t.Log(DetermineDeployType(ParseImageName("phpmyadmin")))
}

View File

@ -24,6 +24,8 @@ import (
"strconv"
"strings"
"github.com/goodrain/rainbond/util"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/client"
"github.com/goodrain/rainbond/builder"
@ -53,9 +55,11 @@ type SourceCodeParse struct {
dockerclient *client.Client
logger event.Logger
Lang code.Lang
Runtime bool `json:"runtime"`
Dependencies bool `json:"dependencies"`
Procfile bool `json:"procfile"`
isMulti bool
services []*types.Service
}
@ -68,7 +72,7 @@ func CreateSourceCodeParse(source string, logger event.Logger) Parser {
volumes: make(map[string]*types.Volume),
envs: make(map[string]*types.Env),
logger: logger,
image: parseImageName(builder.RUNNERIMAGENAME),
image: ParseImageName(builder.RUNNERIMAGENAME),
args: []string{"start", "web"},
}
}
@ -263,7 +267,6 @@ func (d *SourceCodeParse) Parse() ParseErrorList {
return d.errors
}
}
d.Dependencies = code.CheckDependencies(buildPath, lang)
runtimeInfo, err := code.CheckRuntime(buildPath, lang)
if err != nil && err == code.ErrRuntimeNotSupport {
d.errappend(ErrorAndSolve(FatalError, "代码选择的运行时版本不支持", "请参考文档查看平台各语言支持的Runtime版本"))
@ -452,23 +455,21 @@ func (d *SourceCodeParse) GetLang() code.Lang {
return d.Lang
}
//GetRuntime GetRuntime
func (d *SourceCodeParse) GetRuntime() bool {
return d.Runtime
}
//GetServiceInfo get service info
func (d *SourceCodeParse) GetServiceInfo() []ServiceInfo {
serviceInfo := ServiceInfo{
Volumes: d.GetVolumes(),
Image: d.GetImage(),
Args: d.GetArgs(),
Branchs: d.GetBranchs(),
Memory: d.memory,
Lang: d.GetLang(),
Dependencies: d.Dependencies,
Procfile: d.Procfile,
Runtime: d.Runtime,
Dependencies: d.Dependencies,
Procfile: d.Procfile,
Runtime: d.Runtime,
Ports: d.GetPorts(),
Envs: d.GetEnvs(),
Volumes: d.GetVolumes(),
Image: d.GetImage(),
Args: d.GetArgs(),
Branchs: d.GetBranchs(),
Memory: d.memory,
Lang: d.GetLang(),
ServiceDeployType: util.StatelessServiceType,
}
var res []ServiceInfo
if d.isMulti && d.services != nil && len(d.services) > 0 {

View File

@ -35,7 +35,7 @@ func TestParseDockerfileInfo(t *testing.T) {
volumes: make(map[string]*types.Volume),
envs: make(map[string]*types.Env),
logger: nil,
image: parseImageName(builder.RUNNERIMAGENAME),
image: ParseImageName(builder.RUNNERIMAGENAME),
args: []string{"start", "web"},
}
parse.parseDockerfileInfo("./Dockerfile")

View File

@ -0,0 +1,73 @@
// RAINBOND, Application Management Platform
// Copyright (C) 2014-2019 Goodrain Co., Ltd.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package sources
import (
"github.com/goodrain/rainbond/builder/sources/registry"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/reference"
)
//GetTagFromNamedRef get image tag by name
func GetTagFromNamedRef(ref reference.Named) string {
if digested, ok := ref.(reference.Digested); ok {
return digested.Digest().String()
}
ref = reference.TagNameOnly(ref)
if tagged, ok := ref.(reference.Tagged); ok {
return tagged.Tag()
}
return ""
}
//ImageExist check image exist
func ImageExist(imageName, user, password string) (bool, error) {
ref, err := reference.ParseAnyReference(imageName)
if err != nil {
logrus.Errorf("reference image error: %s", err.Error())
return false, err
}
name, err := reference.ParseNamed(ref.String())
if err != nil {
logrus.Errorf("reference parse image name error: %s", err.Error())
return false, err
}
domain := reference.Domain(name)
if domain == "docker.io" {
domain = "registry-1.docker.io"
}
reg, err := registry.New(domain, user, password)
if err != nil {
reg, err = registry.NewInsecure(domain, user, password)
if err != nil {
logrus.Errorf("new registry client failure %s", err.Error())
return false, err
}
}
if err := reg.Ping(); err != nil {
return false, err
}
tag := GetTagFromNamedRef(name)
_, err = reg.ManifestV2(reference.Path(name), tag)
if err != nil {
return false, err
}
return true, nil
}
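A rough usage sketch of the parsing steps above (standalone, outside the sources package, so it calls the reference package directly): reference.ParseAnyReference normalizes a bare name such as "redis" to docker.io/library/redis, and reference.TagNameOnly, which GetTagFromNamedRef relies on, falls back to the "latest" tag when none is given.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Bare names are normalized to the canonical docker.io/library/... form.
	ref, err := reference.ParseAnyReference("redis")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	named, err := reference.ParseNamed(ref.String())
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(reference.Domain(named)) // docker.io
	fmt.Println(reference.Path(named))   // library/redis

	// This mirrors what GetTagFromNamedRef returns for an untagged name.
	tagged := reference.TagNameOnly(named)
	if t, ok := tagged.(reference.Tagged); ok {
		fmt.Println(t.Tag()) // latest
	}
}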

View File

@ -0,0 +1,41 @@
// RAINBOND, Application Management Platform
// Copyright (C) 2014-2019 Goodrain Co., Ltd.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package sources
import "testing"
func TestPublicImageExist(t *testing.T) {
exist, err := ImageExist("barnett/nextcloud-runtime:0.2", "", "")
if err != nil {
t.Fail()
}
if exist {
t.Log("image exist")
}
}
func TestPrivateImageExist(t *testing.T) {
exist, err := ImageExist("barnett/collabora:190114", "barnett", "5258423Zqg")
if err != nil {
t.Fail()
}
if exist {
t.Log("image exist")
}
}

View File

@ -19,9 +19,14 @@
package dao
import (
"time"
"errors"
"github.com/goodrain/rainbond/db/model"
"time"
)
var (
// VolumeNotFound volume not found error; returned when no matching record is found
VolumeNotFound = errors.New("Volume not found.")
)
//Dao data persistence layer interface

View File

@ -20,6 +20,7 @@ package dao
import (
"fmt"
"github.com/goodrain/rainbond/db/dao"
"os"
"reflect"
"strconv"
@ -974,6 +975,9 @@ func (t *TenantServiceVolumeDaoImpl) GetVolumeByServiceIDAndName(serviceID, name
func (t *TenantServiceVolumeDaoImpl) GetVolumeByID(id int) (*model.TenantServiceVolume, error) {
var volume model.TenantServiceVolume
if err := t.DB.Where("ID=?", id).Find(&volume).Error; err != nil {
if err == gorm.ErrRecordNotFound {
return nil, dao.VolumeNotFound
}
return nil, err
}
return &volume, nil

View File

@ -20,6 +20,7 @@ package store
import (
"errors"
"strconv"
"github.com/goodrain/rainbond/eventlog/db"
"github.com/goodrain/rainbond/eventlog/util"
@ -323,13 +324,17 @@ func (s *storeManager) deleteFile(filename string) error {
if err != nil {
return err
}
if now.After(theTime.Add(7 * time.Hour * 24)) {
saveDay, _ := strconv.Atoi(os.Getenv("DOCKER_LOG_SAVE_DAY"))
if saveDay == 0 {
saveDay = 7
}
if now.After(theTime.Add(time.Duration(saveDay) * time.Hour * 24)) {
if err := os.Remove(filename); err != nil {
if !strings.Contains(err.Error(), "No such file or directory") {
return err
}
}
logrus.Debug("clean service log %s", filename)
logrus.Debugf("clean service log %s", filename)
}
return nil
}

View File

@ -215,9 +215,9 @@ func (o *OrService) persistUpstreams(pools []*v1.Pool, tmpl string, path string,
func getNgxServer(conf *v1.Config) (l7srv []*model.Server, l4srv []*model.Server) {
for _, vs := range conf.L7VS {
server := &model.Server{
Listen: strings.Join(vs.Listening, " "),
ServerName: strings.Replace(vs.ServerName, "tls", "", 1),
ForceSSLRedirect: vs.ForceSSLRedirect,
Listen: strings.Join(vs.Listening, " "),
ServerName: strings.Replace(vs.ServerName, "tls", "", 1),
// ForceSSLRedirect: vs.ForceSSLRedirect,
OptionValue: map[string]string{
"tenant_id": vs.Namespace,
"service_id": vs.ServiceID,
@ -227,9 +227,6 @@ func getNgxServer(conf *v1.Config) (l7srv []*model.Server, l4srv []*model.Server
if vs.SSLCert != nil {
server.SSLCertificate = vs.SSLCert.CertificatePem
server.SSLCertificateKey = vs.SSLCert.CertificatePem
if vs.ForceSSLRedirect {
}
}
for _, loc := range vs.Locations {
location := &model.Location{
@ -240,6 +237,7 @@ func getNgxServer(conf *v1.Config) (l7srv []*model.Server, l4srv []*model.Server
Proxy: loc.Proxy,
Rewrite: loc.Rewrite,
PathRewrite: false,
DisableProxyPass: loc.DisableProxyPass,
}
server.Locations = append(server.Locations, location)
}

View File

@ -34,6 +34,7 @@ import (
"github.com/goodrain/rainbond/cmd/gateway/option"
"github.com/goodrain/rainbond/gateway/annotations"
"github.com/goodrain/rainbond/gateway/annotations/l4"
"github.com/goodrain/rainbond/gateway/annotations/rewrite"
"github.com/goodrain/rainbond/gateway/controller/config"
"github.com/goodrain/rainbond/gateway/defaults"
"github.com/goodrain/rainbond/gateway/util"
@ -537,10 +538,9 @@ func (s *k8sStore) ListVirtualService() (l7vs []*v1.VirtualService, l4vs []*v1.V
vs = l7vsMap[virSrvName]
if vs == nil {
vs = &v1.VirtualService{
Listening: []string{strconv.Itoa(s.conf.ListenPorts.HTTP)},
ServerName: virSrvName,
Locations: []*v1.Location{},
ForceSSLRedirect: anns.Rewrite.ForceSSLRedirect,
Listening: []string{strconv.Itoa(s.conf.ListenPorts.HTTP)},
ServerName: virSrvName,
Locations: []*v1.Location{},
}
vs.Namespace = ing.Namespace
vs.ServiceID = anns.Labels["service_id"]
@ -600,6 +600,66 @@ func (s *k8sStore) ListVirtualService() (l7vs []*v1.VirtualService, l4vs []*v1.V
// endregion
}
}
for _, item := range s.listers.Ingress.List() {
ing := item.(*extensions.Ingress)
if !s.ingressIsValid(ing) {
continue
}
ingKey := k8s.MetaNamespaceKey(ing)
anns, err := s.GetIngressAnnotations(ingKey)
if err != nil {
logrus.Errorf("Error getting Ingress annotations %q: %v", ingKey, err)
}
if !anns.Rewrite.ForceSSLRedirect {
continue
}
if !anns.L4.L4Enable || anns.L4.L4Port == 0 {
for _, rule := range ing.Spec.Rules {
var vs *v1.VirtualService
virSrvName := strings.TrimSpace(rule.Host)
vs = l7vsMap[virSrvName]
if vs == nil {
vs = &v1.VirtualService{
Listening: []string{strconv.Itoa(s.conf.ListenPorts.HTTP)},
ServerName: virSrvName,
Locations: []*v1.Location{},
}
l7vsMap[virSrvName] = vs
l7vs = append(l7vs, vs)
}
for _, path := range rule.IngressRuleValue.HTTP.Paths {
locKey := fmt.Sprintf("%s_%s", virSrvName, path.Path)
location := srvLocMap[locKey]
if location != nil {
// If location != nil, the http policy for path is already set.
// In this case, ForceSSLRedirect should be ignored.
continue
}
location = &v1.Location{
Path: path.Path,
DisableProxyPass: true,
Rewrite: rewrite.Config{
Rewrites: []*rewrite.Rewrite{
{
Regex: "^",
Replacement: "https://$http_host$request_uri?",
Flag: "permanent",
},
},
},
}
location.Proxy = anns.Proxy
vs.Locations = append(vs.Locations, location)
}
}
// endregion
}
}
return l7vs, l4vs
}

View File

@ -45,7 +45,8 @@ type Location struct {
// Proxy contains information about timeouts and buffer sizes
// to be used in connections against endpoints
// +optional
Proxy proxy.Config `json:"proxy,omitempty"`
Proxy proxy.Config `json:"proxy,omitempty"`
DisableProxyPass bool
}
// Condition is the condition that the traffic can reach the specified backend

View File

@ -1,19 +1,5 @@
{{ $http_port := .Set.ListenPorts.HTTP }}
{{ range $srv := .Servers }}
{{ if $srv.ForceSSLRedirect }}
server {
listen {{ $http_port }};
{{ if $srv.ServerName }}server_name {{$srv.ServerName}};{{end}}
{{ range $loc := $srv.Locations }}
location {{$loc.Path}} {
rewrite ^ https://$http_host$request_uri? permanent;
{{ if $loc.DisableAccessLog }}
access_log off;
{{ end }}
}
{{ end }}
}
{{ end }}
server {
{{ if $srv.Listen }}listen {{$srv.Listen}};{{ end }}
{{ if $srv.Root }}root {{$srv.Root}};{{ end }}

View File

@ -51,8 +51,10 @@ func (e *Node) UpdateEndpoints(endpoints ...*config.Endpoint) {
e.sortedEndpoints = newArr
scrape := e.toScrape()
e.Prometheus.UpdateScrape(scrape)
scrapes := e.toScrape()
for _, scrape := range scrapes {
e.Prometheus.UpdateScrape(scrape)
}
}
func (e *Node) Error(err error) {
@ -64,13 +66,13 @@ func (e *Node) Name() string {
return "rbd_node"
}
func (e *Node) toScrape() *prometheus.ScrapeConfig {
func (e *Node) toScrape() []*prometheus.ScrapeConfig {
ts := make([]string, 0, len(e.sortedEndpoints))
for _, end := range e.sortedEndpoints {
ts = append(ts, end)
}
return &prometheus.ScrapeConfig{
return []*prometheus.ScrapeConfig{&prometheus.ScrapeConfig{
JobName: e.Name(),
ScrapeInterval: model.Duration(30 * time.Second),
ScrapeTimeout: model.Duration(30 * time.Second),
@ -85,6 +87,21 @@ func (e *Node) toScrape() *prometheus.ScrapeConfig {
},
},
},
},
&prometheus.ScrapeConfig{
JobName: "rbd_cluster",
ScrapeInterval: model.Duration(30 * time.Second),
ScrapeTimeout: model.Duration(30 * time.Second),
MetricsPath: "/cluster/metrics",
ServiceDiscoveryConfig: prometheus.ServiceDiscoveryConfig{
StaticConfigs: []*prometheus.Group{
{
Targets: ts,
Labels: map[model.LabelName]model.LabelValue{},
},
},
},
},
}
}

View File

@ -50,7 +50,7 @@ func (s *mqServer) Enqueue(ctx context.Context, in *pb.EnqueueRequest) (*pb.Task
if err != nil {
return nil, err
}
logrus.Debugf("task (%v) enqueue.", in.Message.String())
logrus.Debugf("task (%v) enqueue.", in.Message.TaskType)
return &pb.TaskReply{
Status: "success",
}, nil
@ -77,7 +77,7 @@ func (s *mqServer) Dequeue(ctx context.Context, in *pb.DequeueRequest) (*pb.Task
if err != nil {
return nil, err
}
logrus.Infof("task (%s) dnqueue by (%s).", task.GetTaskType(), in.ClientHost)
logrus.Debugf("task (%s) dnqueue by (%s).", task.GetTaskType(), in.ClientHost)
return &task, nil
}

View File

@ -23,6 +23,9 @@ import (
"net/http"
"strconv"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/goodrain/rainbond/discover"
"github.com/goodrain/rainbond/node/kubecache"
"github.com/goodrain/rainbond/node/masterserver"
@ -70,6 +73,8 @@ func NewManager(c option.Conf, node *nodeclient.HostNode, ms *masterserver.Maste
node: node,
ms: ms,
}
// set node cluster monitor route
m.router.Get("/cluster/metrics", m.HandleClusterScrape)
return m
}
@ -125,3 +130,20 @@ func (m *Manager) Stop() error {
func (m *Manager) GetRouter() *chi.Mux {
return m.router
}
//HandleClusterScrape prometheus handle
func (m *Manager) HandleClusterScrape(w http.ResponseWriter, r *http.Request) {
gatherers := prometheus.Gatherers{
prometheus.DefaultGatherer,
}
if m.ms != nil {
gatherers = append(gatherers, m.ms.GetRegistry())
}
// Delegate http serving to Prometheus client library, which will call collector.Collect.
h := promhttp.HandlerFor(gatherers,
promhttp.HandlerOpts{
ErrorLog: logrus.StandardLogger(),
ErrorHandling: promhttp.ContinueOnError,
})
h.ServeHTTP(w, r)
}

View File

@ -0,0 +1,59 @@
// RAINBOND, Application Management Platform
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package monitor
import (
"github.com/goodrain/rainbond/node/masterserver/node"
"github.com/prometheus/client_golang/prometheus"
)
//Manager Manager
type Manager interface {
Start(errchan chan error) error
Stop() error
GetRegistry() *prometheus.Registry
}
type manager struct {
clusterExporterRestry *prometheus.Registry
cluster *node.Cluster
}
//CreateManager CreateManager
func CreateManager(cluster *node.Cluster) (Manager, error) {
clusterRegistry := prometheus.NewRegistry()
manage := &manager{
clusterExporterRestry: clusterRegistry,
cluster: cluster,
}
return manage, nil
}
func (m *manager) Start(errchan chan error) error {
return m.clusterExporterRestry.Register(m.cluster)
}
func (m *manager) Stop() error {
return nil
}
func (m *manager) GetRegistry() *prometheus.Registry {
return m.clusterExporterRestry
}

View File

@ -364,7 +364,6 @@ func (n *Cluster) checkNodeInstall(node *client.HostNode) {
if index > -1 {
jsonOutPut = result[index:]
}
fmt.Println("Init node Result:" + jsonOutPut)
output, err := model.ParseTaskOutPut(jsonOutPut)
if err != nil {
errorCondition("节点初始化输出数据错误", err)

View File

@ -0,0 +1,80 @@
// RAINBOND, Application Management Platform
// Copyright (C) 2014-2019 Goodrain Co., Ltd.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package node
import (
"strconv"
"time"
"github.com/goodrain/rainbond/node/nodem/client"
"github.com/prometheus/client_golang/prometheus"
)
var (
namespace = "rainbond"
scrapeDurationDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "cluster", "collector_duration_seconds"),
"cluster_exporter: Duration of a collector scrape.",
[]string{},
nil,
)
nodeStatus = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "cluster", "node_health"),
"node_health: Rainbond node health status.",
[]string{"node_id", "node_ip", "status", "healthy"},
nil,
)
componentStatus = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "cluster", "component_health"),
"component_health: Rainbond node component health status.",
[]string{"node_id", "node_ip", "component"},
nil,
)
)
//Collect prometheus collect
func (n *Cluster) Collect(ch chan<- prometheus.Metric) {
begin := time.Now()
for _, node := range n.GetAllNode() {
ch <- prometheus.MustNewConstMetric(nodeStatus, prometheus.GaugeValue, func() float64 {
if node.Status == client.Running && node.NodeStatus.NodeHealth {
return 0
}
return 1
}(), node.ID, node.InternalIP, node.Status, strconv.FormatBool(node.NodeStatus.NodeHealth))
for _, con := range node.NodeStatus.Conditions {
ch <- prometheus.MustNewConstMetric(componentStatus, prometheus.GaugeValue, func() float64 {
if con.Status == client.ConditionTrue {
return 0
}
return 1
}(), node.ID, node.InternalIP, string(con.Type))
}
}
duration := time.Since(begin)
ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds())
}
//Describe prometheus describe
func (n *Cluster) Describe(ch chan<- *prometheus.Desc) {
ch <- scrapeDurationDesc
ch <- nodeStatus
ch <- componentStatus
}
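Collect and Describe above follow client_golang's constant-metric collector pattern: samples are built on the fly with prometheus.MustNewConstMetric against pre-declared descriptors. A small self-contained sketch of the same pattern (toy collector and values, covering only the rainbond_cluster_node_health gauge), showing how such a collector is registered and gathered:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// toyCollector emits one constant gauge sample per call to Collect,
// mirroring the node-health metric declared above.
type toyCollector struct {
	nodeStatus *prometheus.Desc
}

func (c toyCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.nodeStatus }

func (c toyCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.nodeStatus, prometheus.GaugeValue, 0,
		"node-1", "10.0.0.1", "running", "true")
}

func main() {
	c := toyCollector{nodeStatus: prometheus.NewDesc(
		prometheus.BuildFQName("rainbond", "cluster", "node_health"),
		"node_health: Rainbond node health status.",
		[]string{"node_id", "node_ip", "status", "healthy"}, nil,
	)}
	reg := prometheus.NewRegistry()
	reg.MustRegister(c)
	families, err := reg.Gather()
	if err != nil {
		fmt.Println("gather error:", err)
		return
	}
	for _, mf := range families {
		fmt.Println(mf.GetName(), len(mf.GetMetric())) // rainbond_cluster_node_health 1
	}
}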

View File

@ -21,6 +21,10 @@ package masterserver
import (
"context"
"github.com/prometheus/client_golang/prometheus"
"github.com/goodrain/rainbond/node/masterserver/monitor"
"github.com/Sirupsen/logrus"
"github.com/goodrain/rainbond/node/kubecache"
@ -39,6 +43,7 @@ type MasterServer struct {
ctx context.Context
cancel context.CancelFunc
datacenterConfig *config.DataCenterConfig
clusterMonitor monitor.Manager
}
//NewMasterServer create the master node
@ -46,6 +51,11 @@ func NewMasterServer(modelnode *client.HostNode, kubecli kubecache.KubeClient) (
datacenterConfig := config.GetDataCenterConfig()
ctx, cancel := context.WithCancel(context.Background())
nodecluster := node.CreateCluster(kubecli, modelnode, datacenterConfig)
clusterMonitor, err := monitor.CreateManager(nodecluster)
if err != nil {
cancel()
return nil, err
}
ms := &MasterServer{
Client: store.DefalutClient,
HostNode: modelnode,
@ -53,6 +63,7 @@ func NewMasterServer(modelnode *client.HostNode, kubecli kubecache.KubeClient) (
ctx: ctx,
cancel: cancel,
datacenterConfig: datacenterConfig,
clusterMonitor: clusterMonitor,
}
return ms, nil
}
@ -64,7 +75,7 @@ func (m *MasterServer) Start(errchan chan error) error {
logrus.Error("node cluster start error,", err.Error())
return err
}
return nil
return m.clusterMonitor.Start(errchan)
}
//Stop stop
@ -72,5 +83,13 @@ func (m *MasterServer) Stop(i interface{}) {
if m.Cluster != nil {
m.Cluster.Stop(i)
}
if m.clusterMonitor != nil {
m.clusterMonitor.Stop()
}
m.cancel()
}
//GetRegistry get monitor metric registry
func (m *MasterServer) GetRegistry() *prometheus.Registry {
return m.clusterMonitor.GetRegistry()
}

View File

@ -47,7 +47,10 @@ type manager struct {
func createNodeExporterRestry() (*prometheus.Registry, error) {
registry := prometheus.NewRegistry()
filters := []string{"cpu", "diskstats", "filesystem", "ipvs", "loadavg", "meminfo", "netdev", "netstat", "uname", "mountstats", "nfs"}
filters := []string{"cpu", "diskstats", "filesystem",
"ipvs", "loadavg", "meminfo", "netdev",
"netclass", "netdev", "netstat",
"uname", "mountstats", "nfs"}
nc, err := collector.NewNodeCollector(filters...)
if err != nil {
return nil, err

View File

@ -7,9 +7,12 @@ BASE_NAME=rainbond
GO_VERSION=1.11
GATEWAY_GO_VERSION=1.11-alpine3.8
VERSION=5.1.1
if [ -z "$TRAVIS_TAG" ]; then
VERSION=$TRAVIS_BRANCH-dev
if [ -z "$TRAVIS_BRANCH" ]; then
VERSION=v5.1-dev
else
VERSION=$TRAVIS_BRANCH-dev
fi
else
VERSION=$TRAVIS_TAG
fi

View File

@ -0,0 +1,182 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !nonetclass
// +build linux
package collector
import (
"fmt"
"regexp"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs"
)
var (
netclassIgnoredDevices = "^$"
)
type netClassCollector struct {
subsystem string
ignoredDevicesPattern *regexp.Regexp
metricDescs map[string]*prometheus.Desc
}
func init() {
registerCollector("netclass", defaultEnabled, NewNetClassCollector)
}
// NewNetClassCollector returns a new Collector exposing network class stats.
func NewNetClassCollector() (Collector, error) {
pattern := regexp.MustCompile(netclassIgnoredDevices)
return &netClassCollector{
subsystem: "network",
ignoredDevicesPattern: pattern,
metricDescs: map[string]*prometheus.Desc{},
}, nil
}
func (c *netClassCollector) Update(ch chan<- prometheus.Metric) error {
netClass, err := getNetClassInfo(c.ignoredDevicesPattern)
if err != nil {
return fmt.Errorf("could not get net class info: %s", err)
}
for _, ifaceInfo := range netClass {
upDesc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, c.subsystem, "up"),
"Value is 1 if operstate is 'up', 0 otherwise.",
[]string{"device"},
nil,
)
upValue := 0.0
if ifaceInfo.OperState == "up" {
upValue = 1.0
}
ch <- prometheus.MustNewConstMetric(upDesc, prometheus.GaugeValue, upValue, ifaceInfo.Name)
infoDesc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, c.subsystem, "info"),
"Non-numeric data from /sys/class/net/<iface>, value is always 1.",
[]string{"device", "address", "broadcast", "duplex", "operstate", "ifalias"},
nil,
)
infoValue := 1.0
ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, ifaceInfo.Name, ifaceInfo.Address, ifaceInfo.Broadcast, ifaceInfo.Duplex, ifaceInfo.OperState, ifaceInfo.IfAlias)
if ifaceInfo.AddrAssignType != nil {
pushMetric(ch, c.subsystem, "address_assign_type", *ifaceInfo.AddrAssignType, ifaceInfo.Name, prometheus.GaugeValue)
}
if ifaceInfo.Carrier != nil {
pushMetric(ch, c.subsystem, "carrier", *ifaceInfo.Carrier, ifaceInfo.Name, prometheus.GaugeValue)
}
if ifaceInfo.CarrierChanges != nil {
pushMetric(ch, c.subsystem, "carrier_changes_total", *ifaceInfo.CarrierChanges, ifaceInfo.Name, prometheus.CounterValue)
}
if ifaceInfo.CarrierUpCount != nil {
pushMetric(ch, c.subsystem, "carrier_up_changes_total", *ifaceInfo.CarrierUpCount, ifaceInfo.Name, prometheus.CounterValue)
}
if ifaceInfo.CarrierDownCount != nil {
pushMetric(ch, c.subsystem, "carrier_down_changes_total", *ifaceInfo.CarrierDownCount, ifaceInfo.Name, prometheus.CounterValue)
}
if ifaceInfo.DevID != nil {
pushMetric(ch, c.subsystem, "device_id", *ifaceInfo.DevID, ifaceInfo.Name, prometheus.GaugeValue)
}
if ifaceInfo.Dormant != nil {
pushMetric(ch, c.subsystem, "dormant", *ifaceInfo.Dormant, ifaceInfo.Name, prometheus.GaugeValue)
}
if ifaceInfo.Flags != nil {
pushMetric(ch, c.subsystem, "flags", *ifaceInfo.Flags, ifaceInfo.Name, prometheus.GaugeValue)
}
if ifaceInfo.IfIndex != nil {
pushMetric(ch, c.subsystem, "iface_id", *ifaceInfo.IfIndex, ifaceInfo.Name, prometheus.GaugeValue)
}
if ifaceInfo.IfLink != nil {
pushMetric(ch, c.subsystem, "iface_link", *ifaceInfo.IfLink, ifaceInfo.Name, prometheus.GaugeValue)
}
if ifaceInfo.LinkMode != nil {
pushMetric(ch, c.subsystem, "iface_link_mode", *ifaceInfo.LinkMode, ifaceInfo.Name, prometheus.GaugeValue)
}
if ifaceInfo.MTU != nil {
pushMetric(ch, c.subsystem, "mtu_bytes", *ifaceInfo.MTU, ifaceInfo.Name, prometheus.GaugeValue)
}
if ifaceInfo.NameAssignType != nil {
pushMetric(ch, c.subsystem, "name_assign_type", *ifaceInfo.NameAssignType, ifaceInfo.Name, prometheus.GaugeValue)
}
if ifaceInfo.NetDevGroup != nil {
pushMetric(ch, c.subsystem, "net_dev_group", *ifaceInfo.NetDevGroup, ifaceInfo.Name, prometheus.GaugeValue)
}
if ifaceInfo.Speed != nil {
speedBytes := int64(*ifaceInfo.Speed / 8 * 1000 * 1000)
pushMetric(ch, c.subsystem, "speed_bytes", speedBytes, ifaceInfo.Name, prometheus.GaugeValue)
}
if ifaceInfo.TxQueueLen != nil {
pushMetric(ch, c.subsystem, "transmit_queue_length", *ifaceInfo.TxQueueLen, ifaceInfo.Name, prometheus.GaugeValue)
}
if ifaceInfo.Type != nil {
pushMetric(ch, c.subsystem, "protocol_type", *ifaceInfo.Type, ifaceInfo.Name, prometheus.GaugeValue)
}
}
return nil
}
func pushMetric(ch chan<- prometheus.Metric, subsystem string, name string, value int64, ifaceName string, valueType prometheus.ValueType) {
fieldDesc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, name),
fmt.Sprintf("%s value of /sys/class/net/<iface>.", name),
[]string{"device"},
nil,
)
ch <- prometheus.MustNewConstMetric(fieldDesc, valueType, float64(value), ifaceName)
}
func getNetClassInfo(ignore *regexp.Regexp) (sysfs.NetClass, error) {
fs, err := sysfs.NewFS(*sysPath)
if err != nil {
return nil, err
}
netClass, err := fs.NewNetClass()
if err != nil {
return netClass, fmt.Errorf("error obtaining net class info: %s", err)
}
for device := range netClass {
if ignore.MatchString(device) {
delete(netClass, device)
}
}
return netClass, nil
}

View File

@ -22,11 +22,10 @@ import (
"strconv"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/alecthomas/kingpin.v2"
)
var (
netdevIgnoredDevices = kingpin.Flag("collector.netdev.ignored-devices", "Regexp of net devices to ignore for netdev collector.").Default("^$").String()
netdevIgnoredDevices = "^$"
)
type netDevCollector struct {
@ -41,7 +40,7 @@ func init() {
// NewNetDevCollector returns a new Collector exposing network device stats.
func NewNetDevCollector() (Collector, error) {
pattern := regexp.MustCompile(*netdevIgnoredDevices)
pattern := regexp.MustCompile(netdevIgnoredDevices)
return &netDevCollector{
subsystem: "network",
ignoredDevicesPattern: pattern,

vendor/github.com/prometheus/procfs/internal/util/parse.go (29 changes, generated, vendored, Executable file → Normal file)
View File

@ -13,7 +13,11 @@
package util
import "strconv"
import (
"io/ioutil"
"strconv"
"strings"
)
// ParseUint32s parses a slice of strings into a slice of uint32s.
func ParseUint32s(ss []string) ([]uint32, error) {
@ -44,3 +48,26 @@ func ParseUint64s(ss []string) ([]uint64, error) {
return us, nil
}
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return 0, err
}
return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}
// ParseBool parses a string into a boolean pointer.
func ParseBool(b string) *bool {
var truth bool
switch b {
case "enabled":
truth = true
case "disabled":
truth = false
default:
return nil
}
return &truth
}

View File

@ -0,0 +1,45 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux,!appengine
package util
import (
"bytes"
"os"
"syscall"
)
// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
// https://github.com/prometheus/node_exporter/pull/728/files
func SysReadFile(file string) (string, error) {
f, err := os.Open(file)
if err != nil {
return "", err
}
defer f.Close()
// On some machines, hwmon drivers are broken and return EAGAIN. This causes
// Go's ioutil.ReadFile implementation to poll forever.
//
// Since we either want to read data or bail immediately, do the simplest
// possible read using syscall directly.
b := make([]byte, 128)
n, err := syscall.Read(int(f.Fd()), b)
if err != nil {
return "", err
}
return string(bytes.TrimSpace(b[:n])), nil
}

View File

@ -0,0 +1,26 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux,appengine !linux
package util
import (
"fmt"
)
// SysReadFile is here implemented as a noop for builds that do not support
// the read syscall. For example Windows, or Linux on Google App Engine.
func SysReadFile(file string) (string, error) {
return "", fmt.Errorf("not supported on this platform")
}

View File

@ -0,0 +1,188 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package sysfs
import (
"fmt"
"io/ioutil"
"os"
"reflect"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// PowerSupply contains info from files in /sys/class/power_supply for a single power supply.
type PowerSupply struct {
Name string // Power Supply Name
Authentic *int64 `fileName:"authentic"` // /sys/class/power_suppy/<Name>/authentic
Calibrate *int64 `fileName:"calibrate"` // /sys/class/power_suppy/<Name>/calibrate
Capacity *int64 `fileName:"capacity"` // /sys/class/power_suppy/<Name>/capacity
CapacityAlertMax *int64 `fileName:"capacity_alert_max"` // /sys/class/power_suppy/<Name>/capacity_alert_max
CapacityAlertMin *int64 `fileName:"capacity_alert_min"` // /sys/class/power_suppy/<Name>/capacity_alert_min
CapacityLevel string `fileName:"capacity_level"` // /sys/class/power_suppy/<Name>/capacity_level
ChargeAvg *int64 `fileName:"charge_avg"` // /sys/class/power_suppy/<Name>/charge_avg
ChargeControlLimit *int64 `fileName:"charge_control_limit"` // /sys/class/power_suppy/<Name>/charge_control_limit
ChargeControlLimitMax *int64 `fileName:"charge_control_limit_max"` // /sys/class/power_suppy/<Name>/charge_control_limit_max
ChargeCounter *int64 `fileName:"charge_counter"` // /sys/class/power_suppy/<Name>/charge_counter
ChargeEmpty *int64 `fileName:"charge_empty"` // /sys/class/power_suppy/<Name>/charge_empty
ChargeEmptyDesign *int64 `fileName:"charge_empty_design"` // /sys/class/power_suppy/<Name>/charge_empty_design
ChargeFull *int64 `fileName:"charge_full"` // /sys/class/power_suppy/<Name>/charge_full
ChargeFullDesign *int64 `fileName:"charge_full_design"` // /sys/class/power_suppy/<Name>/charge_full_design
ChargeNow *int64 `fileName:"charge_now"` // /sys/class/power_suppy/<Name>/charge_now
ChargeTermCurrent *int64 `fileName:"charge_term_current"` // /sys/class/power_suppy/<Name>/charge_term_current
ChargeType string `fileName:"charge_type"` // /sys/class/power_supply/<Name>/charge_type
ConstantChargeCurrent *int64 `fileName:"constant_charge_current"` // /sys/class/power_suppy/<Name>/constant_charge_current
ConstantChargeCurrentMax *int64 `fileName:"constant_charge_current_max"` // /sys/class/power_suppy/<Name>/constant_charge_current_max
ConstantChargeVoltage *int64 `fileName:"constant_charge_voltage"` // /sys/class/power_suppy/<Name>/constant_charge_voltage
ConstantChargeVoltageMax *int64 `fileName:"constant_charge_voltage_max"` // /sys/class/power_suppy/<Name>/constant_charge_voltage_max
CurrentAvg *int64 `fileName:"current_avg"` // /sys/class/power_suppy/<Name>/current_avg
CurrentBoot *int64 `fileName:"current_boot"` // /sys/class/power_suppy/<Name>/current_boot
CurrentMax *int64 `fileName:"current_max"` // /sys/class/power_suppy/<Name>/current_max
CurrentNow *int64 `fileName:"current_now"` // /sys/class/power_suppy/<Name>/current_now
CycleCount *int64 `fileName:"cycle_count"` // /sys/class/power_suppy/<Name>/cycle_count
EnergyAvg *int64 `fileName:"energy_avg"` // /sys/class/power_supply/<Name>/energy_avg
EnergyEmpty *int64 `fileName:"energy_empty"` // /sys/class/power_suppy/<Name>/energy_empty
EnergyEmptyDesign *int64 `fileName:"energy_empty_design"` // /sys/class/power_suppy/<Name>/energy_empty_design
EnergyFull *int64 `fileName:"energy_full"` // /sys/class/power_suppy/<Name>/energy_full
EnergyFullDesign *int64 `fileName:"energy_full_design"` // /sys/class/power_suppy/<Name>/energy_full_design
EnergyNow *int64 `fileName:"energy_now"` // /sys/class/power_supply/<Name>/energy_now
Health string `fileName:"health"` // /sys/class/power_suppy/<Name>/health
InputCurrentLimit *int64 `fileName:"input_current_limit"` // /sys/class/power_suppy/<Name>/input_current_limit
Manufacturer string `fileName:"manufacturer"` // /sys/class/power_suppy/<Name>/manufacturer
ModelName string `fileName:"model_name"` // /sys/class/power_suppy/<Name>/model_name
Online *int64 `fileName:"online"` // /sys/class/power_suppy/<Name>/online
PowerAvg *int64 `fileName:"power_avg"` // /sys/class/power_suppy/<Name>/power_avg
PowerNow *int64 `fileName:"power_now"` // /sys/class/power_suppy/<Name>/power_now
PrechargeCurrent *int64 `fileName:"precharge_current"` // /sys/class/power_suppy/<Name>/precharge_current
Present *int64 `fileName:"present"` // /sys/class/power_suppy/<Name>/present
Scope string `fileName:"scope"` // /sys/class/power_suppy/<Name>/scope
SerialNumber string `fileName:"serial_number"` // /sys/class/power_suppy/<Name>/serial_number
Status string `fileName:"status"` // /sys/class/power_supply/<Name>/status
Technology string `fileName:"technology"` // /sys/class/power_suppy/<Name>/technology
Temp *int64 `fileName:"temp"` // /sys/class/power_suppy/<Name>/temp
TempAlertMax *int64 `fileName:"temp_alert_max"` // /sys/class/power_suppy/<Name>/temp_alert_max
TempAlertMin *int64 `fileName:"temp_alert_min"` // /sys/class/power_suppy/<Name>/temp_alert_min
TempAmbient *int64 `fileName:"temp_ambient"` // /sys/class/power_suppy/<Name>/temp_ambient
TempAmbientMax *int64 `fileName:"temp_ambient_max"` // /sys/class/power_suppy/<Name>/temp_ambient_max
TempAmbientMin *int64 `fileName:"temp_ambient_min"` // /sys/class/power_suppy/<Name>/temp_ambient_min
TempMax *int64 `fileName:"temp_max"` // /sys/class/power_suppy/<Name>/temp_max
TempMin *int64 `fileName:"temp_min"` // /sys/class/power_suppy/<Name>/temp_min
TimeToEmptyAvg *int64 `fileName:"time_to_empty_avg"` // /sys/class/power_suppy/<Name>/time_to_empty_avg
TimeToEmptyNow *int64 `fileName:"time_to_empty_now"` // /sys/class/power_suppy/<Name>/time_to_empty_now
TimeToFullAvg *int64 `fileName:"time_to_full_avg"` // /sys/class/power_suppy/<Name>/time_to_full_avg
TimeToFullNow *int64 `fileName:"time_to_full_now"` // /sys/class/power_suppy/<Name>/time_to_full_now
Type string `fileName:"type"` // /sys/class/power_supply/<Name>/type
UsbType string `fileName:"usb_type"` // /sys/class/power_supply/<Name>/usb_type
VoltageAvg *int64 `fileName:"voltage_avg"` // /sys/class/power_supply/<Name>/voltage_avg
VoltageBoot *int64 `fileName:"voltage_boot"` // /sys/class/power_suppy/<Name>/voltage_boot
VoltageMax *int64 `fileName:"voltage_max"` // /sys/class/power_suppy/<Name>/voltage_max
VoltageMaxDesign *int64 `fileName:"voltage_max_design"` // /sys/class/power_suppy/<Name>/voltage_max_design
VoltageMin *int64 `fileName:"voltage_min"` // /sys/class/power_suppy/<Name>/voltage_min
VoltageMinDesign *int64 `fileName:"voltage_min_design"` // /sys/class/power_suppy/<Name>/voltage_min_design
VoltageNow *int64 `fileName:"voltage_now"` // /sys/class/power_supply/<Name>/voltage_now
VoltageOCV *int64 `fileName:"voltage_ocv"` // /sys/class/power_suppy/<Name>/voltage_ocv
}
// PowerSupplyClass is a collection of every power supply in /sys/class/power_supply/.
// The map keys are the names of the power supplies.
type PowerSupplyClass map[string]PowerSupply
// NewPowerSupplyClass returns info for all power supplies read from /sys/class/power_supply/.
func NewPowerSupplyClass() (PowerSupplyClass, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return nil, err
}
return fs.NewPowerSupplyClass()
}
// NewPowerSupplyClass returns info for all power supplies read from /sys/class/power_supply/.
func (fs FS) NewPowerSupplyClass() (PowerSupplyClass, error) {
path := fs.Path("class/power_supply")
powerSupplyDirs, err := ioutil.ReadDir(path)
if err != nil {
return PowerSupplyClass{}, fmt.Errorf("cannot access %s dir %s", path, err)
}
powerSupplyClass := PowerSupplyClass{}
for _, powerSupplyDir := range powerSupplyDirs {
powerSupply, err := powerSupplyClass.parsePowerSupply(path + "/" + powerSupplyDir.Name())
if err != nil {
return nil, err
}
powerSupply.Name = powerSupplyDir.Name()
powerSupplyClass[powerSupplyDir.Name()] = *powerSupply
}
return powerSupplyClass, nil
}
func (psc PowerSupplyClass) parsePowerSupply(powerSupplyPath string) (*PowerSupply, error) {
powerSupply := PowerSupply{}
powerSupplyElem := reflect.ValueOf(&powerSupply).Elem()
powerSupplyType := reflect.TypeOf(powerSupply)
//start from 1 - skip the Name field
for i := 1; i < powerSupplyElem.NumField(); i++ {
fieldType := powerSupplyType.Field(i)
fieldValue := powerSupplyElem.Field(i)
if fieldType.Tag.Get("fileName") == "" {
panic(fmt.Errorf("field %s does not have a filename tag", fieldType.Name))
}
value, err := util.SysReadFile(powerSupplyPath + "/" + fieldType.Tag.Get("fileName"))
if err != nil {
if os.IsNotExist(err) || err.Error() == "operation not supported" || err.Error() == "invalid argument" {
continue
}
return nil, fmt.Errorf("could not access file %s: %s", fieldType.Tag.Get("fileName"), err)
}
switch fieldValue.Kind() {
case reflect.String:
fieldValue.SetString(value)
case reflect.Ptr:
var int64ptr *int64
switch fieldValue.Type() {
case reflect.TypeOf(int64ptr):
var intValue int64
if strings.HasPrefix(value, "0x") {
intValue, err = strconv.ParseInt(value[2:], 16, 64)
if err != nil {
return nil, fmt.Errorf("expected hex value for %s, got: %s", fieldType.Name, value)
}
} else {
intValue, err = strconv.ParseInt(value, 10, 64)
if err != nil {
return nil, fmt.Errorf("expected Uint64 value for %s, got: %s", fieldType.Name, value)
}
}
fieldValue.Set(reflect.ValueOf(&intValue))
default:
return nil, fmt.Errorf("unhandled pointer type %q", fieldValue.Type())
}
default:
return nil, fmt.Errorf("unhandled type %q", fieldValue.Kind())
}
}
return &powerSupply, nil
}
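For orientation, a minimal caller-side sketch of this API (assumed application code, not part of the vendored file); it uses only the exported NewPowerSupplyClass constructor and the fields defined above, and pointer fields are nil when the corresponding sysfs file is absent:
package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs/sysfs"
)

func main() {
    // NewPowerSupplyClass reads every entry under DefaultMountPoint/class/power_supply/.
    psc, err := sysfs.NewPowerSupplyClass()
    if err != nil {
        log.Fatal(err)
    }
    for name, ps := range psc {
        if ps.Capacity != nil {
            fmt.Printf("%s (%s): %d%% %s\n", name, ps.Type, *ps.Capacity, ps.Status)
        } else {
            fmt.Printf("%s (%s)\n", name, ps.Type)
        }
    }
}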

View File

@ -0,0 +1,80 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package sysfs
import (
"encoding/json"
"reflect"
"testing"
)
func TestNewPowerSupplyClass(t *testing.T) {
fs, err := NewFS(sysTestFixtures)
if err != nil {
t.Fatal(err)
}
psc, err := fs.NewPowerSupplyClass()
if err != nil {
t.Fatal(err)
}
var (
acOnline int64 = 0
bat0Capacity int64 = 98
bat0CycleCount int64 = 0
bat0EnergyFull int64 = 50060000
bat0EnergyFullDesign int64 = 47520000
bat0EnergyNow int64 = 49450000
bat0PowerNow int64 = 4830000
bat0Present int64 = 1
bat0VoltageMinDesign int64 = 10800000
bat0VoltageNow int64 = 12229000
)
powerSupplyClass := PowerSupplyClass{
"AC": {
Name: "AC",
Type: "Mains",
Online: &acOnline,
},
"BAT0": {
Name: "BAT0",
Capacity: &bat0Capacity,
CapacityLevel: "Normal",
CycleCount: &bat0CycleCount,
EnergyFull: &bat0EnergyFull,
EnergyFullDesign: &bat0EnergyFullDesign,
EnergyNow: &bat0EnergyNow,
Manufacturer: "LGC",
ModelName: "LNV-45N1",
PowerNow: &bat0PowerNow,
Present: &bat0Present,
SerialNumber: "38109",
Status: "Discharging",
Technology: "Li-ion",
Type: "Battery",
VoltageMinDesign: &bat0VoltageMinDesign,
VoltageNow: &bat0VoltageNow,
},
}
if !reflect.DeepEqual(powerSupplyClass, psc) {
want, _ := json.Marshal(powerSupplyClass)
get, _ := json.Marshal(psc)
t.Errorf("Result not correct: want %v, have %v.", string(want), string(get))
}
}

View File

@ -0,0 +1,99 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package sysfs
import (
"os"
"path/filepath"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// ClassThermalZoneStats contains info from files in /sys/class/thermal/thermal_zone<zone>
// for a single <zone>.
// https://www.kernel.org/doc/Documentation/thermal/sysfs-api.txt
type ClassThermalZoneStats struct {
Name string // The name of the zone from the directory structure.
Type string // The type of thermal zone.
Temp uint64 // Temperature in millidegree Celsius.
Policy string // One of the various thermal governors used for a particular zone.
Mode *bool // Optional: One of the predefined values in [enabled, disabled].
Passive *uint64 // Optional: millidegrees Celsius. (0 for disabled, > 1000 for enabled+value)
}
// NewClassThermalZoneStats returns Thermal Zone metrics for all zones.
func (fs FS) NewClassThermalZoneStats() ([]ClassThermalZoneStats, error) {
zones, err := filepath.Glob(fs.Path("class/thermal/thermal_zone[0-9]*"))
if err != nil {
return []ClassThermalZoneStats{}, err
}
var zoneStats = ClassThermalZoneStats{}
stats := make([]ClassThermalZoneStats, len(zones))
for i, zone := range zones {
zoneName := strings.TrimPrefix(filepath.Base(zone), "thermal_zone")
zoneStats, err = parseClassThermalZone(zone)
if err != nil {
return []ClassThermalZoneStats{}, err
}
zoneStats.Name = zoneName
stats[i] = zoneStats
}
return stats, nil
}
func parseClassThermalZone(zone string) (ClassThermalZoneStats, error) {
// Required attributes.
zoneType, err := util.SysReadFile(filepath.Join(zone, "type"))
if err != nil {
return ClassThermalZoneStats{}, err
}
zonePolicy, err := util.SysReadFile(filepath.Join(zone, "policy"))
if err != nil {
return ClassThermalZoneStats{}, err
}
zoneTemp, err := util.ReadUintFromFile(filepath.Join(zone, "temp"))
if err != nil {
return ClassThermalZoneStats{}, err
}
// Optional attributes.
mode, err := util.SysReadFile(filepath.Join(zone, "mode"))
if err != nil && !os.IsNotExist(err) && !os.IsPermission(err) {
return ClassThermalZoneStats{}, err
}
zoneMode := util.ParseBool(mode)
var zonePassive *uint64
passive, err := util.ReadUintFromFile(filepath.Join(zone, "passive"))
if os.IsNotExist(err) || os.IsPermission(err) {
zonePassive = nil
} else if err != nil {
return ClassThermalZoneStats{}, err
} else {
zonePassive = &passive
}
return ClassThermalZoneStats{
Type: zoneType,
Policy: zonePolicy,
Temp: zoneTemp,
Mode: zoneMode,
Passive: zonePassive,
}, nil
}
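As a usage illustration (assumed caller code, not part of this file), the zones returned above can be printed directly; Temp is in millidegree Celsius, so divide by 1000 for degrees:
package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs/sysfs"
)

func main() {
    fs, err := sysfs.NewFS(sysfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    zones, err := fs.NewClassThermalZoneStats()
    if err != nil {
        log.Fatal(err)
    }
    for _, z := range zones {
        // Temp is reported in millidegree Celsius (see the struct comment above).
        fmt.Printf("thermal_zone%s %s (%s): %.1f°C\n", z.Name, z.Type, z.Policy, float64(z.Temp)/1000)
    }
}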

View File

@ -0,0 +1,61 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package sysfs
import (
"reflect"
"testing"
"github.com/prometheus/procfs/internal/util"
)
func TestClassThermalZoneStats(t *testing.T) {
fs, err := NewFS(sysTestFixtures)
if err != nil {
t.Fatal(err)
}
thermalTest, err := fs.NewClassThermalZoneStats()
if err != nil {
t.Fatal(err)
}
enabled := util.ParseBool("enabled")
passive := uint64(0)
classThermalZoneStats := []ClassThermalZoneStats{
{
Name: "0",
Type: "bcm2835_thermal",
Policy: "step_wise",
Temp: 49925,
Mode: nil,
Passive: nil,
},
{
Name: "1",
Type: "acpitz",
Policy: "step_wise",
Temp: 44000,
Mode: enabled,
Passive: &passive,
},
}
if !reflect.DeepEqual(classThermalZoneStats, thermalTest) {
t.Errorf("Result not correct: want %v, have %v", classThermalZoneStats, thermalTest)
}
}

View File

@ -1,6 +1,191 @@
# Archive created by ttar -C sysfs/ -c -f sysfs/fixtures.ttar fixtures/
Directory: fixtures
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/class
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/class/net
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/class/net/eth0
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/addr_assign_type
Lines: 1
3
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/addr_len
Lines: 1
6
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/address
Lines: 1
01:01:01:01:01:01
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/broadcast
Lines: 1
ff:ff:ff:ff:ff:ff
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/carrier
Lines: 1
1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/carrier_changes
Lines: 1
2
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/carrier_down_count
Lines: 1
1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/carrier_up_count
Lines: 1
1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/dev_id
Lines: 1
0x20
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/dormant
Lines: 1
1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/duplex
Lines: 1
full
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/flags
Lines: 1
0x1303
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/ifalias
Lines: 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/ifindex
Lines: 1
2
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/iflink
Lines: 1
2
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/link_mode
Lines: 1
1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/mtu
Lines: 1
1500
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/name_assign_type
Lines: 1
2
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/netdev_group
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/operstate
Lines: 1
up
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/phys_port_id
Lines: 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/phys_port_name
Lines: 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/phys_switch_id
Lines: 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/speed
Lines: 1
1000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/tx_queue_len
Lines: 1
1000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/net/eth0/type
Lines: 1
1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/class/thermal
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/class/thermal/thermal_zone0
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/thermal/thermal_zone0/policy
Lines: 1
step_wise
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/thermal/thermal_zone0/temp
Lines: 1
49925
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/thermal/thermal_zone0/type
Lines: 1
bcm2835_thermal
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/class/thermal/thermal_zone1
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/thermal/thermal_zone1/mode
Lines: 1
enabled
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/thermal/thermal_zone1/passive
Lines: 1
0
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/thermal/thermal_zone1/policy
Lines: 1
step_wise
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/thermal/thermal_zone1/temp
Lines: 1
44000
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/class/thermal/thermal_zone1/type
Lines: 1
acpitz
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/devices
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@ -253,6 +438,148 @@ Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/devices/system
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/devices/system/cpu
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/devices/system/cpu/cpu0
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpu0/cpufreq
SymlinkTo: ../cpufreq/policy0
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/devices/system/cpu/cpu1
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/devices/system/cpu/cpu1/cpufreq
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq
Lines: 1
1200195
Mode: 400
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq
Lines: 1
3300000
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq
Lines: 1
1200000
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency
Lines: 1
4294967295
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpu1/cpufreq/related_cpus
Lines: 1
1
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpu1/cpufreq/scaling_available_governors
Lines: 1
performance powersave
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpu1/cpufreq/scaling_driver
Lines: 1
intel_pstate
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpu1/cpufreq/scaling_governor
Lines: 1
powersave
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpu1/cpufreq/scaling_max_freq
Lines: 1
3300000
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpu1/cpufreq/scaling_min_freq
Lines: 1
1200000
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpu1/cpufreq/scaling_setspeed
Lines: 1
<unsupported>
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/devices/system/cpu/cpufreq
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/devices/system/cpu/cpufreq/policy0
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpufreq/policy0/affected_cpus
Lines: 1
0
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq
Lines: 1
2400000
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq
Lines: 1
800000
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency
Lines: 1
0
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpufreq/policy0/related_cpus
Lines: 1
0
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpufreq/policy0/scaling_available_governors
Lines: 1
performance powersave
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpufreq/policy0/scaling_cur_freq
Lines: 1
1219917
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpufreq/policy0/scaling_driver
Lines: 1
intel_pstate
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpufreq/policy0/scaling_governor
Lines: 1
powersave
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpufreq/policy0/scaling_max_freq
Lines: 1
2400000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpufreq/policy0/scaling_min_freq
Lines: 1
800000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/devices/system/cpu/cpufreq/policy0/scaling_setspeed
Lines: 1
<unsupported>
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/devices/system/cpu/cpufreq/policy1
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/fs
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

34
vendor/github.com/prometheus/procfs/sysfs/fs_test.go generated vendored Normal file
View File

@ -0,0 +1,34 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sysfs
import "testing"
const (
sysTestFixtures = "../fixtures/sys"
)
func TestNewFS(t *testing.T) {
if _, err := NewFS("foobar"); err == nil {
t.Error("want NewFS to fail for non-existing mount point")
}
if _, err := NewFS("doc.go"); err == nil {
t.Error("want NewFS to fail if mount point is not a directory")
}
if _, err := NewFS(sysTestFixtures); err != nil {
t.Error("want NewFS to succeed if mount point exists")
}
}

153
vendor/github.com/prometheus/procfs/sysfs/net_class.go generated vendored Normal file
View File

@ -0,0 +1,153 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package sysfs
import (
"fmt"
"io/ioutil"
"os"
"reflect"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// NetClassIface contains info from files in /sys/class/net/<iface>
// for single interface (iface).
type NetClassIface struct {
Name string // Interface name
AddrAssignType *int64 `fileName:"addr_assign_type"` // /sys/class/net/<iface>/addr_assign_type
AddrLen *int64 `fileName:"addr_len"` // /sys/class/net/<iface>/addr_len
Address string `fileName:"address"` // /sys/class/net/<iface>/address
Broadcast string `fileName:"broadcast"` // /sys/class/net/<iface>/broadcast
Carrier *int64 `fileName:"carrier"` // /sys/class/net/<iface>/carrier
CarrierChanges *int64 `fileName:"carrier_changes"` // /sys/class/net/<iface>/carrier_changes
CarrierUpCount *int64 `fileName:"carrier_up_count"` // /sys/class/net/<iface>/carrier_up_count
CarrierDownCount *int64 `fileName:"carrier_down_count"` // /sys/class/net/<iface>/carrier_down_count
DevID *int64 `fileName:"dev_id"` // /sys/class/net/<iface>/dev_id
Dormant *int64 `fileName:"dormant"` // /sys/class/net/<iface>/dormant
Duplex string `fileName:"duplex"` // /sys/class/net/<iface>/duplex
Flags *int64 `fileName:"flags"` // /sys/class/net/<iface>/flags
IfAlias string `fileName:"ifalias"` // /sys/class/net/<iface>/ifalias
IfIndex *int64 `fileName:"ifindex"` // /sys/class/net/<iface>/ifindex
IfLink *int64 `fileName:"iflink"` // /sys/class/net/<iface>/iflink
LinkMode *int64 `fileName:"link_mode"` // /sys/class/net/<iface>/link_mode
MTU *int64 `fileName:"mtu"` // /sys/class/net/<iface>/mtu
NameAssignType *int64 `fileName:"name_assign_type"` // /sys/class/net/<iface>/name_assign_type
NetDevGroup *int64 `fileName:"netdev_group"` // /sys/class/net/<iface>/netdev_group
OperState string `fileName:"operstate"` // /sys/class/net/<iface>/operstate
PhysPortID string `fileName:"phys_port_id"` // /sys/class/net/<iface>/phys_port_id
PhysPortName string `fileName:"phys_port_name"` // /sys/class/net/<iface>/phys_port_name
PhysSwitchID string `fileName:"phys_switch_id"` // /sys/class/net/<iface>/phys_switch_id
Speed *int64 `fileName:"speed"` // /sys/class/net/<iface>/speed
TxQueueLen *int64 `fileName:"tx_queue_len"` // /sys/class/net/<iface>/tx_queue_len
Type *int64 `fileName:"type"` // /sys/class/net/<iface>/type
}
// NetClass is collection of info for every interface (iface) in /sys/class/net. The map keys
// are interface (iface) names.
type NetClass map[string]NetClassIface
// NewNetClass returns info for all net interfaces (iface) read from /sys/class/net/<iface>.
func NewNetClass() (NetClass, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return nil, err
}
return fs.NewNetClass()
}
// NewNetClass returns info for all net interfaces (iface) read from /sys/class/net/<iface>.
func (fs FS) NewNetClass() (NetClass, error) {
path := fs.Path("class/net")
devices, err := ioutil.ReadDir(path)
if err != nil {
return NetClass{}, fmt.Errorf("cannot access %s dir %s", path, err)
}
netClass := NetClass{}
for _, deviceDir := range devices {
if deviceDir.Mode().IsRegular() {
continue
}
interfaceClass, err := netClass.parseNetClassIface(path + "/" + deviceDir.Name())
if err != nil {
return nil, err
}
interfaceClass.Name = deviceDir.Name()
netClass[deviceDir.Name()] = *interfaceClass
}
return netClass, nil
}
// parseNetClassIface scans predefined files in /sys/class/net/<iface>
// directory and gets their contents.
func (nc NetClass) parseNetClassIface(devicePath string) (*NetClassIface, error) {
interfaceClass := NetClassIface{}
interfaceElem := reflect.ValueOf(&interfaceClass).Elem()
interfaceType := reflect.TypeOf(interfaceClass)
//start from 1 - skip the Name field
for i := 1; i < interfaceElem.NumField(); i++ {
fieldType := interfaceType.Field(i)
fieldValue := interfaceElem.Field(i)
if fieldType.Tag.Get("fileName") == "" {
panic(fmt.Errorf("field %s does not have a filename tag", fieldType.Name))
}
value, err := util.SysReadFile(devicePath + "/" + fieldType.Tag.Get("fileName"))
if err != nil {
if os.IsNotExist(err) || err.Error() == "operation not supported" || err.Error() == "invalid argument" {
continue
}
return nil, fmt.Errorf("could not access file %s: %s", fieldType.Tag.Get("fileName"), err)
}
switch fieldValue.Kind() {
case reflect.String:
fieldValue.SetString(value)
case reflect.Ptr:
var int64ptr *int64
switch fieldValue.Type() {
case reflect.TypeOf(int64ptr):
var intValue int64
if strings.HasPrefix(value, "0x") {
intValue, err = strconv.ParseInt(value[2:], 16, 64)
if err != nil {
return nil, fmt.Errorf("expected hex value for %s, got: %s", fieldType.Name, value)
}
} else {
intValue, err = strconv.ParseInt(value, 10, 64)
if err != nil {
return nil, fmt.Errorf("expected Uint64 value for %s, got: %s", fieldType.Name, value)
}
}
fieldValue.Set(reflect.ValueOf(&intValue))
default:
return nil, fmt.Errorf("unhandled pointer type %q", fieldValue.Type())
}
default:
return nil, fmt.Errorf("unhandled type %q", fieldValue.Kind())
}
}
return &interfaceClass, nil
}
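A short consumer-side sketch (assumed, not part of the vendored file) that walks the map returned by NewNetClass; pointer fields such as Speed are nil when the underlying file cannot be read, so they need a guard:
package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs/sysfs"
)

func main() {
    nc, err := sysfs.NewNetClass() // reads /sys/class/net/<iface> for every interface
    if err != nil {
        log.Fatal(err)
    }
    for name, iface := range nc {
        if iface.Speed != nil {
            fmt.Printf("%s: %s, %d Mb/s\n", name, iface.OperState, *iface.Speed)
        } else {
            fmt.Printf("%s: %s\n", name, iface.OperState)
        }
    }
}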

View File

@ -0,0 +1,109 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package sysfs
import (
"reflect"
"testing"
)
func TestNewNetClassDevices(t *testing.T) {
fs, err := NewFS(sysTestFixtures)
if err != nil {
t.Fatal(err)
}
devices, err := fs.NetClassDevices()
if err != nil {
t.Fatal(err)
}
if len(devices) != 1 {
t.Errorf("Unexpected number of devices, want %d, have %d", 1, len(devices))
}
if devices[0] != "eth0" {
t.Errorf("Found unexpected device, want %s, have %s", "eth0", devices[0])
}
}
func TestNewNetClass(t *testing.T) {
fs, err := NewFS(sysTestFixtures)
if err != nil {
t.Fatal(err)
}
nc, err := fs.NewNetClass()
if err != nil {
t.Fatal(err)
}
var (
addrAssignType int64 = 3
addrLen int64 = 6
carrier int64 = 1
carrierChanges int64 = 2
carrierDownCount int64 = 1
carrierUpCount int64 = 1
devID int64 = 32
dormant int64 = 1
flags int64 = 4867
ifIndex int64 = 2
ifLink int64 = 2
linkMode int64 = 1
mtu int64 = 1500
nameAssignType int64 = 2
netDevGroup int64 = 0
speed int64 = 1000
txQueueLen int64 = 1000
netType int64 = 1
)
netClass := NetClass{
"eth0": {
Address: "01:01:01:01:01:01",
AddrAssignType: &addrAssignType,
AddrLen: &addrLen,
Broadcast: "ff:ff:ff:ff:ff:ff",
Carrier: &carrier,
CarrierChanges: &carrierChanges,
CarrierDownCount: &carrierDownCount,
CarrierUpCount: &carrierUpCount,
DevID: &devID,
Dormant: &dormant,
Duplex: "full",
Flags: &flags,
IfAlias: "",
IfIndex: &ifIndex,
IfLink: &ifLink,
LinkMode: &linkMode,
MTU: &mtu,
Name: "eth0",
NameAssignType: &nameAssignType,
NetDevGroup: &netDevGroup,
OperState: "up",
PhysPortID: "",
PhysPortName: "",
PhysSwitchID: "",
Speed: &speed,
TxQueueLen: &txQueueLen,
Type: &netType,
},
}
if !reflect.DeepEqual(netClass, nc) {
t.Errorf("Result not correct: want %v, have %v", netClass, nc)
}
}

156
vendor/github.com/prometheus/procfs/sysfs/system_cpu.go generated vendored Normal file
View File

@ -0,0 +1,156 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package sysfs
import (
"os"
"path/filepath"
"strings"
"golang.org/x/sync/errgroup"
"github.com/prometheus/procfs/internal/util"
)
// SystemCPUCpufreqStats contains stats from devices/system/cpu/cpu[0-9]*/cpufreq/...
type SystemCPUCpufreqStats struct {
Name string
CpuinfoCurrentFrequency *uint64
CpuinfoMinimumFrequency *uint64
CpuinfoMaximumFrequency *uint64
CpuinfoTransitionLatency *uint64
ScalingCurrentFrequency *uint64
ScalingMinimumFrequency *uint64
ScalingMaximumFrequency *uint64
AvailableGovernors string
Driver string
Governor string
RelatedCpus string
SetSpeed string
}
// TODO: Add topology support.
// TODO: Add thermal_throttle support.
// NewSystemCpufreq returns CPU frequency metrics for all CPUs.
func NewSystemCpufreq() ([]SystemCPUCpufreqStats, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return []SystemCPUCpufreqStats{}, err
}
return fs.NewSystemCpufreq()
}
// NewSystemCpufreq returns CPU frequency metrics for all CPUs.
func (fs FS) NewSystemCpufreq() ([]SystemCPUCpufreqStats, error) {
var g errgroup.Group
cpus, err := filepath.Glob(fs.Path("devices/system/cpu/cpu[0-9]*"))
if err != nil {
return nil, err
}
systemCpufreq := make([]SystemCPUCpufreqStats, len(cpus))
for i, cpu := range cpus {
cpuName := strings.TrimPrefix(filepath.Base(cpu), "cpu")
cpuCpufreqPath := filepath.Join(cpu, "cpufreq")
// Skip CPUs without a cpufreq directory; any other Stat error is fatal.
if _, err := os.Stat(cpuCpufreqPath); err != nil {
if os.IsNotExist(err) {
continue
}
return nil, err
}
// Execute the parsing of each CPU in parallel.
// This is done because the kernel intentionally delays access to each CPU by
// 50 milliseconds to avoid DDoSing possibly expensive functions.
i := i // https://golang.org/doc/faq#closures_and_goroutines
g.Go(func() error {
cpufreq, err := parseCpufreqCpuinfo(cpuCpufreqPath)
if err == nil {
cpufreq.Name = cpuName
systemCpufreq[i] = *cpufreq
}
return err
})
}
if err = g.Wait(); err != nil {
return nil, err
}
return systemCpufreq, nil
}
func parseCpufreqCpuinfo(cpuPath string) (*SystemCPUCpufreqStats, error) {
uintFiles := []string{
"cpuinfo_cur_freq",
"cpuinfo_max_freq",
"cpuinfo_min_freq",
"cpuinfo_transition_latency",
"scaling_cur_freq",
"scaling_max_freq",
"scaling_min_freq",
}
uintOut := make([]*uint64, len(uintFiles))
for i, f := range uintFiles {
v, err := util.ReadUintFromFile(filepath.Join(cpuPath, f))
if err != nil {
if os.IsNotExist(err) || os.IsPermission(err) {
continue
}
return &SystemCPUCpufreqStats{}, err
}
uintOut[i] = &v
}
stringFiles := []string{
"scaling_available_governors",
"scaling_driver",
"scaling_governor",
"related_cpus",
"scaling_setspeed",
}
stringOut := make([]string, len(stringFiles))
var err error
for i, f := range stringFiles {
stringOut[i], err = util.SysReadFile(filepath.Join(cpuPath, f))
if err != nil {
return &SystemCPUCpufreqStats{}, err
}
}
return &SystemCPUCpufreqStats{
CpuinfoCurrentFrequency: uintOut[0],
CpuinfoMaximumFrequency: uintOut[1],
CpuinfoMinimumFrequency: uintOut[2],
CpuinfoTransitionLatency: uintOut[3],
ScalingCurrentFrequency: uintOut[4],
ScalingMaximumFrequency: uintOut[5],
ScalingMinimumFrequency: uintOut[6],
AvailableGovernors: stringOut[0],
Driver: stringOut[1],
Governor: stringOut[2],
RelatedCpus: stringOut[3],
SetSpeed: stringOut[4],
}, nil
}

View File

@ -0,0 +1,76 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package sysfs
import (
"reflect"
"testing"
)
func makeUint64(v uint64) *uint64 {
return &v
}
func TestNewSystemCpufreq(t *testing.T) {
fs, err := NewFS(sysTestFixtures)
if err != nil {
t.Fatal(err)
}
c, err := fs.NewSystemCpufreq()
if err != nil {
t.Fatal(err)
}
systemCpufreq := []SystemCPUCpufreqStats{
// Has missing `cpuinfo_cur_freq` file.
{
Name: "0",
CpuinfoCurrentFrequency: nil,
CpuinfoMinimumFrequency: makeUint64(800000),
CpuinfoMaximumFrequency: makeUint64(2400000),
CpuinfoTransitionLatency: makeUint64(0),
ScalingCurrentFrequency: makeUint64(1219917),
ScalingMinimumFrequency: makeUint64(800000),
ScalingMaximumFrequency: makeUint64(2400000),
AvailableGovernors: "performance powersave",
Driver: "intel_pstate",
Governor: "powersave",
RelatedCpus: "0",
SetSpeed: "<unsupported>",
},
// Has missing `scaling_cur_freq` file.
{
Name: "1",
CpuinfoCurrentFrequency: makeUint64(1200195),
CpuinfoMinimumFrequency: makeUint64(1200000),
CpuinfoMaximumFrequency: makeUint64(3300000),
CpuinfoTransitionLatency: makeUint64(4294967295),
ScalingCurrentFrequency: nil,
ScalingMinimumFrequency: makeUint64(1200000),
ScalingMaximumFrequency: makeUint64(3300000),
AvailableGovernors: "performance powersave",
Driver: "intel_pstate",
Governor: "powersave",
RelatedCpus: "1",
SetSpeed: "<unsupported>",
},
}
if !reflect.DeepEqual(systemCpufreq, c) {
t.Errorf("Result not correct: want %v, have %v", systemCpufreq, c)
}
}

3
vendor/golang.org/x/sync/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

26
vendor/golang.org/x/sync/CONTRIBUTING.md generated vendored Normal file
View File

@ -0,0 +1,26 @@
# Contributing to Go
Go is an open source project.
It is the work of hundreds of contributors. We appreciate your help!
## Filing issues
When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:
1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?
General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
The gophers there will answer or ask you to file an issue if you've tripped over a bug.
## Contributing code
Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
before sending patches.
Unless otherwise noted, the Go source files are distributed under
the BSD-style license found in the LICENSE file.

3
vendor/golang.org/x/sync/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

27
vendor/golang.org/x/sync/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/sync/PATENTS generated vendored Normal file
View File

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

18
vendor/golang.org/x/sync/README.md generated vendored Normal file
View File

@ -0,0 +1,18 @@
# Go Sync
This repository provides Go concurrency primitives in addition to the
ones provided by the language and "sync" and "sync/atomic" packages.
## Download/Install
The easiest way to install is to run `go get -u golang.org/x/sync`. You can
also manually git clone the repository to `$GOPATH/src/golang.org/x/sync`.
## Report Issues / Send Patches
This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html.
The main issue tracker for the sync repository is located at
https://github.com/golang/go/issues. Prefix your issue with "x/sync:" in the
subject line, so it is easy to find.

1
vendor/golang.org/x/sync/codereview.cfg generated vendored Normal file
View File

@ -0,0 +1 @@
issuerepo: golang/go

66
vendor/golang.org/x/sync/errgroup/errgroup.go generated vendored Normal file
View File

@ -0,0 +1,66 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package errgroup provides synchronization, error propagation, and Context
// cancelation for groups of goroutines working on subtasks of a common task.
package errgroup
import (
"context"
"sync"
)
// A Group is a collection of goroutines working on subtasks that are part of
// the same overall task.
//
// A zero Group is valid and does not cancel on error.
type Group struct {
cancel func()
wg sync.WaitGroup
errOnce sync.Once
err error
}
// WithContext returns a new Group and an associated Context derived from ctx.
//
// The derived Context is canceled the first time a function passed to Go
// returns a non-nil error or the first time Wait returns, whichever occurs
// first.
func WithContext(ctx context.Context) (*Group, context.Context) {
ctx, cancel := context.WithCancel(ctx)
return &Group{cancel: cancel}, ctx
}
// Wait blocks until all function calls from the Go method have returned, then
// returns the first non-nil error (if any) from them.
func (g *Group) Wait() error {
g.wg.Wait()
if g.cancel != nil {
g.cancel()
}
return g.err
}
// Go calls the given function in a new goroutine.
//
// The first call to return a non-nil error cancels the group; its error will be
// returned by Wait.
func (g *Group) Go(f func() error) {
g.wg.Add(1)
go func() {
defer g.wg.Done()
if err := f(); err != nil {
g.errOnce.Do(func() {
g.err = err
if g.cancel != nil {
g.cancel()
}
})
}
}()
}

View File

@ -0,0 +1,101 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package errgroup_test
import (
"context"
"crypto/md5"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"golang.org/x/sync/errgroup"
)
// Pipeline demonstrates the use of a Group to implement a multi-stage
// pipeline: a version of the MD5All function with bounded parallelism from
// https://blog.golang.org/pipelines.
func ExampleGroup_pipeline() {
m, err := MD5All(context.Background(), ".")
if err != nil {
log.Fatal(err)
}
for k, sum := range m {
fmt.Printf("%s:\t%x\n", k, sum)
}
}
type result struct {
path string
sum [md5.Size]byte
}
// MD5All reads all the files in the file tree rooted at root and returns a map
// from file path to the MD5 sum of the file's contents. If the directory walk
// fails or any read operation fails, MD5All returns an error.
func MD5All(ctx context.Context, root string) (map[string][md5.Size]byte, error) {
// ctx is canceled when g.Wait() returns. When this version of MD5All returns
// - even in case of error! - we know that all of the goroutines have finished
// and the memory they were using can be garbage-collected.
g, ctx := errgroup.WithContext(ctx)
paths := make(chan string)
g.Go(func() error {
defer close(paths)
return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.Mode().IsRegular() {
return nil
}
select {
case paths <- path:
case <-ctx.Done():
return ctx.Err()
}
return nil
})
})
// Start a fixed number of goroutines to read and digest files.
c := make(chan result)
const numDigesters = 20
for i := 0; i < numDigesters; i++ {
g.Go(func() error {
for path := range paths {
data, err := ioutil.ReadFile(path)
if err != nil {
return err
}
select {
case c <- result{path, md5.Sum(data)}:
case <-ctx.Done():
return ctx.Err()
}
}
return nil
})
}
go func() {
g.Wait()
close(c)
}()
m := make(map[string][md5.Size]byte)
for r := range c {
m[r.path] = r.sum
}
// Check whether any of the goroutines failed. Since g is accumulating the
// errors, we don't need to send them (or check for them) in the individual
// results sent on the channel.
if err := g.Wait(); err != nil {
return nil, err
}
return m, nil
}

176
vendor/golang.org/x/sync/errgroup/errgroup_test.go generated vendored Normal file
View File

@ -0,0 +1,176 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package errgroup_test
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"testing"
"golang.org/x/sync/errgroup"
)
var (
Web = fakeSearch("web")
Image = fakeSearch("image")
Video = fakeSearch("video")
)
type Result string
type Search func(ctx context.Context, query string) (Result, error)
func fakeSearch(kind string) Search {
return func(_ context.Context, query string) (Result, error) {
return Result(fmt.Sprintf("%s result for %q", kind, query)), nil
}
}
// JustErrors illustrates the use of a Group in place of a sync.WaitGroup to
// simplify goroutine counting and error handling. This example is derived from
// the sync.WaitGroup example at https://golang.org/pkg/sync/#example_WaitGroup.
func ExampleGroup_justErrors() {
var g errgroup.Group
var urls = []string{
"http://www.golang.org/",
"http://www.google.com/",
"http://www.somestupidname.com/",
}
for _, url := range urls {
// Launch a goroutine to fetch the URL.
url := url // https://golang.org/doc/faq#closures_and_goroutines
g.Go(func() error {
// Fetch the URL.
resp, err := http.Get(url)
if err == nil {
resp.Body.Close()
}
return err
})
}
// Wait for all HTTP fetches to complete.
if err := g.Wait(); err == nil {
fmt.Println("Successfully fetched all URLs.")
}
}
// Parallel illustrates the use of a Group for synchronizing a simple parallel
// task: the "Google Search 2.0" function from
// https://talks.golang.org/2012/concurrency.slide#46, augmented with a Context
// and error-handling.
func ExampleGroup_parallel() {
Google := func(ctx context.Context, query string) ([]Result, error) {
g, ctx := errgroup.WithContext(ctx)
searches := []Search{Web, Image, Video}
results := make([]Result, len(searches))
for i, search := range searches {
i, search := i, search // https://golang.org/doc/faq#closures_and_goroutines
g.Go(func() error {
result, err := search(ctx, query)
if err == nil {
results[i] = result
}
return err
})
}
if err := g.Wait(); err != nil {
return nil, err
}
return results, nil
}
results, err := Google(context.Background(), "golang")
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
for _, result := range results {
fmt.Println(result)
}
// Output:
// web result for "golang"
// image result for "golang"
// video result for "golang"
}
func TestZeroGroup(t *testing.T) {
err1 := errors.New("errgroup_test: 1")
err2 := errors.New("errgroup_test: 2")
cases := []struct {
errs []error
}{
{errs: []error{}},
{errs: []error{nil}},
{errs: []error{err1}},
{errs: []error{err1, nil}},
{errs: []error{err1, nil, err2}},
}
for _, tc := range cases {
var g errgroup.Group
var firstErr error
for i, err := range tc.errs {
err := err
g.Go(func() error { return err })
if firstErr == nil && err != nil {
firstErr = err
}
if gErr := g.Wait(); gErr != firstErr {
t.Errorf("after %T.Go(func() error { return err }) for err in %v\n"+
"g.Wait() = %v; want %v",
g, tc.errs[:i+1], err, firstErr)
}
}
}
}
func TestWithContext(t *testing.T) {
errDoom := errors.New("group_test: doomed")
cases := []struct {
errs []error
want error
}{
{want: nil},
{errs: []error{nil}, want: nil},
{errs: []error{errDoom}, want: errDoom},
{errs: []error{errDoom, nil}, want: errDoom},
}
for _, tc := range cases {
g, ctx := errgroup.WithContext(context.Background())
for _, err := range tc.errs {
err := err
g.Go(func() error { return err })
}
if err := g.Wait(); err != tc.want {
t.Errorf("after %T.Go(func() error { return err }) for err in %v\n"+
"g.Wait() = %v; want %v",
g, tc.errs, err, tc.want)
}
canceled := false
select {
case <-ctx.Done():
canceled = true
default:
}
if !canceled {
t.Errorf("after %T.Go(func() error { return err }) for err in %v\n"+
"ctx.Done() was not closed",
g, tc.errs)
}
}
}

1
vendor/golang.org/x/sync/go.mod generated vendored Normal file
View File

@ -0,0 +1 @@
module golang.org/x/sync

127
vendor/golang.org/x/sync/semaphore/semaphore.go generated vendored Normal file
View File

@ -0,0 +1,127 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package semaphore provides a weighted semaphore implementation.
package semaphore // import "golang.org/x/sync/semaphore"
import (
"container/list"
"context"
"sync"
)
type waiter struct {
n int64
ready chan<- struct{} // Closed when semaphore acquired.
}
// NewWeighted creates a new weighted semaphore with the given
// maximum combined weight for concurrent access.
func NewWeighted(n int64) *Weighted {
w := &Weighted{size: n}
return w
}
// Weighted provides a way to bound concurrent access to a resource.
// The callers can request access with a given weight.
type Weighted struct {
size int64
cur int64
mu sync.Mutex
waiters list.List
}
// Acquire acquires the semaphore with a weight of n, blocking until resources
// are available or ctx is done. On success, returns nil. On failure, returns
// ctx.Err() and leaves the semaphore unchanged.
//
// If ctx is already done, Acquire may still succeed without blocking.
func (s *Weighted) Acquire(ctx context.Context, n int64) error {
s.mu.Lock()
if s.size-s.cur >= n && s.waiters.Len() == 0 {
s.cur += n
s.mu.Unlock()
return nil
}
if n > s.size {
// Don't make other Acquire calls block on one that's doomed to fail.
s.mu.Unlock()
<-ctx.Done()
return ctx.Err()
}
ready := make(chan struct{})
w := waiter{n: n, ready: ready}
elem := s.waiters.PushBack(w)
s.mu.Unlock()
select {
case <-ctx.Done():
err := ctx.Err()
s.mu.Lock()
select {
case <-ready:
// Acquired the semaphore after we were canceled. Rather than trying to
// fix up the queue, just pretend we didn't notice the cancelation.
err = nil
default:
s.waiters.Remove(elem)
}
s.mu.Unlock()
return err
case <-ready:
return nil
}
}
// TryAcquire acquires the semaphore with a weight of n without blocking.
// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
func (s *Weighted) TryAcquire(n int64) bool {
s.mu.Lock()
success := s.size-s.cur >= n && s.waiters.Len() == 0
if success {
s.cur += n
}
s.mu.Unlock()
return success
}
// Release releases the semaphore with a weight of n.
func (s *Weighted) Release(n int64) {
s.mu.Lock()
s.cur -= n
if s.cur < 0 {
s.mu.Unlock()
panic("semaphore: bad release")
}
for {
next := s.waiters.Front()
if next == nil {
break // No more waiters blocked.
}
w := next.Value.(waiter)
if s.size-s.cur < w.n {
// Not enough tokens for the next waiter. We could keep going (to try to
// find a waiter with a smaller request), but under load that could cause
// starvation for large requests; instead, we leave all remaining waiters
// blocked.
//
// Consider a semaphore used as a read-write lock, with N tokens, N
// readers, and one writer. Each reader can Acquire(1) to obtain a read
// lock. The writer can Acquire(N) to obtain a write lock, excluding all
// of the readers. If we allow the readers to jump ahead in the queue,
// the writer will starve — there is always one token available for every
// reader.
break
}
s.cur += w.n
s.waiters.Remove(next)
close(w.ready)
}
s.mu.Unlock()
}

View File

@ -0,0 +1,131 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7
package semaphore_test
import (
"context"
"fmt"
"testing"
"golang.org/x/sync/semaphore"
)
// weighted is an interface matching a subset of *Weighted. It allows
// alternate implementations for testing and benchmarking.
type weighted interface {
Acquire(context.Context, int64) error
TryAcquire(int64) bool
Release(int64)
}
// semChan implements Weighted using a channel for
// comparing against the condition variable-based implementation.
type semChan chan struct{}
func newSemChan(n int64) semChan {
return semChan(make(chan struct{}, n))
}
func (s semChan) Acquire(_ context.Context, n int64) error {
for i := int64(0); i < n; i++ {
s <- struct{}{}
}
return nil
}
func (s semChan) TryAcquire(n int64) bool {
if int64(len(s))+n > int64(cap(s)) {
return false
}
for i := int64(0); i < n; i++ {
s <- struct{}{}
}
return true
}
func (s semChan) Release(n int64) {
for i := int64(0); i < n; i++ {
<-s
}
}
// acquireN calls Acquire(size) on sem N times and then calls Release(size) N times.
func acquireN(b *testing.B, sem weighted, size int64, N int) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
for j := 0; j < N; j++ {
sem.Acquire(context.Background(), size)
}
for j := 0; j < N; j++ {
sem.Release(size)
}
}
}
// tryAcquireN calls TryAcquire(size) on sem N times and then calls Release(size) N times.
func tryAcquireN(b *testing.B, sem weighted, size int64, N int) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
for j := 0; j < N; j++ {
if !sem.TryAcquire(size) {
b.Fatalf("TryAcquire(%v) = false, want true", size)
}
}
for j := 0; j < N; j++ {
sem.Release(size)
}
}
}
func BenchmarkNewSeq(b *testing.B) {
for _, cap := range []int64{1, 128} {
b.Run(fmt.Sprintf("Weighted-%d", cap), func(b *testing.B) {
for i := 0; i < b.N; i++ {
_ = semaphore.NewWeighted(cap)
}
})
b.Run(fmt.Sprintf("semChan-%d", cap), func(b *testing.B) {
for i := 0; i < b.N; i++ {
_ = newSemChan(cap)
}
})
}
}
func BenchmarkAcquireSeq(b *testing.B) {
for _, c := range []struct {
cap, size int64
N int
}{
{1, 1, 1},
{2, 1, 1},
{16, 1, 1},
{128, 1, 1},
{2, 2, 1},
{16, 2, 8},
{128, 2, 64},
{2, 1, 2},
{16, 8, 2},
{128, 64, 2},
} {
for _, w := range []struct {
name string
w weighted
}{
{"Weighted", semaphore.NewWeighted(c.cap)},
{"semChan", newSemChan(c.cap)},
} {
b.Run(fmt.Sprintf("%s-acquire-%d-%d-%d", w.name, c.cap, c.size, c.N), func(b *testing.B) {
acquireN(b, w.w, c.size, c.N)
})
b.Run(fmt.Sprintf("%s-tryAcquire-%d-%d-%d", w.name, c.cap, c.size, c.N), func(b *testing.B) {
tryAcquireN(b, w.w, c.size, c.N)
})
}
}
}

View File

@ -0,0 +1,84 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package semaphore_test
import (
"context"
"fmt"
"log"
"runtime"
"golang.org/x/sync/semaphore"
)
// Example_workerPool demonstrates how to use a semaphore to limit the number of
// goroutines working on parallel tasks.
//
// This use of a semaphore mimics a typical “worker pool” pattern, but without
// the need to explicitly shut down idle workers when the work is done.
func Example_workerPool() {
ctx := context.TODO()
var (
maxWorkers = runtime.GOMAXPROCS(0)
sem = semaphore.NewWeighted(int64(maxWorkers))
out = make([]int, 32)
)
// Compute the output using up to maxWorkers goroutines at a time.
for i := range out {
// When maxWorkers goroutines are in flight, Acquire blocks until one of the
// workers finishes.
if err := sem.Acquire(ctx, 1); err != nil {
log.Printf("Failed to acquire semaphore: %v", err)
break
}
go func(i int) {
defer sem.Release(1)
out[i] = collatzSteps(i + 1)
}(i)
}
// Acquire all of the tokens to wait for any remaining workers to finish.
//
// If you are already waiting for the workers by some other means (such as an
// errgroup.Group), you can omit this final Acquire call.
if err := sem.Acquire(ctx, int64(maxWorkers)); err != nil {
log.Printf("Failed to acquire semaphore: %v", err)
}
fmt.Println(out)
// Output:
// [0 1 7 2 5 8 16 3 19 6 14 9 9 17 17 4 12 20 20 7 7 15 15 10 23 10 111 18 18 18 106 5]
}
// collatzSteps computes the number of steps to reach 1 under the Collatz
// conjecture. (See https://en.wikipedia.org/wiki/Collatz_conjecture.)
func collatzSteps(n int) (steps int) {
if n <= 0 {
panic("nonpositive input")
}
for ; n > 1; steps++ {
if steps < 0 {
panic("too many steps")
}
if n%2 == 0 {
n /= 2
continue
}
const maxInt = int(^uint(0) >> 1)
if n > (maxInt-1)/3 {
panic("overflow")
}
n = 3*n + 1
}
return steps
}

171
vendor/golang.org/x/sync/semaphore/semaphore_test.go generated vendored Normal file
View File

@ -0,0 +1,171 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package semaphore_test
import (
"context"
"math/rand"
"runtime"
"sync"
"testing"
"time"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
const maxSleep = 1 * time.Millisecond
func HammerWeighted(sem *semaphore.Weighted, n int64, loops int) {
for i := 0; i < loops; i++ {
sem.Acquire(context.Background(), n)
time.Sleep(time.Duration(rand.Int63n(int64(maxSleep/time.Nanosecond))) * time.Nanosecond)
sem.Release(n)
}
}
func TestWeighted(t *testing.T) {
t.Parallel()
n := runtime.GOMAXPROCS(0)
loops := 10000 / n
sem := semaphore.NewWeighted(int64(n))
var wg sync.WaitGroup
wg.Add(n)
for i := 0; i < n; i++ {
i := i
go func() {
defer wg.Done()
HammerWeighted(sem, int64(i), loops)
}()
}
wg.Wait()
}
func TestWeightedPanic(t *testing.T) {
t.Parallel()
defer func() {
if recover() == nil {
t.Fatal("release of an unacquired weighted semaphore did not panic")
}
}()
w := semaphore.NewWeighted(1)
w.Release(1)
}
func TestWeightedTryAcquire(t *testing.T) {
t.Parallel()
ctx := context.Background()
sem := semaphore.NewWeighted(2)
tries := []bool{}
sem.Acquire(ctx, 1)
tries = append(tries, sem.TryAcquire(1))
tries = append(tries, sem.TryAcquire(1))
sem.Release(2)
tries = append(tries, sem.TryAcquire(1))
sem.Acquire(ctx, 1)
tries = append(tries, sem.TryAcquire(1))
want := []bool{true, false, true, false}
for i := range tries {
if tries[i] != want[i] {
t.Errorf("tries[%d]: got %t, want %t", i, tries[i], want[i])
}
}
}
func TestWeightedAcquire(t *testing.T) {
t.Parallel()
ctx := context.Background()
sem := semaphore.NewWeighted(2)
tryAcquire := func(n int64) bool {
ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond)
defer cancel()
return sem.Acquire(ctx, n) == nil
}
tries := []bool{}
sem.Acquire(ctx, 1)
tries = append(tries, tryAcquire(1))
tries = append(tries, tryAcquire(1))
sem.Release(2)
tries = append(tries, tryAcquire(1))
sem.Acquire(ctx, 1)
tries = append(tries, tryAcquire(1))
want := []bool{true, false, true, false}
for i := range tries {
if tries[i] != want[i] {
t.Errorf("tries[%d]: got %t, want %t", i, tries[i], want[i])
}
}
}
func TestWeightedDoesntBlockIfTooBig(t *testing.T) {
t.Parallel()
const n = 2
sem := semaphore.NewWeighted(n)
{
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go sem.Acquire(ctx, n+1)
}
g, ctx := errgroup.WithContext(context.Background())
for i := n * 3; i > 0; i-- {
g.Go(func() error {
err := sem.Acquire(ctx, 1)
if err == nil {
time.Sleep(1 * time.Millisecond)
sem.Release(1)
}
return err
})
}
if err := g.Wait(); err != nil {
t.Errorf("semaphore.NewWeighted(%v) failed to AcquireCtx(_, 1) with AcquireCtx(_, %v) pending", n, n+1)
}
}
// TestLargeAcquireDoesntStarve times out if a large call to Acquire starves.
// Merely returning from the test function indicates success.
func TestLargeAcquireDoesntStarve(t *testing.T) {
t.Parallel()
ctx := context.Background()
n := int64(runtime.GOMAXPROCS(0))
sem := semaphore.NewWeighted(n)
running := true
var wg sync.WaitGroup
wg.Add(int(n))
for i := n; i > 0; i-- {
sem.Acquire(ctx, 1)
go func() {
defer func() {
sem.Release(1)
wg.Done()
}()
for running {
time.Sleep(1 * time.Millisecond)
sem.Release(1)
sem.Acquire(ctx, 1)
}
}()
}
sem.Acquire(ctx, n)
running = false
sem.Release(n)
wg.Wait()
}

111
vendor/golang.org/x/sync/singleflight/singleflight.go generated vendored Normal file
View File

@ -0,0 +1,111 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package singleflight provides a duplicate function call suppression
// mechanism.
package singleflight // import "golang.org/x/sync/singleflight"
import "sync"
// call is an in-flight or completed singleflight.Do call
type call struct {
wg sync.WaitGroup
// These fields are written once before the WaitGroup is done
// and are only read after the WaitGroup is done.
val interface{}
err error
// These fields are read and written with the singleflight
// mutex held before the WaitGroup is done, and are read but
// not written after the WaitGroup is done.
dups int
chans []chan<- Result
}
// Group represents a class of work and forms a namespace in
// which units of work can be executed with duplicate suppression.
type Group struct {
mu sync.Mutex // protects m
m map[string]*call // lazily initialized
}
// Result holds the results of Do, so they can be passed
// on a channel.
type Result struct {
Val interface{}
Err error
Shared bool
}
// Do executes and returns the results of the given function, making
// sure that only one execution is in-flight for a given key at a
// time. If a duplicate comes in, the duplicate caller waits for the
// original to complete and receives the same results.
// The return value shared indicates whether v was given to multiple callers.
func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) {
g.mu.Lock()
if g.m == nil {
g.m = make(map[string]*call)
}
if c, ok := g.m[key]; ok {
c.dups++
g.mu.Unlock()
c.wg.Wait()
return c.val, c.err, true
}
c := new(call)
c.wg.Add(1)
g.m[key] = c
g.mu.Unlock()
g.doCall(c, key, fn)
return c.val, c.err, c.dups > 0
}
// DoChan is like Do but returns a channel that will receive the
// results when they are ready.
func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result {
ch := make(chan Result, 1)
g.mu.Lock()
if g.m == nil {
g.m = make(map[string]*call)
}
if c, ok := g.m[key]; ok {
c.dups++
c.chans = append(c.chans, ch)
g.mu.Unlock()
return ch
}
c := &call{chans: []chan<- Result{ch}}
c.wg.Add(1)
g.m[key] = c
g.mu.Unlock()
go g.doCall(c, key, fn)
return ch
}
// doCall handles the single call for a key.
func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
c.val, c.err = fn()
c.wg.Done()
g.mu.Lock()
delete(g.m, key)
for _, ch := range c.chans {
ch <- Result{c.val, c.err, c.dups > 0}
}
g.mu.Unlock()
}
// Forget tells the singleflight to forget about a key. Future calls
// to Do for this key will call the function rather than waiting for
// an earlier call to complete.
func (g *Group) Forget(key string) {
g.mu.Lock()
delete(g.m, key)
g.mu.Unlock()
}
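A small usage sketch of the Do contract documented above: concurrent callers with the same key share one in-flight call. The fetchUser helper and the key format are made up for illustration and are not part of the package.
// Sketch: collapsing concurrent lookups for the same key with singleflight.
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

func fetchUser(id string) (interface{}, error) {
	// Imagine an expensive database or HTTP call here.
	return "user-record-for-" + id, nil
}

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup

	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All five goroutines ask for the same key; only one fetchUser
			// call is in flight, the rest receive its result.
			v, err, shared := g.Do("user:42", func() (interface{}, error) {
				return fetchUser("42")
			})
			fmt.Println(v, err, shared)
		}()
	}
	wg.Wait()
}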

View File

@ -0,0 +1,87 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package singleflight
import (
"errors"
"fmt"
"sync"
"sync/atomic"
"testing"
"time"
)
func TestDo(t *testing.T) {
var g Group
v, err, _ := g.Do("key", func() (interface{}, error) {
return "bar", nil
})
if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want {
t.Errorf("Do = %v; want %v", got, want)
}
if err != nil {
t.Errorf("Do error = %v", err)
}
}
func TestDoErr(t *testing.T) {
var g Group
someErr := errors.New("Some error")
v, err, _ := g.Do("key", func() (interface{}, error) {
return nil, someErr
})
if err != someErr {
t.Errorf("Do error = %v; want someErr %v", err, someErr)
}
if v != nil {
t.Errorf("unexpected non-nil value %#v", v)
}
}
func TestDoDupSuppress(t *testing.T) {
var g Group
var wg1, wg2 sync.WaitGroup
c := make(chan string, 1)
var calls int32
fn := func() (interface{}, error) {
if atomic.AddInt32(&calls, 1) == 1 {
// First invocation.
wg1.Done()
}
v := <-c
c <- v // pump; make available for any future calls
time.Sleep(10 * time.Millisecond) // let more goroutines enter Do
return v, nil
}
const n = 10
wg1.Add(1)
for i := 0; i < n; i++ {
wg1.Add(1)
wg2.Add(1)
go func() {
defer wg2.Done()
wg1.Done()
v, err, _ := g.Do("key", fn)
if err != nil {
t.Errorf("Do error: %v", err)
return
}
if s, _ := v.(string); s != "bar" {
t.Errorf("Do = %T %v; want %q", v, v, "bar")
}
}()
}
wg1.Wait()
// At least one goroutine is in fn now and all of them have at
// least reached the line before the Do.
c <- "bar"
wg2.Wait()
if got := atomic.LoadInt32(&calls); got <= 0 || got >= n {
t.Errorf("number of calls = %d; want over 0 and less than %d", got, n)
}
}

372
vendor/golang.org/x/sync/syncmap/map.go generated vendored Normal file
View File

@ -0,0 +1,372 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package syncmap provides a concurrent map implementation.
// It is a prototype for a proposed addition to the sync package
// in the standard library.
// (https://golang.org/issue/18177)
package syncmap
import (
"sync"
"sync/atomic"
"unsafe"
)
// Map is a concurrent map with amortized-constant-time loads, stores, and deletes.
// It is safe for multiple goroutines to call a Map's methods concurrently.
//
// The zero Map is valid and empty.
//
// A Map must not be copied after first use.
type Map struct {
mu sync.Mutex
// read contains the portion of the map's contents that are safe for
// concurrent access (with or without mu held).
//
// The read field itself is always safe to load, but must only be stored with
// mu held.
//
// Entries stored in read may be updated concurrently without mu, but updating
// a previously-expunged entry requires that the entry be copied to the dirty
// map and unexpunged with mu held.
read atomic.Value // readOnly
// dirty contains the portion of the map's contents that require mu to be
// held. To ensure that the dirty map can be promoted to the read map quickly,
// it also includes all of the non-expunged entries in the read map.
//
// Expunged entries are not stored in the dirty map. An expunged entry in the
// clean map must be unexpunged and added to the dirty map before a new value
// can be stored to it.
//
// If the dirty map is nil, the next write to the map will initialize it by
// making a shallow copy of the clean map, omitting stale entries.
dirty map[interface{}]*entry
// misses counts the number of loads since the read map was last updated that
// needed to lock mu to determine whether the key was present.
//
// Once enough misses have occurred to cover the cost of copying the dirty
// map, the dirty map will be promoted to the read map (in the unamended
// state) and the next store to the map will make a new dirty copy.
misses int
}
// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
m map[interface{}]*entry
amended bool // true if the dirty map contains some key not in m.
}
// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(interface{}))
// An entry is a slot in the map corresponding to a particular key.
type entry struct {
// p points to the interface{} value stored for the entry.
//
// If p == nil, the entry has been deleted and m.dirty == nil.
//
// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
// is missing from m.dirty.
//
// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
// != nil, in m.dirty[key].
//
// An entry can be deleted by atomic replacement with nil: when m.dirty is
// next created, it will atomically replace nil with expunged and leave
// m.dirty[key] unset.
//
// An entry's associated value can be updated by atomic replacement, provided
// p != expunged. If p == expunged, an entry's associated value can be updated
// only after first setting m.dirty[key] = e so that lookups using the dirty
// map find the entry.
p unsafe.Pointer // *interface{}
}
func newEntry(i interface{}) *entry {
return &entry{p: unsafe.Pointer(&i)}
}
// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
read, _ := m.read.Load().(readOnly)
e, ok := read.m[key]
if !ok && read.amended {
m.mu.Lock()
// Avoid reporting a spurious miss if m.dirty got promoted while we were
// blocked on m.mu. (If further loads of the same key will not miss, it's
// not worth copying the dirty map for this key.)
read, _ = m.read.Load().(readOnly)
e, ok = read.m[key]
if !ok && read.amended {
e, ok = m.dirty[key]
// Regardless of whether the entry was present, record a miss: this key
// will take the slow path until the dirty map is promoted to the read
// map.
m.missLocked()
}
m.mu.Unlock()
}
if !ok {
return nil, false
}
return e.load()
}
func (e *entry) load() (value interface{}, ok bool) {
p := atomic.LoadPointer(&e.p)
if p == nil || p == expunged {
return nil, false
}
return *(*interface{})(p), true
}
// Store sets the value for a key.
func (m *Map) Store(key, value interface{}) {
read, _ := m.read.Load().(readOnly)
if e, ok := read.m[key]; ok && e.tryStore(&value) {
return
}
m.mu.Lock()
read, _ = m.read.Load().(readOnly)
if e, ok := read.m[key]; ok {
if e.unexpungeLocked() {
// The entry was previously expunged, which implies that there is a
// non-nil dirty map and this entry is not in it.
m.dirty[key] = e
}
e.storeLocked(&value)
} else if e, ok := m.dirty[key]; ok {
e.storeLocked(&value)
} else {
if !read.amended {
// We're adding the first new key to the dirty map.
// Make sure it is allocated and mark the read-only map as incomplete.
m.dirtyLocked()
m.read.Store(readOnly{m: read.m, amended: true})
}
m.dirty[key] = newEntry(value)
}
m.mu.Unlock()
}
// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(i *interface{}) bool {
p := atomic.LoadPointer(&e.p)
if p == expunged {
return false
}
for {
if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
return true
}
p = atomic.LoadPointer(&e.p)
if p == expunged {
return false
}
}
}
// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}
// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *interface{}) {
atomic.StorePointer(&e.p, unsafe.Pointer(i))
}
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
// Avoid locking if it's a clean hit.
read, _ := m.read.Load().(readOnly)
if e, ok := read.m[key]; ok {
actual, loaded, ok := e.tryLoadOrStore(value)
if ok {
return actual, loaded
}
}
m.mu.Lock()
read, _ = m.read.Load().(readOnly)
if e, ok := read.m[key]; ok {
if e.unexpungeLocked() {
m.dirty[key] = e
}
actual, loaded, _ = e.tryLoadOrStore(value)
} else if e, ok := m.dirty[key]; ok {
actual, loaded, _ = e.tryLoadOrStore(value)
m.missLocked()
} else {
if !read.amended {
// We're adding the first new key to the dirty map.
// Make sure it is allocated and mark the read-only map as incomplete.
m.dirtyLocked()
m.read.Store(readOnly{m: read.m, amended: true})
}
m.dirty[key] = newEntry(value)
actual, loaded = value, false
}
m.mu.Unlock()
return actual, loaded
}
// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
p := atomic.LoadPointer(&e.p)
if p == expunged {
return nil, false, false
}
if p != nil {
return *(*interface{})(p), true, true
}
// Copy the interface after the first load to make this method more amenable
// to escape analysis: if we hit the "load" path or the entry is expunged, we
// shouldn't bother heap-allocating.
ic := i
for {
if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
return i, false, true
}
p = atomic.LoadPointer(&e.p)
if p == expunged {
return nil, false, false
}
if p != nil {
return *(*interface{})(p), true, true
}
}
}
// Delete deletes the value for a key.
func (m *Map) Delete(key interface{}) {
read, _ := m.read.Load().(readOnly)
e, ok := read.m[key]
if !ok && read.amended {
m.mu.Lock()
read, _ = m.read.Load().(readOnly)
e, ok = read.m[key]
if !ok && read.amended {
delete(m.dirty, key)
}
m.mu.Unlock()
}
if ok {
e.delete()
}
}
func (e *entry) delete() (hadValue bool) {
for {
p := atomic.LoadPointer(&e.p)
if p == nil || p == expunged {
return false
}
if atomic.CompareAndSwapPointer(&e.p, p, nil) {
return true
}
}
}
// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map) Range(f func(key, value interface{}) bool) {
// We need to be able to iterate over all of the keys that were already
// present at the start of the call to Range.
// If read.amended is false, then read.m satisfies that property without
// requiring us to hold m.mu for a long time.
read, _ := m.read.Load().(readOnly)
if read.amended {
// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
// (assuming the caller does not break out early), so a call to Range
// amortizes an entire copy of the map: we can promote the dirty copy
// immediately!
m.mu.Lock()
read, _ = m.read.Load().(readOnly)
if read.amended {
read = readOnly{m: m.dirty}
m.read.Store(read)
m.dirty = nil
m.misses = 0
}
m.mu.Unlock()
}
for k, e := range read.m {
v, ok := e.load()
if !ok {
continue
}
if !f(k, v) {
break
}
}
}
func (m *Map) missLocked() {
m.misses++
if m.misses < len(m.dirty) {
return
}
m.read.Store(readOnly{m: m.dirty})
m.dirty = nil
m.misses = 0
}
func (m *Map) dirtyLocked() {
if m.dirty != nil {
return
}
read, _ := m.read.Load().(readOnly)
m.dirty = make(map[interface{}]*entry, len(read.m))
for k, e := range read.m {
if !e.tryExpungeLocked() {
m.dirty[k] = e
}
}
}
func (e *entry) tryExpungeLocked() (isExpunged bool) {
p := atomic.LoadPointer(&e.p)
for p == nil {
if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
return true
}
p = atomic.LoadPointer(&e.p)
}
return p == expunged
}
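A minimal usage sketch of the Map API documented above (Store, Load, LoadOrStore, Range, Delete); the keys and values are arbitrary examples.
// Sketch: basic syncmap.Map usage; the zero Map is ready to use.
package main

import (
	"fmt"

	"golang.org/x/sync/syncmap"
)

func main() {
	var m syncmap.Map

	m.Store("alpha", 1)
	m.Store("beta", 2)

	if v, ok := m.Load("alpha"); ok {
		fmt.Println("alpha =", v)
	}

	// LoadOrStore returns the existing value if the key is already present.
	actual, loaded := m.LoadOrStore("beta", 99)
	fmt.Println(actual, loaded) // 2 true

	m.Range(func(k, v interface{}) bool {
		fmt.Println(k, v)
		return true // keep iterating
	})

	m.Delete("alpha")
}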

216
vendor/golang.org/x/sync/syncmap/map_bench_test.go generated vendored Normal file
View File

@ -0,0 +1,216 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syncmap_test
import (
"fmt"
"reflect"
"sync/atomic"
"testing"
"golang.org/x/sync/syncmap"
)
type bench struct {
setup func(*testing.B, mapInterface)
perG func(b *testing.B, pb *testing.PB, i int, m mapInterface)
}
func benchMap(b *testing.B, bench bench) {
for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &syncmap.Map{}} {
b.Run(fmt.Sprintf("%T", m), func(b *testing.B) {
m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface)
if bench.setup != nil {
bench.setup(b, m)
}
b.ResetTimer()
var i int64
b.RunParallel(func(pb *testing.PB) {
id := int(atomic.AddInt64(&i, 1) - 1)
bench.perG(b, pb, id*b.N, m)
})
})
}
}
func BenchmarkLoadMostlyHits(b *testing.B) {
const hits, misses = 1023, 1
benchMap(b, bench{
setup: func(_ *testing.B, m mapInterface) {
for i := 0; i < hits; i++ {
m.LoadOrStore(i, i)
}
// Prime the map to get it into a steady state.
for i := 0; i < hits*2; i++ {
m.Load(i % hits)
}
},
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
for ; pb.Next(); i++ {
m.Load(i % (hits + misses))
}
},
})
}
func BenchmarkLoadMostlyMisses(b *testing.B) {
const hits, misses = 1, 1023
benchMap(b, bench{
setup: func(_ *testing.B, m mapInterface) {
for i := 0; i < hits; i++ {
m.LoadOrStore(i, i)
}
// Prime the map to get it into a steady state.
for i := 0; i < hits*2; i++ {
m.Load(i % hits)
}
},
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
for ; pb.Next(); i++ {
m.Load(i % (hits + misses))
}
},
})
}
func BenchmarkLoadOrStoreBalanced(b *testing.B) {
const hits, misses = 128, 128
benchMap(b, bench{
setup: func(b *testing.B, m mapInterface) {
if _, ok := m.(*DeepCopyMap); ok {
b.Skip("DeepCopyMap has quadratic running time.")
}
for i := 0; i < hits; i++ {
m.LoadOrStore(i, i)
}
// Prime the map to get it into a steady state.
for i := 0; i < hits*2; i++ {
m.Load(i % hits)
}
},
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
for ; pb.Next(); i++ {
j := i % (hits + misses)
if j < hits {
if _, ok := m.LoadOrStore(j, i); !ok {
b.Fatalf("unexpected miss for %v", j)
}
} else {
if v, loaded := m.LoadOrStore(i, i); loaded {
b.Fatalf("failed to store %v: existing value %v", i, v)
}
}
}
},
})
}
func BenchmarkLoadOrStoreUnique(b *testing.B) {
benchMap(b, bench{
setup: func(b *testing.B, m mapInterface) {
if _, ok := m.(*DeepCopyMap); ok {
b.Skip("DeepCopyMap has quadratic running time.")
}
},
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
for ; pb.Next(); i++ {
m.LoadOrStore(i, i)
}
},
})
}
func BenchmarkLoadOrStoreCollision(b *testing.B) {
benchMap(b, bench{
setup: func(_ *testing.B, m mapInterface) {
m.LoadOrStore(0, 0)
},
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
for ; pb.Next(); i++ {
m.LoadOrStore(0, 0)
}
},
})
}
func BenchmarkRange(b *testing.B) {
const mapSize = 1 << 10
benchMap(b, bench{
setup: func(_ *testing.B, m mapInterface) {
for i := 0; i < mapSize; i++ {
m.Store(i, i)
}
},
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
for ; pb.Next(); i++ {
m.Range(func(_, _ interface{}) bool { return true })
}
},
})
}
// BenchmarkAdversarialAlloc tests performance when we store a new value
// immediately whenever the map is promoted to clean and otherwise load a
// unique, missing key.
//
// This forces the Load calls to always acquire the map's mutex.
func BenchmarkAdversarialAlloc(b *testing.B) {
benchMap(b, bench{
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
var stores, loadsSinceStore int64
for ; pb.Next(); i++ {
m.Load(i)
if loadsSinceStore++; loadsSinceStore > stores {
m.LoadOrStore(i, stores)
loadsSinceStore = 0
stores++
}
}
},
})
}
// BenchmarkAdversarialDelete tests performance when we periodically delete
// one key and add a different one in a large map.
//
// This forces the Load calls to always acquire the map's mutex and periodically
// makes a full copy of the map despite changing only one entry.
func BenchmarkAdversarialDelete(b *testing.B) {
const mapSize = 1 << 10
benchMap(b, bench{
setup: func(_ *testing.B, m mapInterface) {
for i := 0; i < mapSize; i++ {
m.Store(i, i)
}
},
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
for ; pb.Next(); i++ {
m.Load(i)
if i%mapSize == 0 {
m.Range(func(k, _ interface{}) bool {
m.Delete(k)
return false
})
m.Store(i, i)
}
}
},
})
}

151
vendor/golang.org/x/sync/syncmap/map_reference_test.go generated vendored Normal file
View File

@ -0,0 +1,151 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syncmap_test
import (
"sync"
"sync/atomic"
)
// This file contains reference map implementations for unit-tests.
// mapInterface is the interface Map implements.
type mapInterface interface {
Load(interface{}) (interface{}, bool)
Store(key, value interface{})
LoadOrStore(key, value interface{}) (actual interface{}, loaded bool)
Delete(interface{})
Range(func(key, value interface{}) (shouldContinue bool))
}
// RWMutexMap is an implementation of mapInterface using a sync.RWMutex.
type RWMutexMap struct {
mu sync.RWMutex
dirty map[interface{}]interface{}
}
func (m *RWMutexMap) Load(key interface{}) (value interface{}, ok bool) {
m.mu.RLock()
value, ok = m.dirty[key]
m.mu.RUnlock()
return
}
func (m *RWMutexMap) Store(key, value interface{}) {
m.mu.Lock()
if m.dirty == nil {
m.dirty = make(map[interface{}]interface{})
}
m.dirty[key] = value
m.mu.Unlock()
}
func (m *RWMutexMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
m.mu.Lock()
actual, loaded = m.dirty[key]
if !loaded {
actual = value
if m.dirty == nil {
m.dirty = make(map[interface{}]interface{})
}
m.dirty[key] = value
}
m.mu.Unlock()
return actual, loaded
}
func (m *RWMutexMap) Delete(key interface{}) {
m.mu.Lock()
delete(m.dirty, key)
m.mu.Unlock()
}
func (m *RWMutexMap) Range(f func(key, value interface{}) (shouldContinue bool)) {
m.mu.RLock()
keys := make([]interface{}, 0, len(m.dirty))
for k := range m.dirty {
keys = append(keys, k)
}
m.mu.RUnlock()
for _, k := range keys {
v, ok := m.Load(k)
if !ok {
continue
}
if !f(k, v) {
break
}
}
}
// DeepCopyMap is an implementation of mapInterface using a Mutex and
// atomic.Value. It makes deep copies of the map on every write to avoid
// acquiring the Mutex in Load.
type DeepCopyMap struct {
mu sync.Mutex
clean atomic.Value
}
func (m *DeepCopyMap) Load(key interface{}) (value interface{}, ok bool) {
clean, _ := m.clean.Load().(map[interface{}]interface{})
value, ok = clean[key]
return value, ok
}
func (m *DeepCopyMap) Store(key, value interface{}) {
m.mu.Lock()
dirty := m.dirty()
dirty[key] = value
m.clean.Store(dirty)
m.mu.Unlock()
}
func (m *DeepCopyMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
clean, _ := m.clean.Load().(map[interface{}]interface{})
actual, loaded = clean[key]
if loaded {
return actual, loaded
}
m.mu.Lock()
// Reload clean in case it changed while we were waiting on m.mu.
clean, _ = m.clean.Load().(map[interface{}]interface{})
actual, loaded = clean[key]
if !loaded {
dirty := m.dirty()
dirty[key] = value
actual = value
m.clean.Store(dirty)
}
m.mu.Unlock()
return actual, loaded
}
func (m *DeepCopyMap) Delete(key interface{}) {
m.mu.Lock()
dirty := m.dirty()
delete(dirty, key)
m.clean.Store(dirty)
m.mu.Unlock()
}
func (m *DeepCopyMap) Range(f func(key, value interface{}) (shouldContinue bool)) {
clean, _ := m.clean.Load().(map[interface{}]interface{})
for k, v := range clean {
if !f(k, v) {
break
}
}
}
func (m *DeepCopyMap) dirty() map[interface{}]interface{} {
clean, _ := m.clean.Load().(map[interface{}]interface{})
dirty := make(map[interface{}]interface{}, len(clean)+1)
for k, v := range clean {
dirty[k] = v
}
return dirty
}

172
vendor/golang.org/x/sync/syncmap/map_test.go generated vendored Normal file
View File

@ -0,0 +1,172 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syncmap_test
import (
"math/rand"
"reflect"
"runtime"
"sync"
"testing"
"testing/quick"
"golang.org/x/sync/syncmap"
)
type mapOp string
const (
opLoad = mapOp("Load")
opStore = mapOp("Store")
opLoadOrStore = mapOp("LoadOrStore")
opDelete = mapOp("Delete")
)
var mapOps = [...]mapOp{opLoad, opStore, opLoadOrStore, opDelete}
// mapCall is a quick.Generator for calls on mapInterface.
type mapCall struct {
op mapOp
k, v interface{}
}
func (c mapCall) apply(m mapInterface) (interface{}, bool) {
switch c.op {
case opLoad:
return m.Load(c.k)
case opStore:
m.Store(c.k, c.v)
return nil, false
case opLoadOrStore:
return m.LoadOrStore(c.k, c.v)
case opDelete:
m.Delete(c.k)
return nil, false
default:
panic("invalid mapOp")
}
}
type mapResult struct {
value interface{}
ok bool
}
func randValue(r *rand.Rand) interface{} {
b := make([]byte, r.Intn(4))
for i := range b {
b[i] = 'a' + byte(rand.Intn(26))
}
return string(b)
}
func (mapCall) Generate(r *rand.Rand, size int) reflect.Value {
c := mapCall{op: mapOps[rand.Intn(len(mapOps))], k: randValue(r)}
switch c.op {
case opStore, opLoadOrStore:
c.v = randValue(r)
}
return reflect.ValueOf(c)
}
func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[interface{}]interface{}) {
for _, c := range calls {
v, ok := c.apply(m)
results = append(results, mapResult{v, ok})
}
final = make(map[interface{}]interface{})
m.Range(func(k, v interface{}) bool {
final[k] = v
return true
})
return results, final
}
func applyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
return applyCalls(new(syncmap.Map), calls)
}
func applyRWMutexMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
return applyCalls(new(RWMutexMap), calls)
}
func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
return applyCalls(new(DeepCopyMap), calls)
}
func TestMapMatchesRWMutex(t *testing.T) {
if err := quick.CheckEqual(applyMap, applyRWMutexMap, nil); err != nil {
t.Error(err)
}
}
func TestMapMatchesDeepCopy(t *testing.T) {
if err := quick.CheckEqual(applyMap, applyDeepCopyMap, nil); err != nil {
t.Error(err)
}
}
func TestConcurrentRange(t *testing.T) {
const mapSize = 1 << 10
m := new(syncmap.Map)
for n := int64(1); n <= mapSize; n++ {
m.Store(n, int64(n))
}
done := make(chan struct{})
var wg sync.WaitGroup
defer func() {
close(done)
wg.Wait()
}()
for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- {
r := rand.New(rand.NewSource(g))
wg.Add(1)
go func(g int64) {
defer wg.Done()
for i := int64(0); ; i++ {
select {
case <-done:
return
default:
}
for n := int64(1); n < mapSize; n++ {
if r.Int63n(mapSize) == 0 {
m.Store(n, n*i*g)
} else {
m.Load(n)
}
}
}
}(g)
}
iters := 1 << 10
if testing.Short() {
iters = 16
}
for n := iters; n > 0; n-- {
seen := make(map[int64]bool, mapSize)
m.Range(func(ki, vi interface{}) bool {
k, v := ki.(int64), vi.(int64)
if v%k != 0 {
t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v)
}
if seen[k] {
t.Fatalf("Range visited key %v twice", k)
}
seen[k] = true
return true
})
if len(seen) != mapSize {
t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize)
}
}
}

View File

@ -90,7 +90,7 @@ func (s *static) Watch() {
// do nothing
}
// NewStaticInteracter creates a new static interacter.
// NewDynamicInteracter creates a new dynamic interacter.
func NewDynamicInteracter(sid string, updateCh *channels.RingChannel, stopCh chan struct{}) Interacter {
cfg, err := db.GetManager().ThirdPartySvcDiscoveryCfgDao().GetByServiceID(sid)
if err != nil {

View File

@ -114,9 +114,9 @@ func (t *thirdparty) Start() {
go t.runUpdate(devent)
case <-t.stopCh:
for _, stopCh := range t.svcStopCh {
close(stopCh) // TODO: close of closed channel
close(stopCh)
}
break
return
}
}
}()
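This hunk replaces break with return because, in Go, break inside a select only exits the select statement, not the enclosing for loop, so the goroutine would keep looping after the stop signal. A minimal sketch of the pattern (channel names here are illustrative, not from the repository):
// Sketch: stopping a select-in-for loop; a bare "break" would only leave the
// select, so use return or a labeled break.
package main

import "fmt"

func worker(events <-chan string, stop <-chan struct{}) {
loop:
	for {
		select {
		case e := <-events:
			fmt.Println("event:", e)
		case <-stop:
			break loop // or simply: return
		}
	}
	fmt.Println("worker stopped")
}

func main() {
	events := make(chan string, 1)
	stop := make(chan struct{})
	done := make(chan struct{})
	go func() { worker(events, stop); close(done) }()
	events <- "hello"
	close(stop)
	<-done
}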

View File

@ -18,6 +18,7 @@ package controller
import (
"fmt"
"github.com/goodrain/rainbond/db/dao"
"io/ioutil"
"os"
"strings"
@ -965,6 +966,10 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol
volume, err = ctrl.provisioners[provisioner].Provision(options)
if err != nil {
if err == dao.VolumeNotFound {
logrus.Warningf("PVC: %s; volume not found.", claim.Name)
return nil
}
if ierr, ok := err.(*IgnoredError); ok {
// Provision ignored, do nothing and hope another provisioner will provision it.
logrus.Info(logOperation(operation, "volume provision ignored: %v", ierr))

View File

@ -22,6 +22,7 @@ import (
"bytes"
"encoding/json"
"fmt"
"github.com/goodrain/rainbond/db/dao"
"net/http"
"time"
@ -35,7 +36,7 @@ import (
"github.com/goodrain/rainbond/node/nodem/client"
httputil "github.com/goodrain/rainbond/util/http"
"github.com/goodrain/rainbond/worker/master/volumes/provider/lib/controller"
v1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -66,11 +67,13 @@ func (p *rainbondsslcProvisioner) selectNode(nodeOS string) (*v1.Node, error) {
var maxavailable int64
var selectnode *v1.Node
for _, node := range allnode.Items {
nodeReady := false
if node.Labels[client.LabelOS] != nodeOS {
continue
}
for _, condition := range node.Status.Conditions {
if condition.Type == v1.NodeReady {
nodeReady = true
if condition.Status == v1.ConditionTrue {
ip := ""
for _, address := range node.Status.Addresses {
@ -80,11 +83,14 @@ func (p *rainbondsslcProvisioner) selectNode(nodeOS string) (*v1.Node, error) {
}
}
if ip == "" {
logrus.Warningf("Node: %s; node internal address not found", node.Name)
break
}
//only contains rainbond pod
//pods, err := p.store.GetPodLister().Pods(v1.NamespaceAll).List(labels.NewSelector())
pods, err := p.kubecli.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{})
pods, err := p.kubecli.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
FieldSelector: "spec.nodeName=" + node.Name,
})
if err != nil {
logrus.Errorf("list pods list from node ip error %s", err.Error())
break
@ -98,12 +104,19 @@ func (p *rainbondsslcProvisioner) selectNode(nodeOS string) (*v1.Node, error) {
}
available := node.Status.Allocatable.Memory().Value() - nodeUsedMemory
if available >= maxavailable {
logrus.Infof("select node: %s", node.Name)
maxavailable = available
selectnode = &node
} else {
logrus.Infof("Node: %s; node available memory(%d) is less than max available "+
"memory(%d)", node.Name, available, maxavailable)
}
}
}
}
if !nodeReady {
logrus.Warningf("Node: %s; not ready", node.Name)
}
}
return selectnode, nil
}
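The selectNode change above restricts the pod list to the candidate node with a field selector and then prefers the node with the most unrequested memory. A condensed sketch of that calculation, assuming an already-configured kubernetes.Interface client; nodeFreeMemory is an illustrative helper, not the repository's code, and the List call without a context matches the client-go vintage used here.
// Sketch: estimate a node's free allocatable memory by subtracting the memory
// requests of the pods scheduled on it.
package provider // hypothetical package name for illustration

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func nodeFreeMemory(kubecli kubernetes.Interface, node *v1.Node) (int64, error) {
	// Only pods scheduled onto this node count against its allocatable memory.
	pods, err := kubecli.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
		FieldSelector: "spec.nodeName=" + node.Name,
	})
	if err != nil {
		return 0, err
	}
	var requested int64
	for _, pod := range pods.Items {
		for _, c := range pod.Spec.Containers {
			requested += c.Resources.Requests.Memory().Value()
		}
	}
	return node.Status.Allocatable.Memory().Value() - requested, nil
}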
@ -114,7 +127,7 @@ func (p *rainbondsslcProvisioner) createPath(options controller.VolumeOptions) (
if volumeID != 0 {
volume, err := db.GetManager().TenantServiceVolumeDao().GetVolumeByID(volumeID)
if err != nil {
logrus.Errorf("get volume by id %d failre %s", volumeID, err.Error())
logrus.Warningf("get volume by id %d failure %s", volumeID, err.Error())
return "", err
}
reqoptions := map[string]string{
@ -172,7 +185,11 @@ func (p *rainbondsslcProvisioner) Provision(options controller.VolumeOptions) (*
if options.SelectedNode == nil {
var err error
options.SelectedNode, err = p.selectNode(options.PVC.Annotations[client.LabelOS])
if err != nil || options.SelectedNode == nil {
if err != nil {
return nil, fmt.Errorf("Node OS: %s; error selecting node: %v",
options.PVC.Annotations[client.LabelOS], err)
}
if options.SelectedNode == nil {
return nil, fmt.Errorf("do not select an appropriate node for local volume")
}
if _, ok := options.SelectedNode.Labels["rainbond_node_ip"]; !ok {
@ -181,6 +198,9 @@ func (p *rainbondsslcProvisioner) Provision(options controller.VolumeOptions) (*
}
path, err := p.createPath(options)
if err != nil {
if err == dao.VolumeNotFound {
return nil, err
}
return nil, fmt.Errorf("create local volume from node %s failure %s", options.SelectedNode.Name, err.Error())
}
if path == "" {

View File

@ -70,7 +70,7 @@ func (p *rainbondssscProvisioner) Provision(options controller.VolumeOptions) (*
if volumeID != 0 {
volume, err := db.GetManager().TenantServiceVolumeDao().GetVolumeByID(volumeID)
if err != nil {
logrus.Errorf("get volume by id %d failre %s", volumeID, err.Error())
logrus.Errorf("get volume by id %d failure %s", volumeID, err.Error())
return nil, err
}
hostpath = path.Join(volume.HostPath, podName)