Rainbond/worker/monitor/collector/collector.go

169 lines
5.3 KiB
Go
Raw Normal View History

2018-03-14 14:12:26 +08:00
// Copyright (C) 2014-2018 Goodrain Co., Ltd.
// RAINBOND, Application Management Platform

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package collector
import (
2018-01-23 18:32:52 +08:00
"os"
2018-01-31 17:04:05 +08:00
"strings"
"time"
"github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/worker/monitor/cache"
2018-01-31 17:04:05 +08:00
"github.com/Sirupsen/logrus"
status "github.com/goodrain/rainbond/appruntimesync/client"
"github.com/goodrain/rainbond/db"
"github.com/prometheus/client_golang/prometheus"
)
//Exporter collects tenant-service resource metrics (memory and filesystem
//usage) and exposes them as a prometheus.Collector.
type Exporter struct {
	dsn           string // NOTE(review): not referenced anywhere in this file — confirm it is still needed
	error         prometheus.Gauge // set to 1 by scrape when any collection step fails
	totalScrapes  prometheus.Counter // incremented once per scrape
	scrapeErrors  *prometheus.CounterVec // per-collector error counters, labeled by "collector"
	memoryUse     *prometheus.GaugeVec // per-service memory usage (tenant_id, service_id, service_status)
	fsUse         *prometheus.GaugeVec // per-service filesystem usage (tenant_id, service_id, volume_type)
	workerUp      prometheus.Gauge // NOTE(review): emitted by Collect but never Set in this file — always reports 0; confirm intent
	dbmanager     db.Manager // database access used to enumerate tenant services
	statusManager *status.AppRuntimeSyncClient // source of per-service billing/runtime status

	cache *cache.DiskCache // shared cache of per-service disk usage values
}
// scrapeDurationDesc describes the per-collector scrape duration metric
// (app_resource_exporter_collector_duration_seconds), labeled by collector name.
var scrapeDurationDesc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "exporter", "collector_duration_seconds"),
	"Collector time duration.",
	[]string{"collector"}, nil,
)
//Describe implements prometheus.Collector. It discovers the exporter's
//descriptors by running a throwaway Collect and forwarding each metric's
//Desc, since the metric set is not known statically.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
	metrics := make(chan prometheus.Metric)
	done := make(chan struct{})
	go func() {
		for metric := range metrics {
			ch <- metric.Desc()
		}
		close(done)
	}()
	e.Collect(metrics)
	close(metrics)
	<-done
}
// Collect implements prometheus.Collector. It runs a fresh scrape (which
// updates the gauge/counter vectors) and then emits the bookkeeping metrics
// plus the collected vectors on ch.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
	e.scrape(ch)
	ch <- e.totalScrapes
	ch <- e.error

	e.fsUse.Collect(ch)
	e.memoryUse.Collect(ch)
	e.scrapeErrors.Collect(ch)
	// NOTE(review): workerUp is never Set anywhere in this file, so this
	// always emits its zero value (0 = down) — confirm this is intended.
	ch <- e.workerUp
}
func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
e.totalScrapes.Inc()
var err error
scrapeTime := time.Now()
services, err := e.dbmanager.TenantServiceDao().GetAllServices()
if err != nil {
logrus.Errorln("Error scraping for tenant service when select db :", err)
e.scrapeErrors.WithLabelValues("db.getservices").Inc()
e.error.Set(1)
}
status, err := e.statusManager.GetNeedBillingStatus()
if err != nil {
logrus.Errorln("Error scraping for tenant service when select db :", err)
e.scrapeErrors.WithLabelValues("db.getservices").Inc()
e.error.Set(1)
}
2018-01-23 18:32:52 +08:00
localPath := os.Getenv("LOCAL_DATA_PATH")
sharePath := os.Getenv("SHARE_DATA_PATH")
if localPath == "" {
localPath = "/grlocaldata"
}
if sharePath == "" {
sharePath = "/grdata"
}
//获取内存使用情况
for _, service := range services {
2018-03-16 18:11:39 +08:00
if _, ok := status[service.ServiceID]; ok {
2018-03-16 18:20:40 +08:00
e.memoryUse.WithLabelValues(service.TenantID, service.ServiceID, "running").Set(float64(service.ContainerMemory * service.Replicas))
}
}
ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.memory")
scrapeTime = time.Now()
2018-01-31 17:04:05 +08:00
diskcache := e.cache.Get()
for k, v := range diskcache {
key := strings.Split(k, "_")
if len(key) == 2 {
e.fsUse.WithLabelValues(key[1], key[0], string(model.ShareFileVolumeType)).Set(v)
}
}
ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.fs")
}
// namespace is the Prometheus namespace prefix shared by all metrics this
// collector registers (e.g. app_resource_appmemory).
var namespace = "app_resource"
//New 创建一个收集器
func New(statusManager *status.AppRuntimeSyncClient, cache *cache.DiskCache) *Exporter {
return &Exporter{
totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: "exporter",
Name: "scrapes_total",
Help: "Total number of times Worker was scraped for metrics.",
}),
scrapeErrors: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: "exporter",
Name: "scrape_errors_total",
Help: "Total number of times an error occurred scraping a Worker.",
}, []string{"collector"}),
error: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: "exporter",
Name: "last_scrape_error",
Help: "Whether the last scrape of metrics from Worker resulted in an error (1 for error, 0 for success).",
}),
workerUp: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "up",
Help: "Whether the Worker server is up.",
}),
memoryUse: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "appmemory",
Help: "tenant service memory used.",
2018-03-16 18:20:40 +08:00
}, []string{"tenant_id", "service_id", "service_status"}),
fsUse: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "appfs",
Help: "tenant service fs used.",
}, []string{"tenant_id", "service_id", "volume_type"}),
dbmanager: db.GetManager(),
statusManager: statusManager,
2018-01-31 17:04:05 +08:00
cache: cache,
}
}