2018-12-05 22:06:21 +08:00
|
|
|
// RAINBOND, Application Management Platform
|
|
|
|
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
|
|
|
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
|
|
|
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
|
|
|
// must be obtained first.
|
|
|
|
|
|
|
|
// This program is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU General Public License for more details.
|
|
|
|
|
|
|
|
// You should have received a copy of the GNU General Public License
|
|
|
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
2018-11-08 17:22:26 +08:00
|
|
|
package controller
|
|
|
|
|
|
|
|
import (
|
2018-12-01 10:26:35 +08:00
|
|
|
"context"
|
2018-11-14 12:54:49 +08:00
|
|
|
"fmt"
|
2018-11-21 17:35:07 +08:00
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
2019-09-18 17:56:52 +08:00
|
|
|
"github.com/goodrain/rainbond/gateway/cluster"
|
|
|
|
|
2018-12-01 10:26:35 +08:00
|
|
|
client "github.com/coreos/etcd/clientv3"
|
2018-11-08 17:22:26 +08:00
|
|
|
"github.com/eapache/channels"
|
|
|
|
"github.com/goodrain/rainbond/cmd/gateway/option"
|
|
|
|
"github.com/goodrain/rainbond/gateway/controller/openresty"
|
2019-03-07 21:38:47 +08:00
|
|
|
"github.com/goodrain/rainbond/gateway/metric"
|
2018-11-08 17:22:26 +08:00
|
|
|
"github.com/goodrain/rainbond/gateway/store"
|
2019-08-28 21:41:44 +08:00
|
|
|
v1 "github.com/goodrain/rainbond/gateway/v1"
|
2020-09-06 11:09:48 +08:00
|
|
|
"github.com/goodrain/rainbond/util/ingress-nginx/task"
|
|
|
|
"github.com/sirupsen/logrus"
|
2019-03-07 21:38:47 +08:00
|
|
|
"k8s.io/apimachinery/pkg/util/sets"
|
2020-01-18 21:26:46 +08:00
|
|
|
"k8s.io/client-go/kubernetes"
|
2018-11-13 20:26:41 +08:00
|
|
|
"k8s.io/client-go/util/flowcontrol"
|
2018-11-08 17:22:26 +08:00
|
|
|
)
|
|
|
|
|
2018-12-01 10:26:35 +08:00
|
|
|
// rbdemap is the set of environment-variable names that identify rainbond
// built-in endpoints (API server, image hub and repository).
// Initialized with a composite literal instead of init(): the contents are
// static, so there is no need for an init function with side effects.
var rbdemap = map[string]struct{}{
	"APISERVER_ENDPOINTS": {},
	"HUB_ENDPOINTS":       {},
	"REPO_ENDPOINTS":      {},
}
|
2018-11-13 17:22:22 +08:00
|
|
|
|
2018-11-22 21:16:00 +08:00
|
|
|
// GWController is the gateway controller. It watches kubernetes resources
// through the store, funnels change events into a rate-limited task queue,
// and drives the underlying gateway service (GWS) to match the desired
// configuration.
type GWController struct {
	// GWS is the gateway service implementation (e.g. openresty) that
	// receives configuration and pool updates.
	GWS GWServicer
	// store provides the informer-backed view of virtual services and pools.
	store store.Storer

	// syncQueue serializes configuration syncs; its worker is syncGateway.
	syncQueue *task.Queue
	// syncRateLimiter throttles how often syncGateway may run.
	syncRateLimiter flowcontrol.RateLimiter
	// isShuttingDown is set by Close so in-flight event handling can bail out.
	isShuttingDown bool

	// stopLock is used to enforce that only a single call to Stop send at
	// a given time. We allow stopping through an HTTP endpoint and
	// allowing concurrent stoppers leads to stack traces.
	stopLock *sync.Mutex

	ocfg *option.Config
	rcfg *v1.Config // running configuration
	rrhp []*v1.Pool // running rainbond http pools
	rrtp []*v1.Pool // running rainbond tcp or udp pools

	// stopCh is closed by Close to stop goroutines started in Start.
	stopCh chan struct{}
	// updateCh carries k8s events from the store to handleEvent.
	updateCh *channels.RingChannel

	// EtcdCli may be nil; when set it is closed during Close.
	EtcdCli *client.Client
	ctx     context.Context

	// metricCollector exports host/server metrics after each sync.
	metricCollector metric.Collector
}
|
|
|
|
|
2018-11-22 21:16:00 +08:00
|
|
|
// Start starts Gateway: it boots the gateway plugin, runs the informer
// store, starts the sync task queue, forces an initial sync, and begins
// consuming update events in a background goroutine. Runtime failures of
// the plugin are reported asynchronously on errCh. Returns an error only
// if the plugin fails to start.
func (gwc *GWController) Start(errCh chan error) error {
	// start plugin(eg: nginx, zeus and etc)
	if err := gwc.GWS.Start(errCh); err != nil {
		return err
	}
	// start informer
	gwc.store.Run(gwc.stopCh)

	// start task queue
	go gwc.syncQueue.Run(1*time.Second, gwc.stopCh)

	// force initial sync so the gateway converges even before any k8s event
	gwc.syncQueue.EnqueueTask(task.GetDummyObject("initial-sync"))

	// consume store events until stopCh is closed
	go gwc.handleEvent()

	return nil
}
|
|
|
|
|
2018-12-02 21:09:32 +08:00
|
|
|
// Close stops Gateway: it flags the shutdown, closes the etcd client (if
// any), signals the goroutines started in Start to exit, shuts down the
// task queue asynchronously, and finally stops the gateway service.
// Returns an error if a shutdown is already in progress.
func (gwc *GWController) Close() error {
	gwc.isShuttingDown = true
	if gwc.EtcdCli != nil {
		// NOTE(review): Close error is discarded — best-effort cleanup;
		// confirm nothing downstream needs the failure surfaced.
		gwc.EtcdCli.Close()
	}
	// serialize concurrent stop requests (see stopLock doc on the struct)
	gwc.stopLock.Lock()
	defer gwc.stopLock.Unlock()

	if gwc.syncQueue.IsShuttingDown() {
		return fmt.Errorf("shutdown already in progress")
	}

	logrus.Infof("Shutting down controller queues")
	close(gwc.stopCh) // stop the loop in *GWController#Start()
	go gwc.syncQueue.Shutdown()

	return gwc.GWS.Stop()
}
|
|
|
|
|
2018-12-02 21:09:32 +08:00
|
|
|
func (gwc *GWController) handleEvent() {
|
2018-11-08 17:22:26 +08:00
|
|
|
for {
|
|
|
|
select {
|
2018-12-01 10:26:35 +08:00
|
|
|
case event := <-gwc.updateCh.Out(): // received k8s events
|
2018-11-08 17:22:26 +08:00
|
|
|
if gwc.isShuttingDown {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if evt, ok := event.(store.Event); ok {
|
|
|
|
gwc.syncQueue.EnqueueSkippableTask(evt.Obj)
|
|
|
|
} else {
|
2018-12-01 10:26:35 +08:00
|
|
|
logrus.Warningf("Unexpected event type received %T", event)
|
2018-11-08 17:22:26 +08:00
|
|
|
}
|
|
|
|
case <-gwc.stopCh:
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-12-01 10:26:35 +08:00
|
|
|
// syncGateway is the task-queue worker. It snapshots the desired gateway
// configuration from the store, pushes pool membership to the gateway
// service dynamically, and — only when the configuration actually changed —
// persists the nginx config and refreshes the exported metrics.
// key is the task payload and is not used. Always returns nil so the queue
// does not retry; failures are only logged.
func (gwc *GWController) syncGateway(key interface{}) error {
	// throttle sync frequency
	gwc.syncRateLimiter.Accept()

	if gwc.syncQueue.IsShuttingDown() {
		return nil
	}

	l7sv, l4sv := gwc.store.ListVirtualService()
	httpPools, tcpPools := gwc.store.ListPool()
	// snapshot taken BEFORE the rainbond pools are appended below, so the
	// Equals comparison and rcfg only cover store-derived configuration
	currentConfig := &v1.Config{
		HTTPPools: httpPools,
		TCPPools:  tcpPools,
		L7VS:      l7sv,
		L4VS:      l4sv,
	}
	// refresh http tcp and udp pools dynamically
	httpPools = append(httpPools, gwc.rrhp...)
	tcpPools = append(tcpPools, gwc.rrtp...)
	if err := gwc.GWS.UpdatePools(httpPools, tcpPools); err != nil {
		logrus.Warningf("error updating pools: %v", err)
	}
	if gwc.rcfg.Equals(currentConfig) {
		logrus.Debug("No need to update running configuration.")
		return nil
	}
	logrus.Infof("update nginx server config file.")
	err := gwc.GWS.PersistConfig(currentConfig)
	if err != nil {
		// TODO: if nginx is not ready, then stop gateway
		logrus.Errorf("Fail to persist Nginx config: %v\n", err)
		// metrics and rcfg are deliberately NOT updated on failure, so the
		// next sync retries the persist
		return nil
	}

	//set metric
	remove, hosts := getHosts(gwc.rcfg, currentConfig)
	gwc.metricCollector.SetHosts(hosts)
	gwc.metricCollector.RemoveHostMetric(remove)
	gwc.metricCollector.SetServerNum(len(httpPools), len(tcpPools))

	// record the new running configuration
	gwc.rcfg = currentConfig
	return nil
}
|
|
|
|
|
2018-11-21 17:35:07 +08:00
|
|
|
//NewGWController new Gateway controller
|
2020-01-03 16:12:56 +08:00
|
|
|
func NewGWController(ctx context.Context, clientset kubernetes.Interface, cfg *option.Config, mc metric.Collector, node *cluster.NodeManager) (*GWController, error) {
|
2018-11-08 17:22:26 +08:00
|
|
|
gwc := &GWController{
|
2018-12-25 11:26:22 +08:00
|
|
|
updateCh: channels.NewRingChannel(1024),
|
2020-01-14 18:55:58 +08:00
|
|
|
syncRateLimiter: flowcontrol.NewTokenBucketRateLimiter(cfg.SyncRateLimit, 1),
|
2018-12-25 11:26:22 +08:00
|
|
|
stopLock: &sync.Mutex{},
|
|
|
|
stopCh: make(chan struct{}),
|
|
|
|
ocfg: cfg,
|
|
|
|
ctx: ctx,
|
2018-12-21 17:21:52 +08:00
|
|
|
metricCollector: mc,
|
2018-11-08 17:22:26 +08:00
|
|
|
}
|
|
|
|
|
2018-12-01 10:26:35 +08:00
|
|
|
gwc.GWS = openresty.CreateOpenrestyService(cfg, &gwc.isShuttingDown)
|
2020-01-03 16:12:56 +08:00
|
|
|
|
2018-11-14 12:54:49 +08:00
|
|
|
gwc.store = store.New(
|
2020-01-03 16:12:56 +08:00
|
|
|
clientset,
|
2018-11-27 12:42:21 +08:00
|
|
|
gwc.updateCh,
|
2019-09-18 17:56:52 +08:00
|
|
|
cfg, node)
|
2018-11-08 17:22:26 +08:00
|
|
|
gwc.syncQueue = task.NewTaskQueue(gwc.syncGateway)
|
2018-12-21 17:21:52 +08:00
|
|
|
|
2018-12-02 21:09:32 +08:00
|
|
|
return gwc, nil
|
2018-11-08 17:22:26 +08:00
|
|
|
}
|
2018-12-01 10:26:35 +08:00
|
|
|
|
2020-02-03 23:18:37 +08:00
|
|
|
func poolsEqual(a []*v1.Pool, b []*v1.Pool) bool {
|
2019-04-15 18:35:29 +08:00
|
|
|
if len(a) != len(b) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
for _, ap := range a {
|
|
|
|
flag := false
|
|
|
|
for _, bp := range b {
|
|
|
|
if ap.Equals(bp) {
|
|
|
|
flag = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !flag {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
2018-12-02 21:09:32 +08:00
|
|
|
}
|
2018-12-25 11:26:22 +08:00
|
|
|
|
2019-03-08 15:09:09 +08:00
|
|
|
// getHosts returns a list of the hostsnames and tobe remove hostname
|
|
|
|
// that are not associated anymore to the NGINX configuration.
|
|
|
|
func getHosts(rucfg, newcfg *v1.Config) (remove []string, current sets.String) {
|
|
|
|
old := sets.NewString()
|
|
|
|
new := sets.NewString()
|
|
|
|
if rucfg != nil {
|
|
|
|
for _, s := range rucfg.L7VS {
|
|
|
|
if !old.Has(s.ServerName) {
|
|
|
|
old.Insert(s.ServerName)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if newcfg != nil {
|
|
|
|
for _, s := range newcfg.L7VS {
|
|
|
|
if !new.Has(s.ServerName) {
|
|
|
|
new.Insert(s.ServerName)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return old.Difference(new).List(), new
|
|
|
|
}
|