Merge branch 'V5.2' into grdata

This commit is contained in:
黄润豪 2020-04-13 18:14:38 +08:00 committed by GitHub
commit 69bf95ca56
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
71 changed files with 7238 additions and 2531 deletions

View File

@ -53,6 +53,7 @@ Rainbond has completed adaptation to domestic computing platforms represented by Loongson, Phytium, and the Kylin operating system…
| Application description model | Describes, with the application at the center, the component characteristics, application characteristics, and deployment/O&M characteristics it contains, enabling standardized delivery of complex applications. |
For more features, see:
[Rainbond功能特性说明](https://www.rainbond.com/docs/quick-start/edition/)
[Rainbond开发计划](https://www.rainbond.com/docs/quick-start/roadmap/)
@ -73,11 +74,12 @@ Rainbond has completed adaptation to domestic computing platforms represented by Loongson, Phytium, and the Kylin operating system…
<center><img width="200px" src="https://grstatic.oss-cn-shanghai.aliyuncs.com/images/12141565594759_.pic_hd.jpg"/></center>
<center> Add us on WeChat and ask to join the WeChat group for more Rainbond news </center>
## Contributing
We warmly welcome you to contribute to and share with the Rainbond community in areas such as platform usage experience, standardized applications, and plugin sharing.
If you are an existing Rainbond user, have a deep understanding of Rainbond, agree with its technical direction, and have significant demand within your organization, we warmly welcome you to [contribute to Rainbond development](https://www.rainbond.com/docs/contribution/)
## Related Projects

View File

@ -1,15 +1,22 @@
package controller
import (
"github.com/go-chi/chi"
"github.com/goodrain/rainbond/api/handler"
httputil "github.com/goodrain/rainbond/util/http"
"net/http"
"github.com/go-chi/chi"
"github.com/goodrain/rainbond/api/handler"
httputil "github.com/goodrain/rainbond/util/http"
)
//GetRunningServices list all running service ids
func GetRunningServices(w http.ResponseWriter, r *http.Request) {
enterpriseID := chi.URLParam(r, "enterprise_id")
runningList := handler.GetServiceManager().GetEnterpriseRunningServices(enterpriseID)
runningList, err := handler.GetServiceManager().GetEnterpriseRunningServices(enterpriseID)
if err != nil {
err.Handle(r, w)
return
}
httputil.ReturnNoFomart(r, w, 200, map[string]interface{}{"service_ids": runningList})
}
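The handler change above switches to a service call that returns a typed API error and writes it to the response instead of assuming the lookup always succeeds. A minimal standalone sketch of that pattern, assuming a simplified stand-in for util.APIHandleError and for the service manager call (none of the Rainbond types are used here):

```go
package main

import (
	"encoding/json"
	"errors"
	"net/http"

	"github.com/go-chi/chi"
)

// apiError is a simplified stand-in for util.APIHandleError: an HTTP status plus a cause.
type apiError struct {
	Code int
	Err  error
}

// Handle writes the error to the response, mirroring err.Handle(r, w) in the hunk above.
func (e *apiError) Handle(w http.ResponseWriter) {
	http.Error(w, e.Err.Error(), e.Code)
}

// listRunningServices is a hypothetical service-layer call that either returns
// data or a typed error the handler can pass straight back to the client.
func listRunningServices(enterpriseID string) ([]string, *apiError) {
	if enterpriseID == "" {
		return nil, &apiError{Code: http.StatusBadRequest, Err: errors.New("enterprise id is empty")}
	}
	return []string{"svc-1", "svc-2"}, nil
}

func getRunningServices(w http.ResponseWriter, r *http.Request) {
	enterpriseID := chi.URLParam(r, "enterprise_id")
	runningList, err := listRunningServices(enterpriseID)
	if err != nil {
		err.Handle(w)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{"service_ids": runningList})
}

func main() {
	r := chi.NewRouter()
	r.Get("/enterprises/{enterprise_id}/running-services", getRunningServices)
	http.ListenAndServe(":8080", r)
}
```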

View File

@ -33,12 +33,13 @@ import (
"github.com/goodrain/rainbond/api/util"
"github.com/goodrain/rainbond/db"
dbmodel "github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/event"
"github.com/goodrain/rainbond/worker/client"
dbmodel "github.com/goodrain/rainbond/db/model"
mqclient "github.com/goodrain/rainbond/mq/client"
core_util "github.com/goodrain/rainbond/util"
v1 "github.com/goodrain/rainbond/worker/appm/types/v1"
"github.com/goodrain/rainbond/worker/client"
)
//Backup GroupBackup
@ -205,6 +206,7 @@ type RegionServiceSnapshot struct {
ServiceRelation []*dbmodel.TenantServiceRelation
ServiceStatus string
ServiceVolume []*dbmodel.TenantServiceVolume
ServiceConfigFile []*dbmodel.TenantServiceConfigFile
ServicePort []*dbmodel.TenantServicesPort
Versions []*dbmodel.VersionInfo
@ -272,6 +274,11 @@ func (h *BackupHandle) snapshot(ids []string, sourceDir string, force bool) erro
return fmt.Errorf("Get service(%s) volume error %s", id, err)
}
data.ServiceVolume = serviceVolume
serviceConfigFile, err := db.GetManager().TenantServiceConfigFileDao().GetConfigFileByServiceID(id)
if err != nil && err != gorm.ErrRecordNotFound {
return fmt.Errorf("get service(%s) config file error: %s", id, err.Error())
}
data.ServiceConfigFile = serviceConfigFile
servicePorts, err := db.GetManager().TenantServicesPortDao().GetPortsByServiceID(id)
if err != nil && err.Error() != gorm.ErrRecordNotFound.Error() {
return fmt.Errorf("Get service(%s) ports error %s", id, err)

View File

@ -1683,23 +1683,23 @@ func (s *ServiceAction) GetServicesStatus(tenantID string, serviceIDs []string)
}
// GetEnterpriseRunningServices get running services
func (s *ServiceAction) GetEnterpriseRunningServices(enterpriseID string) []string {
func (s *ServiceAction) GetEnterpriseRunningServices(enterpriseID string) ([]string, *util.APIHandleError) {
var tenantIDs []string
tenants, err := db.GetManager().EnterpriseDao().GetEnterpriseTenants(enterpriseID)
if err != nil {
logrus.Errorf("list tenant failed: %s", err.Error())
return []string{}
return nil, util.CreateAPIHandleErrorFromDBError(fmt.Sprintf("enterprise[%s] get tenant failed", enterpriseID), err)
}
if len(tenants) == 0 {
return nil, util.CreateAPIHandleErrorf(400, "enterprise[%s] has not tenants", enterpriseID)
}
for _, tenant := range tenants {
tenantIDs = append(tenantIDs, tenant.UUID)
}
if len(tenantIDs) == 0 {
return []string{}
}
services, err := db.GetManager().TenantServiceDao().GetServicesByTenantIDs(tenantIDs)
if err != nil {
logrus.Errorf("list tenants servicee failed: %s", err.Error())
return []string{}
logrus.Errorf("list tenants service failed: %s", err.Error())
return nil, util.CreateAPIHandleErrorf(500, "get enterprise[%s] service failed: %s", enterpriseID, err.Error())
}
var serviceIDs []string
for _, svc := range services {
@ -1712,7 +1712,7 @@ func (s *ServiceAction) GetEnterpriseRunningServices(enterpriseID string) []stri
retServices = append(retServices, service)
}
}
return retServices
return retServices, nil
}
//CreateTenant create tenant

View File

@ -58,7 +58,7 @@ type ServiceHandler interface {
RollBack(rs *api_model.RollbackStruct) error
GetStatus(serviceID string) (*api_model.StatusList, error)
GetServicesStatus(tenantID string, services []string) []map[string]interface{}
GetEnterpriseRunningServices(enterpriseID string) []string
GetEnterpriseRunningServices(enterpriseID string) ([]string, *util.APIHandleError)
CreateTenant(*dbmodel.Tenants) error
CreateTenandIDAndName(eid string) (string, string, error)
GetPods(serviceID string) (*K8sPodInfos, error)

View File

@ -114,6 +114,7 @@ type RegionServiceSnapshot struct {
ServiceRelation []*dbmodel.TenantServiceRelation
ServiceStatus string
ServiceVolume []*dbmodel.TenantServiceVolume
ServiceConfigFile []*dbmodel.TenantServiceConfigFile
ServicePort []*dbmodel.TenantServicesPort
Versions []*dbmodel.VersionInfo

View File

@ -453,6 +453,9 @@ func (b *BackupAPPRestore) modify(appSnapshot *AppSnapshot) error {
for _, a := range app.ServiceVolume {
a.ServiceID = newServiceID
}
for _, a := range app.ServiceConfigFile {
a.ServiceID = newServiceID
}
for _, a := range app.ServicePort {
a.ServiceID = newServiceID
}
@ -588,6 +591,13 @@ func (b *BackupAPPRestore) restoreMetadata(appSnapshot *AppSnapshot) error {
}
b.volumeIDMap[oldVolumeID] = a.ID
}
for _, a := range app.ServiceConfigFile {
a.ID = 0
if err := db.GetManager().TenantServiceConfigFileDao().AddModel(a); err != nil {
tx.Rollback()
return fmt.Errorf("create app config file when restore backup errro. %s", err.Error())
}
}
for _, a := range app.ServicePort {
a.ID = 0
if err := db.GetManager().TenantServicesPortDaoTransactions(tx).AddModel(a); err != nil {
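The restore path applies the same two-step treatment to every snapshotted table, including the new config-file rows: modify() points each row at the freshly created service ID, then restoreMetadata() zeroes the primary key and re-inserts the row so the database assigns new IDs. A compressed sketch of that flow; Row and RowDao are placeholders for the per-table models and DAOs:

```go
package restore

import "fmt"

// Row stands in for any snapshotted model (ServiceVolume, ServiceConfigFile, ServicePort, ...).
type Row struct {
	ID        uint
	ServiceID string
}

// RowDao stands in for the per-table DAO used by restoreMetadata.
type RowDao interface {
	AddModel(*Row) error
}

// restoreRows remaps snapshotted rows to the restored service and re-inserts them.
func restoreRows(dao RowDao, rows []*Row, newServiceID string) error {
	for _, r := range rows {
		r.ServiceID = newServiceID // step 1 (modify): attach the row to the new service
		r.ID = 0                   // step 2 (restoreMetadata): let the insert allocate a fresh key
		if err := dao.AddModel(r); err != nil {
			return fmt.Errorf("restore row for service %s: %v", newServiceID, err)
		}
	}
	return nil
}
```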

View File

@ -120,7 +120,7 @@ func (g *GWServer) AddFlags(fs *pflag.FlagSet) {
fs.BoolVar(&g.Debug, "debug", false, "enable pprof debug")
fs.Uint64Var(&g.ShareMemory, "max-config-share-memory", 128, "Nginx maximum Shared memory size, which should be increased for larger clusters.")
fs.Float32Var(&g.SyncRateLimit, "sync-rate-limit", 0.3, "Define the sync frequency upper limit")
fs.StringArrayVar(&g.IgnoreInterface, "ignore-interface", []string{"docker0", "tunl0"}, "The network interface name that ignore by gateway")
fs.StringArrayVar(&g.IgnoreInterface, "ignore-interface", []string{"docker0", "tunl0", "cni0", "kube-ipvs0", "flannel"}, "The network interface name that ignore by gateway")
}
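The flag change only extends the default ignore list with interfaces that Kubernetes networking typically creates (cni0, kube-ipvs0, flannel), so the gateway does not treat their addresses as host IPs. A self-contained sketch of filtering interfaces against such a list; the prefix matching used here (so that "flannel" also covers names like "flannel.1") is an assumption, since the actual matching logic is not part of this hunk:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// hostIPs lists IPv4 addresses of all interfaces whose name is not in ignore.
func hostIPs(ignore []string) ([]net.IP, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return nil, err
	}
	var ips []net.IP
	for _, iface := range ifaces {
		if ignored(iface.Name, ignore) {
			continue
		}
		addrs, err := iface.Addrs()
		if err != nil {
			continue
		}
		for _, addr := range addrs {
			if ipnet, ok := addr.(*net.IPNet); ok && ipnet.IP.To4() != nil {
				ips = append(ips, ipnet.IP)
			}
		}
	}
	return ips, nil
}

// ignored reports whether the interface name starts with any ignore entry.
func ignored(name string, ignore []string) bool {
	for _, ig := range ignore {
		if strings.HasPrefix(name, ig) {
			return true
		}
	}
	return false
}

func main() {
	ips, err := hostIPs([]string{"docker0", "tunl0", "cni0", "kube-ipvs0", "flannel"})
	if err != nil {
		panic(err)
	}
	fmt.Println(ips)
}
```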
// SetLog sets log

View File

@ -80,6 +80,9 @@ func Run(s *option.GWServer) error {
if err != nil {
return fmt.Errorf("create gateway node manage failure %s", err.Error())
}
if err := node.Start(); err != nil {
return fmt.Errorf("start node manager: %v", err)
}
defer node.Stop()
reg := prometheus.NewRegistry()

View File

@ -195,14 +195,18 @@ func (d *DependServiceHealthController) checkEDS() bool {
logrus.Infof("cluster name: %s; ready: %v", serviceName, ready)
readyClusters[serviceName] = ready
}
for _, ignoreCheckEndpointsClusterName := range d.ignoreCheckEndpointsClusterName {
clusterNameInfo := strings.Split(ignoreCheckEndpointsClusterName, "_")
if len(clusterNameInfo) == 4 {
readyClusters[clusterNameInfo[2]] = true
}
}
for _, cn := range d.dependServiceNames {
if ready := readyClusters[cn]; !ready {
logrus.Infof("%s not ready.", cn)
return false
}
}
logrus.Info("all dependent services have been started.")
return true
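The new loop pre-marks clusters whose endpoint checks are explicitly ignored, so a dependency on them no longer blocks startup. The name layout assumed below (four "_"-separated fields with the dependency service name in the third field) is inferred from the clusterNameInfo[2] access above; the sample cluster name is made up:

```go
package main

import (
	"fmt"
	"strings"
)

// markIgnored pre-marks clusters as ready when endpoint health checking is
// explicitly skipped for them. The cluster name is assumed to have four
// "_"-separated fields with the dependency name in the third field.
func markIgnored(ready map[string]bool, ignoreClusters []string) {
	for _, name := range ignoreClusters {
		parts := strings.Split(name, "_")
		if len(parts) == 4 {
			ready[parts[2]] = true
		}
	}
}

// allReady reports whether every dependency appears ready.
func allReady(ready map[string]bool, deps []string) bool {
	for _, dep := range deps {
		if !ready[dep] {
			return false
		}
	}
	return true
}

func main() {
	ready := map[string]bool{"mysql": true}
	markIgnored(ready, []string{"tenant_plugin_redis_6379"}) // hypothetical cluster name
	fmt.Println(allReady(ready, []string{"mysql", "redis"})) // true: redis was skipped via the ignore list
}
```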

View File

@ -38,6 +38,14 @@ func main() {
if len(os.Args) > 1 && os.Args[1] == "version" {
cmd.ShowVersion("sidecar")
}
loggerFile, _ := os.Create("/var/log/sidecar.log")
if loggerFile != nil {
defer loggerFile.Close()
logrus.SetOutput(loggerFile)
}
if os.Getenv("DEBUG") == "true" {
logrus.SetLevel(logrus.DebugLevel)
}
if err := Run(); err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
@ -70,12 +78,10 @@ func run() {
configs := discoverConfig()
if configs != nil {
if hosts := getHosts(configs); hosts != nil {
if haveChange(hosts, oldHosts) {
if err := writeHosts(hosts); err != nil {
logrus.Errorf("write hosts failure %s", err.Error())
} else {
logrus.Infof("rewrite hosts file success")
}
logrus.Debugf("rewrite hosts file success, %+v", hosts)
oldHosts = hosts
}
}
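The sidecar now writes its log to a file when one can be created and switches to debug level when a DEBUG=true environment variable is set; the hosts file is rewritten only when its content actually changed. A small sketch of the logging part (the file path is illustrative; the repo vendors logrus under the older Sirupsen capitalization, while the import below uses the current lowercase path):

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

// setupLogging mirrors the sidecar change: log to a file when it can be
// created, and raise the level to debug when DEBUG=true is set.
func setupLogging(path string) *os.File {
	f, err := os.Create(path)
	if err == nil {
		logrus.SetOutput(f) // on error, keep the default stderr output
	}
	if os.Getenv("DEBUG") == "true" {
		logrus.SetLevel(logrus.DebugLevel)
	}
	return f // caller should defer f.Close() when non-nil
}

func main() {
	if f := setupLogging("/tmp/sidecar-example.log"); f != nil {
		defer f.Close()
	}
	logrus.Info("sidecar started")
	logrus.Debug("only visible with DEBUG=true")
}
```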

View File

@ -271,6 +271,7 @@ type TenantServiceVolumeDao interface {
//TenantServiceConfigFileDao tenant service config file dao interface
type TenantServiceConfigFileDao interface {
Dao
GetConfigFileByServiceID(serviceID string) ([]*model.TenantServiceConfigFile, error)
GetByVolumeName(sid, volumeName string) (*model.TenantServiceConfigFile, error)
DelByVolumeID(sid string, volumeName string) error
DelByServiceID(sid string) error

View File

@ -1126,6 +1126,15 @@ func (t *TenantServiceConfigFileDaoImpl) UpdateModel(mo model.Interface) error {
Update(configFile).Error
}
// GetConfigFileByServiceID -
func (t *TenantServiceConfigFileDaoImpl) GetConfigFileByServiceID(serviceID string) ([]*model.TenantServiceConfigFile, error) {
var configFiles []*model.TenantServiceConfigFile
if err := t.DB.Where("service_id=?", serviceID).Find(&configFiles).Error; err != nil {
return nil, err
}
return configFiles, nil
}
// GetByVolumeName get config file by volume name
func (t *TenantServiceConfigFileDaoImpl) GetByVolumeName(sid string, volumeName string) (*model.TenantServiceConfigFile, error) {
var res model.TenantServiceConfigFile

View File

@ -25,7 +25,9 @@ import (
"github.com/goodrain/rainbond/db/config"
"github.com/goodrain/rainbond/db/model"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mysql"
// import sql driver manually
_ "github.com/go-sql-driver/mysql"
_ "github.com/jinzhu/gorm/dialects/postgres"
)
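The underscore import exists purely for its side effect: the driver's init() registers itself with database/sql under the name "mysql" so sql.Open can find it. A minimal illustration (the DSN is a placeholder):

```go
package main

import (
	"database/sql"
	"fmt"

	// The blank import runs the driver's init(), which calls
	// sql.Register("mysql", ...) so sql.Open can resolve the name.
	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Nothing is dialed until the first query or Ping; the DSN here is illustrative.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	fmt.Println(sql.Drivers()) // includes "mysql" thanks to the blank import
}
```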

View File

@ -86,12 +86,16 @@ func (i *ipManager) IPInCurrentHost(in net.IP) bool {
}
func (i *ipManager) Start() error {
logrus.Info("start ip manager.")
go i.IPPool.LoopCheckIPs()
i.IPPool.Ready()
logrus.Info("ip manager is ready.")
go i.syncIP()
return nil
}
func (i *ipManager) syncIP() {
logrus.Debugf("start syncronizing ip.")
ips := i.IPPool.GetHostIPs()
i.updateIP(ips...)
for ipevent := range i.IPPool.GetWatchIPChan() {

View File

@ -68,26 +68,10 @@ func (c *Cadvisor) Name() string {
}
func (c *Cadvisor) toScrape() *prometheus.ScrapeConfig {
ts := make([]string, 0, len(c.sortedEndpoints))
for _, end := range c.sortedEndpoints {
ts = append(ts, end)
}
return &prometheus.ScrapeConfig{
JobName: c.Name(),
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: model.Duration(10 * time.Second),
MetricsPath: "/metrics/cadvisor",
ServiceDiscoveryConfig: prometheus.ServiceDiscoveryConfig{
StaticConfigs: []*prometheus.Group{
{
Targets: ts,
Labels: map[model.LabelName]model.LabelValue{
"component": model.LabelValue(c.Name()),
},
},
},
},
Scheme: "https",
HTTPClientConfig: prometheus.HTTPClientConfig{
TLSConfig: prometheus.TLSConfig{
@ -96,6 +80,31 @@ func (c *Cadvisor) toScrape() *prometheus.ScrapeConfig {
},
BearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token",
},
ServiceDiscoveryConfig: prometheus.ServiceDiscoveryConfig{
KubernetesSDConfigs: []*prometheus.SDConfig{
{
Role: "node",
},
},
},
RelabelConfigs: []*prometheus.RelabelConfig{
{
TargetLabel: "__address__",
Replacement: "kubernetes.default.svc:443",
},
{
SourceLabels: []model.LabelName{
"__meta_kubernetes_node_name",
},
Regex: prometheus.MustNewRegexp("(.+)"),
TargetLabel: "__metrics_path__",
Replacement: "/api/v1/nodes/${1}/proxy/metrics/cadvisor",
},
{
Action: prometheus.RelabelAction("labelmap"),
Regex: prometheus.MustNewRegexp("__meta_kubernetes_node_label_(.+)"),
},
},
}
}

View File

@ -0,0 +1,16 @@
package callback
import (
"gopkg.in/yaml.v2"
"testing"
)
func TestCadvisorYaml(t *testing.T) {
c := &Cadvisor{}
cfg := c.toScrape()
_, err := yaml.Marshal(cfg)
if err != nil {
t.Error(err)
t.FailNow()
}
}

View File

@ -184,7 +184,7 @@ func (d *Monitor) discoverEtcd(e *callback.Etcd, done <-chan struct{}) {
endpoints := make([]*config.Endpoint, 0, 5)
for _, member := range resp.Members {
if len(member.ClientURLs) > 1 {
if len(member.ClientURLs) >= 1 {
url := member.ClientURLs[0]
end := &config.Endpoint{
URL: url,
@ -192,7 +192,7 @@ func (d *Monitor) discoverEtcd(e *callback.Etcd, done <-chan struct{}) {
endpoints = append(endpoints, end)
}
}
logrus.Debugf("etcd endpoints: %+v", endpoints)
e.UpdateEndpoints(endpoints...)
}
}

View File

@ -181,6 +181,8 @@ type RelabelConfig struct {
type ServiceDiscoveryConfig struct {
// List of labeled target groups for this job.
StaticConfigs []*Group `yaml:"static_configs,omitempty"`
// List of Kubernetes service discovery configurations.
KubernetesSDConfigs []*SDConfig `yaml:"kubernetes_sd_configs,omitempty"`
}
type Group struct {
@ -194,12 +196,70 @@ type Group struct {
Source string `yaml:"source,omitempty"`
}
// SDConfig is the configuration for Kubernetes service discovery.
type SDConfig struct {
Role Role `yaml:"role"`
}
// Role is role of the service in Kubernetes.
type Role string
// The valid options for Role.
const (
RoleNode Role = "node"
RolePod Role = "pod"
RoleService Role = "service"
RoleEndpoint Role = "endpoints"
RoleIngress Role = "ingress"
)
// Regexp encapsulates a regexp.Regexp and makes it YAML marshallable.
type Regexp struct {
*regexp.Regexp
original string
}
// NewRegexp creates a new anchored Regexp and returns an error if the
// passed-in regular expression does not compile.
func NewRegexp(s string) (Regexp, error) {
regex, err := regexp.Compile("^(?:" + s + ")$")
return Regexp{
Regexp: regex,
original: s,
}, err
}
// MustNewRegexp works like NewRegexp, but panics if the regular expression does not compile.
func MustNewRegexp(s string) Regexp {
re, err := NewRegexp(s)
if err != nil {
panic(err)
}
return re
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
}
r, err := NewRegexp(s)
if err != nil {
return err
}
*re = r
return nil
}
// MarshalYAML implements the yaml.Marshaler interface.
func (re Regexp) MarshalYAML() (interface{}, error) {
if re.original != "" {
return re.original, nil
}
return nil, nil
}
// RelabelAction is the action to be performed on relabeling.
type RelabelAction string
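The Regexp wrapper is what lets the generated scrape config marshal cleanly: compiled patterns serialize back to their original, un-anchored source text, and unmarshalling re-anchors and recompiles them. A self-contained round-trip sketch using a trimmed copy of the wrapper (Rx, MustRx, and the relabel struct are local names for this example only):

```go
package main

import (
	"fmt"
	"regexp"

	"gopkg.in/yaml.v2"
)

// Rx is a trimmed copy of the Regexp wrapper above, enough to show the YAML round trip.
type Rx struct {
	*regexp.Regexp
	original string
}

// MustRx anchors and compiles the pattern, remembering the original text for marshalling.
func MustRx(s string) Rx {
	return Rx{Regexp: regexp.MustCompile("^(?:" + s + ")$"), original: s}
}

// MarshalYAML emits the original pattern, not the anchored form.
func (r Rx) MarshalYAML() (interface{}, error) { return r.original, nil }

// UnmarshalYAML recompiles the pattern read from YAML.
func (r *Rx) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var s string
	if err := unmarshal(&s); err != nil {
		return err
	}
	*r = MustRx(s)
	return nil
}

// relabel mirrors the shape of one relabel_configs entry.
type relabel struct {
	Regex       Rx     `yaml:"regex"`
	TargetLabel string `yaml:"target_label"`
}

func main() {
	out, err := yaml.Marshal(relabel{Regex: MustRx("(.+)"), TargetLabel: "__metrics_path__"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// regex: (.+)
	// target_label: __metrics_path__

	var back relabel
	if err := yaml.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Regex.MatchString("node-1")) // true: recompiled with ^(?: )$ anchors
}
```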

View File

@ -23,8 +23,11 @@ import (
"strings"
"github.com/Sirupsen/logrus"
"github.com/envoyproxy/go-control-plane/pkg/wellknown"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/duration"
apiv2 "github.com/envoyproxy/go-control-plane/envoy/api/v2"
v2 "github.com/envoyproxy/go-control-plane/envoy/api/v2"
auth "github.com/envoyproxy/go-control-plane/envoy/api/v2/auth"
cluster "github.com/envoyproxy/go-control-plane/envoy/api/v2/cluster"
core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
@ -36,12 +39,11 @@ import (
tcp_proxy "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/tcp_proxy/v2"
envoy_config_filter_udp_udp_proxy_v2alpha "github.com/envoyproxy/go-control-plane/envoy/config/filter/udp/udp_proxy/v2alpha"
configratelimit "github.com/envoyproxy/go-control-plane/envoy/config/ratelimit/v2"
_type "github.com/envoyproxy/go-control-plane/envoy/type"
"github.com/envoyproxy/go-control-plane/pkg/wellknown"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/duration"
v1 "github.com/goodrain/rainbond/node/core/envoy/v1"
corev1 "k8s.io/api/core/v1"
_type "github.com/envoyproxy/go-control-plane/envoy/type"
v1 "github.com/goodrain/rainbond/node/core/envoy/v1"
)
//DefaultLocalhostListenerAddress -
@ -210,9 +212,7 @@ func CreateHTTPListener(name, address, statPrefix string, port uint32, rateOpt *
Filters: []*envoy_api_v2_listener.Filter{
&envoy_api_v2_listener.Filter{
Name: wellknown.HTTPConnectionManager,
ConfigType: &envoy_api_v2_listener.Filter_Config{
Config: MessageToStruct(hcm),
},
ConfigType: &envoy_api_v2_listener.Filter_TypedConfig{TypedConfig: Message2Any(hcm)},
},
},
},
@ -508,7 +508,7 @@ func getEndpointsByLables(endpoints []*corev1.Endpoints, slabels map[string]stri
}
//CreateDNSLoadAssignment create dns loadAssignment
func CreateDNSLoadAssignment(serviceAlias, namespace, domain string, service *corev1.Service) *v2.ClusterLoadAssignment {
func CreateDNSLoadAssignment(serviceAlias, namespace, domain string, service *corev1.Service) *apiv2.ClusterLoadAssignment {
destServiceAlias := GetServiceAliasByService(service)
if destServiceAlias == "" {
logrus.Errorf("service alias is empty in k8s service %s", service.Name)
@ -530,7 +530,7 @@ func CreateDNSLoadAssignment(serviceAlias, namespace, domain string, service *co
},
})
lendpoints = append(lendpoints, &endpoint.LocalityLbEndpoints{LbEndpoints: lbe})
cla := &v2.ClusterLoadAssignment{
cla := &apiv2.ClusterLoadAssignment{
ClusterName: clusterName,
Endpoints: lendpoints,
}

View File

@ -24,19 +24,21 @@ import (
"strconv"
"strings"
"github.com/Sirupsen/logrus"
"github.com/envoyproxy/go-control-plane/pkg/cache"
"github.com/envoyproxy/go-control-plane/pkg/conversion"
"github.com/gogo/protobuf/proto"
"github.com/gogo/protobuf/types"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/duration"
"github.com/golang/protobuf/ptypes/wrappers"
v2 "github.com/envoyproxy/go-control-plane/envoy/api/v2"
route "github.com/envoyproxy/go-control-plane/envoy/api/v2/route"
"github.com/envoyproxy/go-control-plane/pkg/cache"
"github.com/envoyproxy/go-control-plane/pkg/conversion"
"github.com/Sirupsen/logrus"
"github.com/gogo/protobuf/proto"
"github.com/gogo/protobuf/types"
any "github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/duration"
_struct "github.com/golang/protobuf/ptypes/struct"
"github.com/golang/protobuf/ptypes/wrappers"
v1 "github.com/goodrain/rainbond/node/core/envoy/v1"
)
@ -50,6 +52,16 @@ func MessageToStruct(msg proto.Message) *_struct.Struct {
return s
}
// Message2Any converts from proto message to proto any
func Message2Any(msg proto.Message) *any.Any {
a, err := ptypes.MarshalAny(msg)
if err != nil {
logrus.Error(err.Error())
return &any.Any{}
}
return a
}
//ConversionUInt32 conversion uint32 to wrappers uint32
func ConversionUInt32(value uint32) *wrappers.UInt32Value {
return &wrappers.UInt32Value{
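Message2Any packs an arbitrary protobuf message into a google.protobuf.Any, which is what the listener filter's TypedConfig field now carries instead of the older struct-based Config. A generic round trip with the same golang/protobuf helpers (not Envoy-specific; the Duration message is just a convenient example):

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a concrete message (here a Duration) into an Any.
	msg := ptypes.DurationProto(5 * time.Second)
	a, err := ptypes.MarshalAny(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Unpack it again into a typed message.
	var back duration.Duration
	if err := ptypes.UnmarshalAny(a, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Seconds) // 5
}
```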

View File

@ -45,7 +45,8 @@ type IPPool struct {
EventCh chan IPEVENT
StopCh chan struct{}
ignoreInterfaceName []string
startReady bool
startReady chan struct{}
once sync.Once
}
const (
@ -64,21 +65,19 @@ func NewIPPool(ignoreInterfaceName []string) *IPPool {
ctx: ctx,
cancel: cancel,
HostIPs: map[string]net.IP{},
EventCh: make(chan IPEVENT, 20),
EventCh: make(chan IPEVENT, 1024),
StopCh: make(chan struct{}),
ignoreInterfaceName: ignoreInterfaceName,
startReady: false,
startReady: make(chan struct{}),
}
go ippool.LoopCheckIPs()
return ippool
}
//Ready ready
func (i *IPPool) Ready() bool {
for !i.startReady {
time.Sleep(time.Second * 1)
}
return i.startReady
logrus.Info("waiting ip pool start ready")
<-i.startReady
return true
}
//GetHostIPs get host ips
@ -105,6 +104,7 @@ func (i *IPPool) Close() {
//LoopCheckIPs loop check ips
func (i *IPPool) LoopCheckIPs() {
Exec(i.ctx, func() error {
logrus.Debugf("start loop watch ips from all interface")
ips, err := i.getInterfaceIPs()
if err != nil {
logrus.Errorf("get ip address from interface failure %s, will retry", err.Error())
@ -133,7 +133,9 @@ func (i *IPPool) LoopCheckIPs() {
}
logrus.Debugf("loop watch ips from all interface, find %d ips", len(newIP))
i.HostIPs = newIP
i.startReady = true
i.once.Do(func() {
close(i.startReady)
})
return nil
}, time.Second*5)
}
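Ready no longer polls a boolean every second: the pool closes a channel exactly once after the first interface scan, so any number of callers can block on it without racing. The pattern in isolation, independent of the IPPool type:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// readiness signals "first scan done" to any number of waiters.
type readiness struct {
	ready chan struct{}
	once  sync.Once
}

func newReadiness() *readiness { return &readiness{ready: make(chan struct{})} }

// markReady may be called on every loop iteration; only the first call closes the channel.
func (r *readiness) markReady() { r.once.Do(func() { close(r.ready) }) }

// wait blocks until markReady has been called at least once.
func (r *readiness) wait() { <-r.ready }

func main() {
	r := newReadiness()
	go func() {
		for i := 0; i < 3; i++ {
			time.Sleep(100 * time.Millisecond) // pretend to scan interfaces
			r.markReady()                      // safe to call repeatedly
		}
	}()
	r.wait()
	fmt.Println("first scan finished")
}
```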

View File

@ -1,5 +0,0 @@
[client]
user = gotest
password = secret
host = 127.0.0.1
port = 3307

View File

@ -1,8 +0,0 @@
#!/bin/sh
while :
do
sleep 3
if mysql -e 'select version()'; then
break
fi
done

View File

@ -13,53 +13,77 @@
Aaron Hopkins <go-sql-driver at die.net>
Achille Roussel <achille.roussel at gmail.com>
Alexey Palazhchenko <alexey.palazhchenko at gmail.com>
Andrew Reid <andrew.reid at tixtrack.com>
Arne Hormann <arnehormann at gmail.com>
Asta Xie <xiemengjun at gmail.com>
Bulat Gaifullin <gaifullinbf at gmail.com>
Carlos Nieto <jose.carlos at menteslibres.net>
Chris Moos <chris at tech9computers.com>
Craig Wilson <craiggwilson at gmail.com>
Daniel Montoya <dsmontoyam at gmail.com>
Daniel Nichter <nil at codenode.com>
Daniël van Eeden <git at myname.nl>
Dave Protasowski <dprotaso at gmail.com>
DisposaBoy <disposaboy at dby.me>
Egor Smolyakov <egorsmkv at gmail.com>
Erwan Martin <hello at erwan.io>
Evan Shaw <evan at vendhq.com>
Frederick Mayle <frederickmayle at gmail.com>
Gustavo Kristic <gkristic at gmail.com>
Hajime Nakagami <nakagami at gmail.com>
Hanno Braun <mail at hannobraun.com>
Henri Yandell <flamefew at gmail.com>
Hirotaka Yamamoto <ymmt2005 at gmail.com>
Huyiguang <hyg at webterren.com>
ICHINOSE Shogo <shogo82148 at gmail.com>
Ilia Cimpoes <ichimpoesh at gmail.com>
INADA Naoki <songofacandy at gmail.com>
Jacek Szwec <szwec.jacek at gmail.com>
James Harr <james.harr at gmail.com>
Jeff Hodges <jeff at somethingsimilar.com>
Jeffrey Charles <jeffreycharles at gmail.com>
Jerome Meyer <jxmeyer at gmail.com>
Jiajia Zhong <zhong2plus at gmail.com>
Jian Zhen <zhenjl at gmail.com>
Joshua Prunier <joshua.prunier at gmail.com>
Julien Lefevre <julien.lefevr at gmail.com>
Julien Schmidt <go-sql-driver at julienschmidt.com>
Justin Li <jli at j-li.net>
Justin Nuß <nuss.justin at gmail.com>
Kamil Dziedzic <kamil at klecza.pl>
Kevin Malachowski <kevin at chowski.com>
Kieron Woodhouse <kieron.woodhouse at infosum.com>
Lennart Rudolph <lrudolph at hmc.edu>
Leonardo YongUk Kim <dalinaum at gmail.com>
Linh Tran Tuan <linhduonggnu at gmail.com>
Lion Yang <lion at aosc.xyz>
Luca Looz <luca.looz92 at gmail.com>
Lucas Liu <extrafliu at gmail.com>
Luke Scott <luke at webconnex.com>
Maciej Zimnoch <maciej.zimnoch@codilime.com>
Maciej Zimnoch <maciej.zimnoch at codilime.com>
Michael Woolnough <michael.woolnough at gmail.com>
Nathanial Murphy <nathanial.murphy at gmail.com>
Nicola Peduzzi <thenikso at gmail.com>
Olivier Mengué <dolmen at cpan.org>
oscarzhao <oscarzhaosl at gmail.com>
Paul Bonser <misterpib at gmail.com>
Peter Schultz <peter.schultz at classmarkets.com>
Rebecca Chin <rchin at pivotal.io>
Reed Allman <rdallman10 at gmail.com>
Richard Wilkes <wilkes at me.com>
Robert Russell <robert at rrbrussell.com>
Runrioter Wung <runrioter at gmail.com>
Shuode Li <elemount at qq.com>
Simon J Mudd <sjmudd at pobox.com>
Soroush Pour <me at soroushjp.com>
Stan Putrya <root.vagner at gmail.com>
Stanley Gunawan <gunawan.stanley at gmail.com>
Steven Hartland <steven.hartland at multiplay.co.uk>
Thomas Wodarek <wodarekwebpage at gmail.com>
Tim Ruffles <timruffles at gmail.com>
Tom Jenkinson <tom at tjenkinson.me>
Vladimir Kovpak <cn007b at gmail.com>
Xiangyu Hu <xiangyu.hu at outlook.com>
Xiaobing Jiang <s7v7nislands at gmail.com>
Xiuming Chen <cc at cxm.cc>
@ -68,7 +92,14 @@ Zhenye Xie <xiezhenye at gmail.com>
# Organizations
Barracuda Networks, Inc.
Counting Ltd.
DigitalOcean Inc.
Facebook Inc.
GitHub Inc.
Google Inc.
InfoSum Ltd.
Keybase Inc.
Multiplay Ltd.
Percona LLC
Pivotal Inc.
Stripe Inc.

206 vendor/github.com/go-sql-driver/mysql/CHANGELOG.md generated vendored Normal file
View File

@ -0,0 +1,206 @@
## Version 1.5 (2020-01-07)
Changes:
- Dropped support Go 1.9 and lower (#823, #829, #886, #1016, #1017)
- Improve buffer handling (#890)
- Document potentially insecure TLS configs (#901)
- Use a double-buffering scheme to prevent data races (#943)
- Pass uint64 values without converting them to string (#838, #955)
- Update collations and make utf8mb4 default (#877, #1054)
- Make NullTime compatible with sql.NullTime in Go 1.13+ (#995)
- Removed CloudSQL support (#993, #1007)
- Add Go Module support (#1003)
New Features:
- Implement support of optional TLS (#900)
- Check connection liveness (#934, #964, #997, #1048, #1051, #1052)
- Implement Connector Interface (#941, #958, #1020, #1035)
Bugfixes:
- Mark connections as bad on error during ping (#875)
- Mark connections as bad on error during dial (#867)
- Fix connection leak caused by rapid context cancellation (#1024)
- Mark connections as bad on error during Conn.Prepare (#1030)
## Version 1.4.1 (2018-11-14)
Bugfixes:
- Fix TIME format for binary columns (#818)
- Fix handling of empty auth plugin names (#835)
- Fix caching_sha2_password with empty password (#826)
- Fix canceled context broke mysqlConn (#862)
- Fix OldAuthSwitchRequest support (#870)
- Fix Auth Response packet for cleartext password (#887)
## Version 1.4 (2018-06-03)
Changes:
- Documentation fixes (#530, #535, #567)
- Refactoring (#575, #579, #580, #581, #603, #615, #704)
- Cache column names (#444)
- Sort the DSN parameters in DSNs generated from a config (#637)
- Allow native password authentication by default (#644)
- Use the default port if it is missing in the DSN (#668)
- Removed the `strict` mode (#676)
- Do not query `max_allowed_packet` by default (#680)
- Dropped support Go 1.6 and lower (#696)
- Updated `ConvertValue()` to match the database/sql/driver implementation (#760)
- Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783)
- Improved the compatibility of the authentication system (#807)
New Features:
- Multi-Results support (#537)
- `rejectReadOnly` DSN option (#604)
- `context.Context` support (#608, #612, #627, #761)
- Transaction isolation level support (#619, #744)
- Read-Only transactions support (#618, #634)
- `NewConfig` function which initializes a config with default values (#679)
- Implemented the `ColumnType` interfaces (#667, #724)
- Support for custom string types in `ConvertValue` (#623)
- Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710)
- `caching_sha2_password` authentication plugin support (#794, #800, #801, #802)
- Implemented `driver.SessionResetter` (#779)
- `sha256_password` authentication plugin support (#808)
Bugfixes:
- Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718)
- Fixed LOAD LOCAL DATA INFILE for empty files (#590)
- Removed columns definition cache since it sometimes cached invalid data (#592)
- Don't mutate registered TLS configs (#600)
- Make RegisterTLSConfig concurrency-safe (#613)
- Handle missing auth data in the handshake packet correctly (#646)
- Do not retry queries when data was written to avoid data corruption (#302, #736)
- Cache the connection pointer for error handling before invalidating it (#678)
- Fixed imports for appengine/cloudsql (#700)
- Fix sending STMT_LONG_DATA for 0 byte data (#734)
- Set correct capacity for []bytes read from length-encoded strings (#766)
- Make RegisterDial concurrency-safe (#773)
## Version 1.3 (2016-12-01)
Changes:
- Go 1.1 is no longer supported
- Use decimals fields in MySQL to format time types (#249)
- Buffer optimizations (#269)
- TLS ServerName defaults to the host (#283)
- Refactoring (#400, #410, #437)
- Adjusted documentation for second generation CloudSQL (#485)
- Documented DSN system var quoting rules (#502)
- Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
New Features:
- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- Support for returning table alias on Columns() (#289, #359, #382)
- Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490)
- Support for uint64 parameters with high bit set (#332, #345)
- Cleartext authentication plugin support (#327)
- Exported ParseDSN function and the Config struct (#403, #419, #429)
- Read / Write timeouts (#401)
- Support for JSON field type (#414)
- Support for multi-statements and multi-results (#411, #431)
- DSN parameter to set the driver-side max_allowed_packet value manually (#489)
- Native password authentication plugin support (#494, #524)
Bugfixes:
- Fixed handling of queries without columns and rows (#255)
- Fixed a panic when SetKeepAlive() failed (#298)
- Handle ERR packets while reading rows (#321)
- Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
- Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
- Actually zero out bytes in handshake response (#378)
- Fixed race condition in registering LOAD DATA INFILE handler (#383)
- Fixed tests with MySQL 5.7.9+ (#380)
- QueryUnescape TLS config names (#397)
- Fixed "broken pipe" error by writing to closed socket (#390)
- Fixed LOAD LOCAL DATA INFILE buffering (#424)
- Fixed parsing of floats into float64 when placeholders are used (#434)
- Fixed DSN tests with Go 1.7+ (#459)
- Handle ERR packets while waiting for EOF (#473)
- Invalidate connection on error while discarding additional results (#513)
- Allow terminating packets of length 0 (#516)
## Version 1.2 (2014-06-03)
Changes:
- We switched back to a "rolling release". `go get` installs the current master branch again
- Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
- Exported errors to allow easy checking from application code
- Enabled TCP Keepalives on TCP connections
- Optimized INFILE handling (better buffer size calculation, lazy init, ...)
- The DSN parser also checks for a missing separating slash
- Faster binary date / datetime to string formatting
- Also exported the MySQLWarning type
- mysqlConn.Close returns the first error encountered instead of ignoring all errors
- writePacket() automatically writes the packet size to the header
- readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets
New Features:
- `RegisterDial` allows the usage of a custom dial function to establish the network connection
- Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
- Logging of critical errors is configurable with `SetLogger`
- Google CloudSQL support
Bugfixes:
- Allow more than 32 parameters in prepared statements
- Various old_password fixes
- Fixed TestConcurrent test to pass Go's race detection
- Fixed appendLengthEncodedInteger for large numbers
- Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)
## Version 1.1 (2013-11-02)
Changes:
- Go-MySQL-Driver now requires Go 1.1
- Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
- Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
- `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
- DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
- Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
- Optimized the buffer for reading
- stmt.Query now caches column metadata
- New Logo
- Changed the copyright header to include all contributors
- Improved the LOAD INFILE documentation
- The driver struct is now exported to make the driver directly accessible
- Refactored the driver tests
- Added more benchmarks and moved all to a separate file
- Other small refactoring
New Features:
- Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
- Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
- Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used
Bugfixes:
- Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
- Convert to DB timezone when inserting `time.Time`
- Splitted packets (more than 16MB) are now merged correctly
- Fixed false positive `io.EOF` errors when the data was fully read
- Avoid panics on reuse of closed connections
- Fixed empty string producing false nil values
- Fixed sign byte for positive TIME fields
## Version 1.0 (2013-05-14)
Initial Release

373 vendor/github.com/go-sql-driver/mysql/LICENSE generated vendored Normal file
View File

@ -0,0 +1,373 @@
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

501 vendor/github.com/go-sql-driver/mysql/README.md generated vendored Normal file
View File

@ -0,0 +1,501 @@
# Go-MySQL-Driver
A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
---------------------------------------
* [Features](#features)
* [Requirements](#requirements)
* [Installation](#installation)
* [Usage](#usage)
* [DSN (Data Source Name)](#dsn-data-source-name)
* [Password](#password)
* [Protocol](#protocol)
* [Address](#address)
* [Parameters](#parameters)
* [Examples](#examples)
* [Connection pool and timeouts](#connection-pool-and-timeouts)
* [context.Context Support](#contextcontext-support)
* [ColumnType Support](#columntype-support)
* [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
* [time.Time support](#timetime-support)
* [Unicode support](#unicode-support)
* [Testing / Development](#testing--development)
* [License](#license)
---------------------------------------
## Features
* Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
* Native Go implementation. No C-bindings, just pure Go
* Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
* Automatic handling of broken connections
* Automatic Connection Pooling *(by database/sql package)*
* Supports queries larger than 16MB
* Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
* Intelligent `LONG DATA` handling in prepared statements
* Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
* Optional `time.Time` parsing
* Optional placeholder interpolation
## Requirements
* Go 1.10 or higher. We aim to support the 3 latest versions of Go.
* MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
---------------------------------------
## Installation
Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
```bash
$ go get -u github.com/go-sql-driver/mysql
```
Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
## Usage
_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](https://golang.org/pkg/database/sql/) API then.
Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
```go
import "database/sql"
import _ "github.com/go-sql-driver/mysql"
db, err := sql.Open("mysql", "user:password@/dbname")
```
[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
### DSN (Data Source Name)
The Data Source Name has a common format, like e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets):
```
[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
```
A DSN in its fullest form:
```
username:password@protocol(address)/dbname?param=value
```
Except for the database name, all values are optional. So the minimal DSN is:
```
/dbname
```
If you do not want to preselect a database, leave `dbname` empty:
```
/
```
This has the same effect as an empty DSN string:
```
```
Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
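A short sketch of the struct-based alternative mentioned just above; the connection details are placeholders:

```go
package main

import (
	"fmt"

	"github.com/go-sql-driver/mysql"
)

func main() {
	cfg := mysql.NewConfig() // starts from the documented defaults
	cfg.User = "user"
	cfg.Passwd = "password"
	cfg.Net = "tcp"
	cfg.Addr = "127.0.0.1:3306"
	cfg.DBName = "dbname"
	cfg.ParseTime = true

	fmt.Println(cfg.FormatDSN())
	// user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true
}
```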
#### Password
Passwords can consist of any character. Escaping is **not** necessary.
#### Protocol
See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
In general you should use an Unix domain socket if available and TCP otherwise for best performance.
#### Address
For TCP and UDP networks, addresses have the form `host[:port]`.
If `port` is omitted, the default port will be used.
If `host` is a literal IPv6 address, it must be enclosed in square brackets.
The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
#### Parameters
*Parameters are case-sensitive!*
Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
##### `allowAllFiles`
```
Type: bool
Valid Values: true, false
Default: false
```
`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
##### `allowCleartextPasswords`
```
Type: bool
Valid Values: true, false
Default: false
```
`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
##### `allowNativePasswords`
```
Type: bool
Valid Values: true, false
Default: true
```
`allowNativePasswords=false` disallows the usage of MySQL native password method.
##### `allowOldPasswords`
```
Type: bool
Valid Values: true, false
Default: false
```
`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
##### `charset`
```
Type: string
Valid Values: <name>
Default: none
```
Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
Unless you need the fallback behavior, please use `collation` instead.
##### `checkConnLiveness`
```
Type: bool
Valid Values: true, false
Default: true
```
On supported platforms connections retrieved from the connection pool are checked for liveness before using them. If the check fails, the respective connection is marked as bad and the query retried with another connection.
`checkConnLiveness=false` disables this liveness check of connections.
##### `collation`
```
Type: string
Valid Values: <name>
Default: utf8mb4_general_ci
```
Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
A list of valid charsets for a server is retrievable with `SHOW COLLATION`.
The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You should use an older collation (e.g. `utf8_general_ci`) for older MySQL.
Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
##### `clientFoundRows`
```
Type: bool
Valid Values: true, false
Default: false
```
`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
##### `columnsWithAlias`
```
Type: bool
Valid Values: true, false
Default: false
```
When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
```
SELECT u.id FROM users as u
```
will return `u.id` instead of just `id` if `columnsWithAlias=true`.
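As a quick illustration, a sketch assuming placeholder DSN credentials and a hypothetical `users` table:
```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname?columnsWithAlias=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("SELECT u.id FROM users AS u")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	// With columnsWithAlias=true the alias is part of the column name.
	cols, err := rows.Columns()
	if err != nil {
		log.Fatal(err)
	}
	log.Println(cols) // prints [u.id] instead of [id]
}
```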
##### `interpolateParams`
```
Type: bool
Valid Values: true, false
Default: false
```
If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of roundtrips: with `interpolateParams=false`, the driver has to prepare a statement, execute it with the given parameters, and close the statement again.
*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
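A minimal sketch, assuming a reachable server and a hypothetical `users` table; only the DSN changes, the query code stays the same:
```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// interpolateParams=true: the driver expands the `?` placeholders
	// client-side and sends a single query packet per call.
	db, err := sql.Open("mysql", "user:password@/dbname?interpolateParams=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var name string
	if err := db.QueryRow("SELECT name FROM users WHERE id = ?", 42).Scan(&name); err != nil {
		log.Fatal(err)
	}
	log.Println(name)
}
```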
##### `loc`
```
Type: string
Valid Values: <escaped name>
Default: UTC
```
Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
Please keep in mind that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example, `US/Pacific` would be `loc=US%2FPacific`.
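For illustration (credentials and the `events` table are placeholders), `parseTime` and an escaped `loc` value combine like this:
```go
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// loc=US%2FPacific is the URL-escaped form of "US/Pacific";
	// parseTime=true makes DATETIME columns scan into time.Time.
	dsn := "user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true&loc=US%2FPacific"
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var created time.Time
	if err := db.QueryRow("SELECT created_at FROM events WHERE id = ?", 1).Scan(&created); err != nil {
		log.Fatal(err)
	}
	log.Println(created.Location(), created)
}
```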
##### `maxAllowedPacket`
```
Type: decimal number
Default: 4194304
```
Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
##### `multiStatements`
```
Type: bool
Valid Values: true, false
Default: false
```
Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
When `multiStatements` is used, `?` parameters must only be used in the first statement.
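A minimal sketch, assuming a disposable test database; the batch deliberately avoids `?` placeholders:
```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname?multiStatements=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Both statements travel in a single roundtrip; only the result
	// of the first statement is returned.
	_, err = db.Exec(
		"CREATE TABLE IF NOT EXISTS t1 (id INT); " +
			"INSERT INTO t1 VALUES (1), (2)",
	)
	if err != nil {
		log.Fatal(err)
	}
}
```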
##### `parseTime`
```
Type: bool
Valid Values: true, false
Default: false
```
`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`.
A date or datetime like `0000-00-00 00:00:00` is converted into the zero value of `time.Time`.
##### `readTimeout`
```
Type: duration
Default: 0
```
I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
##### `rejectReadOnly`
```
Type: bool
Valid Values: true, false
Default: false
```
`rejectReadOnly=true` causes the driver to reject read-only connections. This
guards against a possible race condition during an automatic failover, where the MySQL
client gets connected to a read-only replica after the failover.
Note that this should be a fairly rare case, as an automatic failover normally
happens when the primary is down, and the race condition shouldn't happen
unless it comes back up online as soon as the failover is kicked off. On the
other hand, when this happens, a MySQL application can get stuck on a
read-only connection until restarted. It is however fairly easy to reproduce,
for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
If you are not relying on read-only transactions to reject writes that aren't
supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
is safer for failovers.
Note that ERROR 1290 can be returned for a `read-only` server and this option will
cause a retry for that error. However the same error number is used for some
other cases. You should ensure your application will never cause an ERROR 1290
except for `read-only` mode when enabling this option.
##### `serverPubKey`
```
Type: string
Valid Values: <name>
Default: none
```
Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN.
Public keys are used to transmit encrypted data, e.g. for authentication.
If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required.
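A sketch of loading a key from a PEM file (the path is a placeholder) and selecting it by name in the DSN:
```go
package main

import (
	"crypto/rsa"
	"crypto/x509"
	"database/sql"
	"encoding/pem"
	"io/ioutil"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	data, err := ioutil.ReadFile("mykey.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(data)
	if block == nil || block.Type != "PUBLIC KEY" {
		log.Fatal("failed to decode PEM block containing public key")
	}
	pub, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	rsaPubKey, ok := pub.(*rsa.PublicKey)
	if !ok {
		log.Fatal("not an RSA public key")
	}

	// Register under a name, then reference it via serverPubKey=<name>.
	mysql.RegisterServerPubKey("mykey", rsaPubKey)

	db, err := sql.Open("mysql", "user:password@/dbname?serverPubKey=mykey")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```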
##### `timeout`
```
Type: duration
Default: OS default
```
Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
##### `tls`
```
Type: bool / string
Valid Values: true, false, skip-verify, preferred, <name>
Default: false
```
`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side) or use `preferred` to use TLS only when advertised by the server. This is similar to `skip-verify`, but additionally allows a fallback to a connection which is not encrypted. Neither `skip-verify` nor `preferred` add any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
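For a custom setup, a `tls.Config` can be registered under a name and selected with `tls=<name>`; a sketch assuming a CA certificate at a placeholder path:
```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"io/ioutil"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	rootCertPool := x509.NewCertPool()
	pem, err := ioutil.ReadFile("/path/to/ca-cert.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
		log.Fatal("failed to append CA certificate")
	}

	// Register the config as "custom" and select it with tls=custom.
	if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool}); err != nil {
		log.Fatal(err)
	}

	db, err := sql.Open("mysql", "user:password@tcp(db.example.com:3306)/dbname?tls=custom")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```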
##### `writeTimeout`
```
Type: duration
Default: 0
```
I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
##### System Variables
Any other parameters are interpreted as system variables:
* `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
* `<enum_var>=<value>`: `SET <enum_var>=<value>`
* `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
Rules:
* The values for string variables must be quoted with `'`.
* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
(which implies values of string variables must be wrapped with `%27`).
Examples:
* `autocommit=1`: `SET autocommit=1`
* [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
* [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
#### Examples
```
user@unix(/path/to/socket)/dbname
```
```
root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
```
```
user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
```
Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
```
user:password@/dbname?sql_mode=TRADITIONAL
```
TCP via IPv6:
```
user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
```
TCP on a remote host, e.g. Amazon RDS:
```
id:password@tcp(your-amazonaws-uri.com:3306)/dbname
```
Google Cloud SQL on App Engine:
```
user:password@unix(/cloudsql/project-id:region-name:instance-name)/dbname
```
TCP using default port (3306) on localhost:
```
user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
```
Use the default protocol (tcp) and host (localhost:3306):
```
user:password@/dbname
```
No Database preselected:
```
user:password@/
```
### Connection pool and timeouts
The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
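A small sketch of combining both layers, with illustrative values:
```go
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// timeout/readTimeout/writeTimeout act per connection (DSN);
	// the pool settings act on the *sql.DB as a whole.
	dsn := "user:password@tcp(127.0.0.1:3306)/dbname?timeout=5s&readTimeout=30s&writeTimeout=30s"
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	db.SetMaxOpenConns(25)
	db.SetMaxIdleConns(25)
	db.SetConnMaxLifetime(5 * time.Minute)

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```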
## `ColumnType` Support
This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
## `context.Context` Support
Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
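A minimal sketch of a per-query timeout (the DSN is a placeholder):
```go
package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The query is cancelled if it does not finish within two seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	var now string
	if err := db.QueryRowContext(ctx, "SELECT NOW()").Scan(&now); err != nil {
		log.Fatal(err)
	}
	log.Println(now)
}
```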
### `LOAD DATA LOCAL INFILE` support
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
```go
import "github.com/go-sql-driver/mysql"
```
Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
To use an `io.Reader`, a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns an `io.Reader` or `io.ReadCloser`. The Reader is then available with the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when you don't need a handler anymore.
See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
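A rough sketch, assuming the server permits `LOCAL INFILE` and using placeholder file and table names:
```go
package main

import (
	"database/sql"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Whitelist the file before referencing it in the query.
	mysql.RegisterLocalFile("/tmp/data.csv") // placeholder path

	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec("LOAD DATA LOCAL INFILE '/tmp/data.csv' INTO TABLE mytable"); err != nil {
		log.Fatal(err)
	}
}
```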
### `time.Time` support
The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
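As an illustration (table and column names assumed), scanning a nullable `DATETIME` column with `NullTime`:
```go
package main

import (
	"database/sql"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname?parseTime=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// NullTime handles NULL values, which a plain time.Time cannot represent.
	var deletedAt mysql.NullTime
	if err := db.QueryRow("SELECT deleted_at FROM users WHERE id = ?", 1).Scan(&deletedAt); err != nil {
		log.Fatal(err)
	}
	if deletedAt.Valid {
		log.Println("deleted at:", deletedAt.Time)
	} else {
		log.Println("not deleted")
	}
}
```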
### Unicode support
Since version 1.5 Go-MySQL-Driver automatically uses the collation `utf8mb4_general_ci` by default.
Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred for setting a collation / charset other than the default.
See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support.
## Testing / Development
To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
Go-MySQL-Driver is not feature-complete yet. Your help is very much appreciated.
If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details.
---------------------------------------
## License
Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
Mozilla summarizes the license scope as follows:
> MPL: The copyleft applies to any files containing MPLed code.
That means:
* You can **use** the **unchanged** source code both in private and commercially.
* When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
* You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")


@ -1,19 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build appengine
package mysql
import (
"appengine/cloudsql"
)
func init() {
RegisterDial("cloudsql", cloudsql.Dial)
}

vendor/github.com/go-sql-driver/mysql/auth.go generated vendored Normal file

@ -0,0 +1,422 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"crypto/rand"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"crypto/x509"
"encoding/pem"
"sync"
)
// server pub keys registry
var (
serverPubKeyLock sync.RWMutex
serverPubKeyRegistry map[string]*rsa.PublicKey
)
// RegisterServerPubKey registers a server RSA public key which can be used to
// send data in a secure manner to the server without receiving the public key
// in a potentially insecure way from the server first.
// Registered keys can afterwards be used adding serverPubKey=<name> to the DSN.
//
// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
// after registering it and may not be modified.
//
// data, err := ioutil.ReadFile("mykey.pem")
// if err != nil {
// log.Fatal(err)
// }
//
// block, _ := pem.Decode(data)
// if block == nil || block.Type != "PUBLIC KEY" {
// log.Fatal("failed to decode PEM block containing public key")
// }
//
// pub, err := x509.ParsePKIXPublicKey(block.Bytes)
// if err != nil {
// log.Fatal(err)
// }
//
// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok {
// mysql.RegisterServerPubKey("mykey", rsaPubKey)
// } else {
// log.Fatal("not a RSA public key")
// }
//
func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) {
serverPubKeyLock.Lock()
if serverPubKeyRegistry == nil {
serverPubKeyRegistry = make(map[string]*rsa.PublicKey)
}
serverPubKeyRegistry[name] = pubKey
serverPubKeyLock.Unlock()
}
// DeregisterServerPubKey removes the public key registered with the given name.
func DeregisterServerPubKey(name string) {
serverPubKeyLock.Lock()
if serverPubKeyRegistry != nil {
delete(serverPubKeyRegistry, name)
}
serverPubKeyLock.Unlock()
}
func getServerPubKey(name string) (pubKey *rsa.PublicKey) {
serverPubKeyLock.RLock()
if v, ok := serverPubKeyRegistry[name]; ok {
pubKey = v
}
serverPubKeyLock.RUnlock()
return
}
// Hash password using pre 4.1 (old password) method
// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
type myRnd struct {
seed1, seed2 uint32
}
const myRndMaxVal = 0x3FFFFFFF
// Pseudo random number generator
func newMyRnd(seed1, seed2 uint32) *myRnd {
return &myRnd{
seed1: seed1 % myRndMaxVal,
seed2: seed2 % myRndMaxVal,
}
}
// Tested to be equivalent to MariaDB's floating point variant
// http://play.golang.org/p/QHvhd4qved
// http://play.golang.org/p/RG0q4ElWDx
func (r *myRnd) NextByte() byte {
r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
return byte(uint64(r.seed1) * 31 / myRndMaxVal)
}
// Generate binary hash from byte string using insecure pre 4.1 method
func pwHash(password []byte) (result [2]uint32) {
var add uint32 = 7
var tmp uint32
result[0] = 1345345333
result[1] = 0x12345671
for _, c := range password {
// skip spaces and tabs in password
if c == ' ' || c == '\t' {
continue
}
tmp = uint32(c)
result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
result[1] += (result[1] << 8) ^ result[0]
add += tmp
}
// Remove sign bit with mask (1<<31)-1
result[0] &= 0x7FFFFFFF
result[1] &= 0x7FFFFFFF
return
}
// Hash password using insecure pre 4.1 method
func scrambleOldPassword(scramble []byte, password string) []byte {
if len(password) == 0 {
return nil
}
scramble = scramble[:8]
hashPw := pwHash([]byte(password))
hashSc := pwHash(scramble)
r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
var out [8]byte
for i := range out {
out[i] = r.NextByte() + 64
}
mask := r.NextByte()
for i := range out {
out[i] ^= mask
}
return out[:]
}
// Hash password using 4.1+ method (SHA1)
func scramblePassword(scramble []byte, password string) []byte {
if len(password) == 0 {
return nil
}
// stage1Hash = SHA1(password)
crypt := sha1.New()
crypt.Write([]byte(password))
stage1 := crypt.Sum(nil)
// scrambleHash = SHA1(scramble + SHA1(stage1Hash))
// inner Hash
crypt.Reset()
crypt.Write(stage1)
hash := crypt.Sum(nil)
// outer Hash
crypt.Reset()
crypt.Write(scramble)
crypt.Write(hash)
scramble = crypt.Sum(nil)
// token = scrambleHash XOR stage1Hash
for i := range scramble {
scramble[i] ^= stage1[i]
}
return scramble
}
// Hash password using MySQL 8+ method (SHA256)
func scrambleSHA256Password(scramble []byte, password string) []byte {
if len(password) == 0 {
return nil
}
// XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble))
crypt := sha256.New()
crypt.Write([]byte(password))
message1 := crypt.Sum(nil)
crypt.Reset()
crypt.Write(message1)
message1Hash := crypt.Sum(nil)
crypt.Reset()
crypt.Write(message1Hash)
crypt.Write(scramble)
message2 := crypt.Sum(nil)
for i := range message1 {
message1[i] ^= message2[i]
}
return message1
}
func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) {
plain := make([]byte, len(password)+1)
copy(plain, password)
for i := range plain {
j := i % len(seed)
plain[i] ^= seed[j]
}
sha1 := sha1.New()
return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
}
func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
if err != nil {
return err
}
return mc.writeAuthSwitchPacket(enc)
}
func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
switch plugin {
case "caching_sha2_password":
authResp := scrambleSHA256Password(authData, mc.cfg.Passwd)
return authResp, nil
case "mysql_old_password":
if !mc.cfg.AllowOldPasswords {
return nil, ErrOldPassword
}
// Note: there are edge cases where this should work but doesn't;
// this is currently "wontfix":
// https://github.com/go-sql-driver/mysql/issues/184
authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0)
return authResp, nil
case "mysql_clear_password":
if !mc.cfg.AllowCleartextPasswords {
return nil, ErrCleartextPassword
}
// http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
// http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
return append([]byte(mc.cfg.Passwd), 0), nil
case "mysql_native_password":
if !mc.cfg.AllowNativePasswords {
return nil, ErrNativePassword
}
// https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
// Native password authentication only needs and will need a 20-byte challenge.
authResp := scramblePassword(authData[:20], mc.cfg.Passwd)
return authResp, nil
case "sha256_password":
if len(mc.cfg.Passwd) == 0 {
return []byte{0}, nil
}
if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
// write cleartext auth packet
return append([]byte(mc.cfg.Passwd), 0), nil
}
pubKey := mc.cfg.pubKey
if pubKey == nil {
// request public key from server
return []byte{1}, nil
}
// encrypted password
enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
return enc, err
default:
errLog.Print("unknown auth plugin:", plugin)
return nil, ErrUnknownPlugin
}
}
func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
// Read Result Packet
authData, newPlugin, err := mc.readAuthResult()
if err != nil {
return err
}
// handle auth plugin switch, if requested
if newPlugin != "" {
// If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
// sent and we have to keep using the cipher sent in the init packet.
if authData == nil {
authData = oldAuthData
} else {
// copy data from read buffer to owned slice
copy(oldAuthData, authData)
}
plugin = newPlugin
authResp, err := mc.auth(authData, plugin)
if err != nil {
return err
}
if err = mc.writeAuthSwitchPacket(authResp); err != nil {
return err
}
// Read Result Packet
authData, newPlugin, err = mc.readAuthResult()
if err != nil {
return err
}
// Do not allow to change the auth plugin more than once
if newPlugin != "" {
return ErrMalformPkt
}
}
switch plugin {
// https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
case "caching_sha2_password":
switch len(authData) {
case 0:
return nil // auth successful
case 1:
switch authData[0] {
case cachingSha2PasswordFastAuthSuccess:
if err = mc.readResultOK(); err == nil {
return nil // auth successful
}
case cachingSha2PasswordPerformFullAuthentication:
if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
// write cleartext auth packet
err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0))
if err != nil {
return err
}
} else {
pubKey := mc.cfg.pubKey
if pubKey == nil {
// request public key from server
data, err := mc.buf.takeSmallBuffer(4 + 1)
if err != nil {
return err
}
data[4] = cachingSha2PasswordRequestPublicKey
mc.writePacket(data)
// parse public key
if data, err = mc.readPacket(); err != nil {
return err
}
block, _ := pem.Decode(data[1:])
pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
return err
}
pubKey = pkix.(*rsa.PublicKey)
}
// send encrypted password
err = mc.sendEncryptedPassword(oldAuthData, pubKey)
if err != nil {
return err
}
}
return mc.readResultOK()
default:
return ErrMalformPkt
}
default:
return ErrMalformPkt
}
case "sha256_password":
switch len(authData) {
case 0:
return nil // auth successful
default:
block, _ := pem.Decode(authData)
pub, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
return err
}
// send encrypted password
err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey))
if err != nil {
return err
}
return mc.readResultOK()
}
default:
return nil // auth successful
}
return err
}

vendor/github.com/go-sql-driver/mysql/auth_test.go generated vendored Normal file

File diff suppressed because it is too large


@ -1,93 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build go1.8
package mysql
import (
"context"
"database/sql"
"fmt"
"runtime"
"testing"
)
func benchmarkQueryContext(b *testing.B, db *sql.DB, p int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
tb := (*TB)(b)
stmt := tb.checkStmt(db.PrepareContext(ctx, "SELECT val FROM foo WHERE id=?"))
defer stmt.Close()
b.SetParallelism(p)
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
var got string
for pb.Next() {
tb.check(stmt.QueryRow(1).Scan(&got))
if got != "one" {
b.Fatalf("query = %q; want one", got)
}
}
})
}
func BenchmarkQueryContext(b *testing.B) {
db := initDB(b,
"DROP TABLE IF EXISTS foo",
"CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
`INSERT INTO foo VALUES (1, "one")`,
`INSERT INTO foo VALUES (2, "two")`,
)
defer db.Close()
for _, p := range []int{1, 2, 3, 4} {
b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
benchmarkQueryContext(b, db, p)
})
}
}
func benchmarkExecContext(b *testing.B, db *sql.DB, p int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
tb := (*TB)(b)
stmt := tb.checkStmt(db.PrepareContext(ctx, "DO 1"))
defer stmt.Close()
b.SetParallelism(p)
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
if _, err := stmt.ExecContext(ctx); err != nil {
b.Fatal(err)
}
}
})
}
func BenchmarkExecContext(b *testing.B) {
db := initDB(b,
"DROP TABLE IF EXISTS foo",
"CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
`INSERT INTO foo VALUES (1, "one")`,
`INSERT INTO foo VALUES (2, "two")`,
)
defer db.Close()
for _, p := range []int{1, 2, 3, 4} {
b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
benchmarkQueryContext(b, db, p)
})
}
}


@ -10,9 +10,12 @@ package mysql
import (
"bytes"
"context"
"database/sql"
"database/sql/driver"
"fmt"
"math"
"runtime"
"strings"
"sync"
"sync/atomic"
@ -48,13 +51,9 @@ func initDB(b *testing.B, queries ...string) *sql.DB {
db := tb.checkDB(sql.Open("mysql", dsn))
for _, query := range queries {
if _, err := db.Exec(query); err != nil {
if w, ok := err.(MySQLWarnings); ok {
b.Logf("warning on %q: %v", query, w)
} else {
b.Fatalf("error on %q: %v", query, err)
}
}
}
return db
}
@ -244,3 +243,131 @@ func BenchmarkInterpolation(b *testing.B) {
}
}
}
func benchmarkQueryContext(b *testing.B, db *sql.DB, p int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
tb := (*TB)(b)
stmt := tb.checkStmt(db.PrepareContext(ctx, "SELECT val FROM foo WHERE id=?"))
defer stmt.Close()
b.SetParallelism(p)
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
var got string
for pb.Next() {
tb.check(stmt.QueryRow(1).Scan(&got))
if got != "one" {
b.Fatalf("query = %q; want one", got)
}
}
})
}
func BenchmarkQueryContext(b *testing.B) {
db := initDB(b,
"DROP TABLE IF EXISTS foo",
"CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
`INSERT INTO foo VALUES (1, "one")`,
`INSERT INTO foo VALUES (2, "two")`,
)
defer db.Close()
for _, p := range []int{1, 2, 3, 4} {
b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
benchmarkQueryContext(b, db, p)
})
}
}
func benchmarkExecContext(b *testing.B, db *sql.DB, p int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
tb := (*TB)(b)
stmt := tb.checkStmt(db.PrepareContext(ctx, "DO 1"))
defer stmt.Close()
b.SetParallelism(p)
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
if _, err := stmt.ExecContext(ctx); err != nil {
b.Fatal(err)
}
}
})
}
func BenchmarkExecContext(b *testing.B) {
db := initDB(b,
"DROP TABLE IF EXISTS foo",
"CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
`INSERT INTO foo VALUES (1, "one")`,
`INSERT INTO foo VALUES (2, "two")`,
)
defer db.Close()
for _, p := range []int{1, 2, 3, 4} {
b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
benchmarkQueryContext(b, db, p)
})
}
}
// BenchmarkQueryRawBytes benchmarks fetching 100 blobs using sql.RawBytes.
// "size=" means size of each blobs.
func BenchmarkQueryRawBytes(b *testing.B) {
var sizes []int = []int{100, 1000, 2000, 4000, 8000, 12000, 16000, 32000, 64000, 256000}
db := initDB(b,
"DROP TABLE IF EXISTS bench_rawbytes",
"CREATE TABLE bench_rawbytes (id INT PRIMARY KEY, val LONGBLOB)",
)
defer db.Close()
blob := make([]byte, sizes[len(sizes)-1])
for i := range blob {
blob[i] = 42
}
for i := 0; i < 100; i++ {
_, err := db.Exec("INSERT INTO bench_rawbytes VALUES (?, ?)", i, blob)
if err != nil {
b.Fatal(err)
}
}
for _, s := range sizes {
b.Run(fmt.Sprintf("size=%v", s), func(b *testing.B) {
db.SetMaxIdleConns(0)
db.SetMaxIdleConns(1)
b.ReportAllocs()
b.ResetTimer()
for j := 0; j < b.N; j++ {
rows, err := db.Query("SELECT LEFT(val, ?) as v FROM bench_rawbytes", s)
if err != nil {
b.Fatal(err)
}
nrows := 0
for rows.Next() {
var buf sql.RawBytes
err := rows.Scan(&buf)
if err != nil {
b.Fatal(err)
}
if len(buf) != s {
b.Fatalf("size mismatch: expected %v, got %v", s, len(buf))
}
nrows++
}
rows.Close()
if nrows != 100 {
b.Fatalf("numbers of rows mismatch: expected %v, got %v", 100, nrows)
}
}
})
}
}


@ -15,47 +15,69 @@ import (
)
const defaultBufSize = 4096
const maxCachedBufSize = 256 * 1024
// A buffer which is used for both reading and writing.
// This is possible since communication on each connection is synchronous.
// In other words, we can't write and read simultaneously on the same connection.
// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
// Also highly optimized for this particular use case.
// This buffer is backed by two byte slices in a double-buffering scheme
type buffer struct {
buf []byte
buf []byte // buf is a byte buffer whose length and capacity are equal.
nc net.Conn
idx int
length int
timeout time.Duration
dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer
flipcnt uint // flipcnt is the current buffer counter for double-buffering
}
// newBuffer allocates and returns a new buffer.
func newBuffer(nc net.Conn) buffer {
var b [defaultBufSize]byte
fg := make([]byte, defaultBufSize)
return buffer{
buf: b[:],
buf: fg,
nc: nc,
dbuf: [2][]byte{fg, nil},
}
}
// flip replaces the active buffer with the background buffer
// this is a delayed flip that simply increases the buffer counter;
// the actual flip will be performed the next time we call `buffer.fill`
func (b *buffer) flip() {
b.flipcnt += 1
}
// fill reads into the buffer until at least _need_ bytes are in it
func (b *buffer) fill(need int) error {
n := b.length
// fill data into its double-buffering target: if we've called
// flip on this buffer, we'll be copying to the background buffer,
// and then filling it with network data; otherwise we'll just move
// the contents of the current buffer to the front before filling it
dest := b.dbuf[b.flipcnt&1]
// move existing data to the beginning
if n > 0 && b.idx > 0 {
copy(b.buf[0:n], b.buf[b.idx:])
}
// grow buffer if necessary
// TODO: let the buffer shrink again at some point
// Maybe keep the org buf slice and swap back?
if need > len(b.buf) {
// grow buffer if necessary to fit the whole packet.
if need > len(dest) {
// Round up to the next multiple of the default size
newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
copy(newBuf, b.buf)
b.buf = newBuf
dest = make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
// if the allocated buffer is not too large, move it to backing storage
// to prevent extra allocations on applications that perform large reads
if len(dest) <= maxCachedBufSize {
b.dbuf[b.flipcnt&1] = dest
}
}
// if we're filling the fg buffer, move the existing data to the start of it.
// if we're filling the bg buffer, copy over the data
if n > 0 {
copy(dest[:n], b.buf[b.idx:])
}
b.buf = dest
b.idx = 0
for {
@ -105,43 +127,56 @@ func (b *buffer) readNext(need int) ([]byte, error) {
return b.buf[offset:b.idx], nil
}
// returns a buffer with the requested size.
// takeBuffer returns a buffer with the requested size.
// If possible, a slice from the existing buffer is returned.
// Otherwise a bigger buffer is made.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeBuffer(length int) []byte {
func (b *buffer) takeBuffer(length int) ([]byte, error) {
if b.length > 0 {
return nil
return nil, ErrBusyBuffer
}
// test (cheap) general case first
if length <= defaultBufSize || length <= cap(b.buf) {
return b.buf[:length]
if length <= cap(b.buf) {
return b.buf[:length], nil
}
if length < maxPacketSize {
b.buf = make([]byte, length)
return b.buf
}
return make([]byte, length)
return b.buf, nil
}
// shortcut which can be used if the requested buffer is guaranteed to be
// smaller than defaultBufSize
// Only one buffer (total) can be used at a time.
func (b *buffer) takeSmallBuffer(length int) []byte {
if b.length == 0 {
return b.buf[:length]
// buffer is larger than we want to store.
return make([]byte, length), nil
}
return nil
// takeSmallBuffer is shortcut which can be used if length is
// known to be smaller than defaultBufSize.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
if b.length > 0 {
return nil, ErrBusyBuffer
}
return b.buf[:length], nil
}
// takeCompleteBuffer returns the complete existing buffer.
// This can be used if the necessary buffer size is unknown.
// cap and len of the returned buffer will be equal.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeCompleteBuffer() []byte {
if b.length == 0 {
return b.buf
func (b *buffer) takeCompleteBuffer() ([]byte, error) {
if b.length > 0 {
return nil, ErrBusyBuffer
}
return b.buf, nil
}
// store stores buf, an updated buffer, if it's suitable to do so.
func (b *buffer) store(buf []byte) error {
if b.length > 0 {
return ErrBusyBuffer
} else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
b.buf = buf[:cap(buf)]
}
return nil
}


@ -8,11 +8,18 @@
package mysql
const defaultCollation = "utf8_general_ci"
const defaultCollation = "utf8mb4_general_ci"
const binaryCollation = "binary"
// A list of available collations mapped to the internal ID.
// To update this map use the following MySQL query:
// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS
// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS WHERE ID<256 ORDER BY ID
//
// Handshake packet has only 1 byte for collation_id. So we can't use collations with ID > 255.
//
// ucs2, utf16, and utf32 can't be used for connection charset.
// https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset
// They are commented out to reduce this map.
var collations = map[string]byte{
"big5_chinese_ci": 1,
"latin2_czech_cs": 2,
@ -47,7 +54,7 @@ var collations = map[string]byte{
"armscii8_general_ci": 32,
"utf8_general_ci": 33,
"cp1250_czech_cs": 34,
"ucs2_general_ci": 35,
//"ucs2_general_ci": 35,
"cp866_general_ci": 36,
"keybcs2_general_ci": 37,
"macce_general_ci": 38,
@ -66,15 +73,15 @@ var collations = map[string]byte{
"cp1251_general_ci": 51,
"cp1251_general_cs": 52,
"macroman_bin": 53,
"utf16_general_ci": 54,
"utf16_bin": 55,
"utf16le_general_ci": 56,
//"utf16_general_ci": 54,
//"utf16_bin": 55,
//"utf16le_general_ci": 56,
"cp1256_general_ci": 57,
"cp1257_bin": 58,
"cp1257_general_ci": 59,
"utf32_general_ci": 60,
"utf32_bin": 61,
"utf16le_bin": 62,
//"utf32_general_ci": 60,
//"utf32_bin": 61,
//"utf16le_bin": 62,
"binary": 63,
"armscii8_bin": 64,
"ascii_bin": 65,
@ -88,6 +95,7 @@ var collations = map[string]byte{
"keybcs2_bin": 73,
"koi8r_bin": 74,
"koi8u_bin": 75,
"utf8_tolower_ci": 76,
"latin2_bin": 77,
"latin5_bin": 78,
"latin7_bin": 79,
@ -101,7 +109,7 @@ var collations = map[string]byte{
"gbk_bin": 87,
"sjis_bin": 88,
"tis620_bin": 89,
"ucs2_bin": 90,
//"ucs2_bin": 90,
"ujis_bin": 91,
"geostd8_general_ci": 92,
"geostd8_bin": 93,
@ -111,79 +119,79 @@ var collations = map[string]byte{
"eucjpms_japanese_ci": 97,
"eucjpms_bin": 98,
"cp1250_polish_ci": 99,
"utf16_unicode_ci": 101,
"utf16_icelandic_ci": 102,
"utf16_latvian_ci": 103,
"utf16_romanian_ci": 104,
"utf16_slovenian_ci": 105,
"utf16_polish_ci": 106,
"utf16_estonian_ci": 107,
"utf16_spanish_ci": 108,
"utf16_swedish_ci": 109,
"utf16_turkish_ci": 110,
"utf16_czech_ci": 111,
"utf16_danish_ci": 112,
"utf16_lithuanian_ci": 113,
"utf16_slovak_ci": 114,
"utf16_spanish2_ci": 115,
"utf16_roman_ci": 116,
"utf16_persian_ci": 117,
"utf16_esperanto_ci": 118,
"utf16_hungarian_ci": 119,
"utf16_sinhala_ci": 120,
"utf16_german2_ci": 121,
"utf16_croatian_ci": 122,
"utf16_unicode_520_ci": 123,
"utf16_vietnamese_ci": 124,
"ucs2_unicode_ci": 128,
"ucs2_icelandic_ci": 129,
"ucs2_latvian_ci": 130,
"ucs2_romanian_ci": 131,
"ucs2_slovenian_ci": 132,
"ucs2_polish_ci": 133,
"ucs2_estonian_ci": 134,
"ucs2_spanish_ci": 135,
"ucs2_swedish_ci": 136,
"ucs2_turkish_ci": 137,
"ucs2_czech_ci": 138,
"ucs2_danish_ci": 139,
"ucs2_lithuanian_ci": 140,
"ucs2_slovak_ci": 141,
"ucs2_spanish2_ci": 142,
"ucs2_roman_ci": 143,
"ucs2_persian_ci": 144,
"ucs2_esperanto_ci": 145,
"ucs2_hungarian_ci": 146,
"ucs2_sinhala_ci": 147,
"ucs2_german2_ci": 148,
"ucs2_croatian_ci": 149,
"ucs2_unicode_520_ci": 150,
"ucs2_vietnamese_ci": 151,
"ucs2_general_mysql500_ci": 159,
"utf32_unicode_ci": 160,
"utf32_icelandic_ci": 161,
"utf32_latvian_ci": 162,
"utf32_romanian_ci": 163,
"utf32_slovenian_ci": 164,
"utf32_polish_ci": 165,
"utf32_estonian_ci": 166,
"utf32_spanish_ci": 167,
"utf32_swedish_ci": 168,
"utf32_turkish_ci": 169,
"utf32_czech_ci": 170,
"utf32_danish_ci": 171,
"utf32_lithuanian_ci": 172,
"utf32_slovak_ci": 173,
"utf32_spanish2_ci": 174,
"utf32_roman_ci": 175,
"utf32_persian_ci": 176,
"utf32_esperanto_ci": 177,
"utf32_hungarian_ci": 178,
"utf32_sinhala_ci": 179,
"utf32_german2_ci": 180,
"utf32_croatian_ci": 181,
"utf32_unicode_520_ci": 182,
"utf32_vietnamese_ci": 183,
//"utf16_unicode_ci": 101,
//"utf16_icelandic_ci": 102,
//"utf16_latvian_ci": 103,
//"utf16_romanian_ci": 104,
//"utf16_slovenian_ci": 105,
//"utf16_polish_ci": 106,
//"utf16_estonian_ci": 107,
//"utf16_spanish_ci": 108,
//"utf16_swedish_ci": 109,
//"utf16_turkish_ci": 110,
//"utf16_czech_ci": 111,
//"utf16_danish_ci": 112,
//"utf16_lithuanian_ci": 113,
//"utf16_slovak_ci": 114,
//"utf16_spanish2_ci": 115,
//"utf16_roman_ci": 116,
//"utf16_persian_ci": 117,
//"utf16_esperanto_ci": 118,
//"utf16_hungarian_ci": 119,
//"utf16_sinhala_ci": 120,
//"utf16_german2_ci": 121,
//"utf16_croatian_ci": 122,
//"utf16_unicode_520_ci": 123,
//"utf16_vietnamese_ci": 124,
//"ucs2_unicode_ci": 128,
//"ucs2_icelandic_ci": 129,
//"ucs2_latvian_ci": 130,
//"ucs2_romanian_ci": 131,
//"ucs2_slovenian_ci": 132,
//"ucs2_polish_ci": 133,
//"ucs2_estonian_ci": 134,
//"ucs2_spanish_ci": 135,
//"ucs2_swedish_ci": 136,
//"ucs2_turkish_ci": 137,
//"ucs2_czech_ci": 138,
//"ucs2_danish_ci": 139,
//"ucs2_lithuanian_ci": 140,
//"ucs2_slovak_ci": 141,
//"ucs2_spanish2_ci": 142,
//"ucs2_roman_ci": 143,
//"ucs2_persian_ci": 144,
//"ucs2_esperanto_ci": 145,
//"ucs2_hungarian_ci": 146,
//"ucs2_sinhala_ci": 147,
//"ucs2_german2_ci": 148,
//"ucs2_croatian_ci": 149,
//"ucs2_unicode_520_ci": 150,
//"ucs2_vietnamese_ci": 151,
//"ucs2_general_mysql500_ci": 159,
//"utf32_unicode_ci": 160,
//"utf32_icelandic_ci": 161,
//"utf32_latvian_ci": 162,
//"utf32_romanian_ci": 163,
//"utf32_slovenian_ci": 164,
//"utf32_polish_ci": 165,
//"utf32_estonian_ci": 166,
//"utf32_spanish_ci": 167,
//"utf32_swedish_ci": 168,
//"utf32_turkish_ci": 169,
//"utf32_czech_ci": 170,
//"utf32_danish_ci": 171,
//"utf32_lithuanian_ci": 172,
//"utf32_slovak_ci": 173,
//"utf32_spanish2_ci": 174,
//"utf32_roman_ci": 175,
//"utf32_persian_ci": 176,
//"utf32_esperanto_ci": 177,
//"utf32_hungarian_ci": 178,
//"utf32_sinhala_ci": 179,
//"utf32_german2_ci": 180,
//"utf32_croatian_ci": 181,
//"utf32_unicode_520_ci": 182,
//"utf32_vietnamese_ci": 183,
"utf8_unicode_ci": 192,
"utf8_icelandic_ci": 193,
"utf8_latvian_ci": 194,
@ -233,6 +241,10 @@ var collations = map[string]byte{
"utf8mb4_croatian_ci": 245,
"utf8mb4_unicode_520_ci": 246,
"utf8mb4_vietnamese_ci": 247,
"gb18030_chinese_ci": 248,
"gb18030_bin": 249,
"gb18030_unicode_520_ci": 250,
"utf8mb4_0900_ai_ci": 255,
}
// A blacklist of collations which is unsafe to interpolate parameters.
@ -247,4 +259,7 @@ var unsafeCollations = map[string]bool{
"sjis_bin": true,
"cp932_japanese_ci": true,
"cp932_bin": true,
"gb18030_chinese_ci": true,
"gb18030_bin": true,
"gb18030_unicode_520_ci": true,
}

vendor/github.com/go-sql-driver/mysql/conncheck.go generated vendored Normal file

@ -0,0 +1,54 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build linux darwin dragonfly freebsd netbsd openbsd solaris illumos
package mysql
import (
"errors"
"io"
"net"
"syscall"
)
var errUnexpectedRead = errors.New("unexpected read from socket")
func connCheck(conn net.Conn) error {
var sysErr error
sysConn, ok := conn.(syscall.Conn)
if !ok {
return nil
}
rawConn, err := sysConn.SyscallConn()
if err != nil {
return err
}
err = rawConn.Read(func(fd uintptr) bool {
var buf [1]byte
n, err := syscall.Read(int(fd), buf[:])
switch {
case n == 0 && err == nil:
sysErr = io.EOF
case n > 0:
sysErr = errUnexpectedRead
case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK:
sysErr = nil
default:
sysErr = err
}
return true
})
if err != nil {
return err
}
return sysErr
}


@ -1,18 +1,17 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build !go1.7
// +build !linux,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!illumos
package mysql
import "crypto/tls"
import "net"
func cloneTLSConfig(c *tls.Config) *tls.Config {
clone := *c
return &clone
func connCheck(conn net.Conn) error {
return nil
}


@ -0,0 +1,38 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build linux darwin dragonfly freebsd netbsd openbsd solaris illumos
package mysql
import (
"testing"
"time"
)
func TestStaleConnectionChecks(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
dbt.mustExec("SET @@SESSION.wait_timeout = 2")
if err := dbt.db.Ping(); err != nil {
dbt.Fatal(err)
}
// wait for MySQL to close our connection
time.Sleep(3 * time.Second)
tx, err := dbt.db.Begin()
if err != nil {
dbt.Fatal(err)
}
if err := tx.Rollback(); err != nil {
dbt.Fatal(err)
}
})
}


@ -9,6 +9,8 @@
package mysql
import (
"context"
"database/sql"
"database/sql/driver"
"io"
"net"
@ -17,19 +19,10 @@ import (
"time"
)
// a copy of context.Context for Go 1.7 and earlier
type mysqlContext interface {
Done() <-chan struct{}
Err() error
// defined in context.Context, but not used in this driver:
// Deadline() (deadline time.Time, ok bool)
// Value(key interface{}) interface{}
}
type mysqlConn struct {
buf buffer
netConn net.Conn
rawConn net.Conn // underlying connection when netConn is TLS connection.
affectedRows uint64
insertId uint64
cfg *Config
@ -40,11 +33,11 @@ type mysqlConn struct {
status statusFlag
sequence uint8
parseTime bool
strict bool
reset bool // set when the Go SQL package calls ResetSession
// for context support (Go 1.8+)
watching bool
watcher chan<- mysqlContext
watcher chan<- context.Context
closech chan struct{}
finished chan<- struct{}
canceled atomicError // set non-nil if conn is canceled
@ -92,11 +85,21 @@ func (mc *mysqlConn) markBadConn(err error) error {
}
func (mc *mysqlConn) Begin() (driver.Tx, error) {
return mc.begin(false)
}
func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
err := mc.exec("START TRANSACTION")
var q string
if readOnly {
q = "START TRANSACTION READ ONLY"
} else {
q = "START TRANSACTION"
}
err := mc.exec(q)
if err == nil {
return &mysqlTx{mc}, err
}
@ -151,7 +154,9 @@ func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
// Send command
err := mc.writeCommandPacketStr(comStmtPrepare, query)
if err != nil {
return nil, mc.markBadConn(err)
// STMT_PREPARE is safe to retry. So we can return ErrBadConn here.
errLog.Print(err)
return nil, driver.ErrBadConn
}
stmt := &mysqlStmt{
@ -181,10 +186,10 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
return "", driver.ErrSkip
}
buf := mc.buf.takeCompleteBuffer()
if buf == nil {
buf, err := mc.buf.takeCompleteBuffer()
if err != nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
errLog.Print(err)
return "", ErrInvalidConn
}
buf = buf[:0]
@ -210,6 +215,9 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
switch v := arg.(type) {
case int64:
buf = strconv.AppendInt(buf, v, 10)
case uint64:
// Handle uint64 explicitly because our custom ConvertValue emits unsigned values
buf = strconv.AppendUint(buf, v, 10)
case float64:
buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
case bool:
@ -394,6 +402,7 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error)
return nil, err
}
}
// Columns
rows.rs.columns, err = mc.readColumns(resLen)
return rows, err
@ -449,3 +458,194 @@ func (mc *mysqlConn) finish() {
case <-mc.closech:
}
}
// Ping implements driver.Pinger interface
func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return driver.ErrBadConn
}
if err = mc.watchCancel(ctx); err != nil {
return
}
defer mc.finish()
if err = mc.writeCommandPacket(comPing); err != nil {
return mc.markBadConn(err)
}
return mc.readResultOK()
}
// BeginTx implements driver.ConnBeginTx interface
func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
if err := mc.watchCancel(ctx); err != nil {
return nil, err
}
defer mc.finish()
if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
level, err := mapIsolationLevel(opts.Isolation)
if err != nil {
return nil, err
}
err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
if err != nil {
return nil, err
}
}
return mc.begin(opts.ReadOnly)
}
func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
dargs, err := namedValueToValue(args)
if err != nil {
return nil, err
}
if err := mc.watchCancel(ctx); err != nil {
return nil, err
}
rows, err := mc.query(query, dargs)
if err != nil {
mc.finish()
return nil, err
}
rows.finish = mc.finish
return rows, err
}
func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
dargs, err := namedValueToValue(args)
if err != nil {
return nil, err
}
if err := mc.watchCancel(ctx); err != nil {
return nil, err
}
defer mc.finish()
return mc.Exec(query, dargs)
}
func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
if err := mc.watchCancel(ctx); err != nil {
return nil, err
}
stmt, err := mc.Prepare(query)
mc.finish()
if err != nil {
return nil, err
}
select {
default:
case <-ctx.Done():
stmt.Close()
return nil, ctx.Err()
}
return stmt, nil
}
func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
dargs, err := namedValueToValue(args)
if err != nil {
return nil, err
}
if err := stmt.mc.watchCancel(ctx); err != nil {
return nil, err
}
rows, err := stmt.query(dargs)
if err != nil {
stmt.mc.finish()
return nil, err
}
rows.finish = stmt.mc.finish
return rows, err
}
func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
dargs, err := namedValueToValue(args)
if err != nil {
return nil, err
}
if err := stmt.mc.watchCancel(ctx); err != nil {
return nil, err
}
defer stmt.mc.finish()
return stmt.Exec(dargs)
}
func (mc *mysqlConn) watchCancel(ctx context.Context) error {
if mc.watching {
// Reach here if canceled,
// so the connection is already invalid
mc.cleanup()
return nil
}
// When ctx is already cancelled, don't watch it.
if err := ctx.Err(); err != nil {
return err
}
// When ctx is not cancellable, don't watch it.
if ctx.Done() == nil {
return nil
}
// When watcher is not alive, can't watch it.
if mc.watcher == nil {
return nil
}
mc.watching = true
mc.watcher <- ctx
return nil
}
func (mc *mysqlConn) startWatcher() {
watcher := make(chan context.Context, 1)
mc.watcher = watcher
finished := make(chan struct{})
mc.finished = finished
go func() {
for {
var ctx context.Context
select {
case ctx = <-watcher:
case <-mc.closech:
return
}
select {
case <-ctx.Done():
mc.cancel(ctx.Err())
case <-finished:
case <-mc.closech:
return
}
}
}()
}
func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
nv.Value, err = converter{}.ConvertValue(nv.Value)
return
}
// ResetSession implements driver.SessionResetter.
// (From Go 1.10)
func (mc *mysqlConn) ResetSession(ctx context.Context) error {
if mc.closed.IsSet() {
return driver.ErrBadConn
}
mc.reset = true
return nil
}


@ -1,204 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build go1.8
package mysql
import (
"context"
"database/sql"
"database/sql/driver"
"errors"
)
// Ping implements driver.Pinger interface
func (mc *mysqlConn) Ping(ctx context.Context) error {
if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return driver.ErrBadConn
}
if err := mc.watchCancel(ctx); err != nil {
return err
}
defer mc.finish()
if err := mc.writeCommandPacket(comPing); err != nil {
return err
}
if _, err := mc.readResultOK(); err != nil {
return err
}
return nil
}
// BeginTx implements driver.ConnBeginTx interface
func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
if opts.ReadOnly {
// TODO: support read-only transactions
return nil, errors.New("mysql: read-only transactions not supported")
}
if err := mc.watchCancel(ctx); err != nil {
return nil, err
}
defer mc.finish()
if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
level, err := mapIsolationLevel(opts.Isolation)
if err != nil {
return nil, err
}
err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
if err != nil {
return nil, err
}
}
return mc.Begin()
}
func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
dargs, err := namedValueToValue(args)
if err != nil {
return nil, err
}
if err := mc.watchCancel(ctx); err != nil {
return nil, err
}
rows, err := mc.query(query, dargs)
if err != nil {
mc.finish()
return nil, err
}
rows.finish = mc.finish
return rows, err
}
func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
dargs, err := namedValueToValue(args)
if err != nil {
return nil, err
}
if err := mc.watchCancel(ctx); err != nil {
return nil, err
}
defer mc.finish()
return mc.Exec(query, dargs)
}
func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
if err := mc.watchCancel(ctx); err != nil {
return nil, err
}
stmt, err := mc.Prepare(query)
mc.finish()
if err != nil {
return nil, err
}
select {
default:
case <-ctx.Done():
stmt.Close()
return nil, ctx.Err()
}
return stmt, nil
}
func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
dargs, err := namedValueToValue(args)
if err != nil {
return nil, err
}
if err := stmt.mc.watchCancel(ctx); err != nil {
return nil, err
}
rows, err := stmt.query(dargs)
if err != nil {
stmt.mc.finish()
return nil, err
}
rows.finish = stmt.mc.finish
return rows, err
}
func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
dargs, err := namedValueToValue(args)
if err != nil {
return nil, err
}
if err := stmt.mc.watchCancel(ctx); err != nil {
return nil, err
}
defer stmt.mc.finish()
return stmt.Exec(dargs)
}
func (mc *mysqlConn) watchCancel(ctx context.Context) error {
if mc.watching {
// Reach here if canceled,
// so the connection is already invalid
mc.cleanup()
return nil
}
if ctx.Done() == nil {
return nil
}
mc.watching = true
select {
default:
case <-ctx.Done():
return ctx.Err()
}
if mc.watcher == nil {
return nil
}
mc.watcher <- ctx
return nil
}
func (mc *mysqlConn) startWatcher() {
watcher := make(chan mysqlContext, 1)
mc.watcher = watcher
finished := make(chan struct{})
mc.finished = finished
go func() {
for {
var ctx mysqlContext
select {
case ctx = <-watcher:
case <-mc.closech:
return
}
select {
case <-ctx.Done():
mc.cancel(ctx.Err())
case <-finished:
case <-mc.closech:
return
}
}
}()
}


@ -9,7 +9,10 @@
package mysql
import (
"context"
"database/sql/driver"
"errors"
"net"
"testing"
)
@ -65,3 +68,108 @@ func TestInterpolateParamsPlaceholderInString(t *testing.T) {
t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q)
}
}
func TestInterpolateParamsUint64(t *testing.T) {
mc := &mysqlConn{
buf: newBuffer(nil),
maxAllowedPacket: maxPacketSize,
cfg: &Config{
InterpolateParams: true,
},
}
q, err := mc.interpolateParams("SELECT ?", []driver.Value{uint64(42)})
if err != nil {
t.Errorf("Expected err=nil, got err=%#v, q=%#v", err, q)
}
if q != "SELECT 42" {
t.Errorf("Expected uint64 interpolation to work, got q=%#v", q)
}
}
func TestCheckNamedValue(t *testing.T) {
value := driver.NamedValue{Value: ^uint64(0)}
x := &mysqlConn{}
err := x.CheckNamedValue(&value)
if err != nil {
t.Fatal("uint64 high-bit not convertible", err)
}
if value.Value != ^uint64(0) {
t.Fatalf("uint64 high-bit converted, got %#v %T", value.Value, value.Value)
}
}
// TestCleanCancel tests passed context is cancelled at start.
// No packet should be sent. Connection should keep current status.
func TestCleanCancel(t *testing.T) {
mc := &mysqlConn{
closech: make(chan struct{}),
}
mc.startWatcher()
defer mc.cleanup()
ctx, cancel := context.WithCancel(context.Background())
cancel()
for i := 0; i < 3; i++ { // Repeat same behavior
err := mc.Ping(ctx)
if err != context.Canceled {
t.Errorf("expected context.Canceled, got %#v", err)
}
if mc.closed.IsSet() {
t.Error("expected mc is not closed, closed actually")
}
if mc.watching {
t.Error("expected watching is false, but true")
}
}
}
func TestPingMarkBadConnection(t *testing.T) {
nc := badConnection{err: errors.New("boom")}
ms := &mysqlConn{
netConn: nc,
buf: newBuffer(nc),
maxAllowedPacket: defaultMaxAllowedPacket,
}
err := ms.Ping(context.Background())
if err != driver.ErrBadConn {
t.Errorf("expected driver.ErrBadConn, got %#v", err)
}
}
func TestPingErrInvalidConn(t *testing.T) {
nc := badConnection{err: errors.New("failed to write"), n: 10}
ms := &mysqlConn{
netConn: nc,
buf: newBuffer(nc),
maxAllowedPacket: defaultMaxAllowedPacket,
closech: make(chan struct{}),
}
err := ms.Ping(context.Background())
if err != ErrInvalidConn {
t.Errorf("expected ErrInvalidConn, got %#v", err)
}
}
type badConnection struct {
n int
err error
net.Conn
}
func (bc badConnection) Write(b []byte) (n int, err error) {
return bc.n, bc.err
}
func (bc badConnection) Close() error {
return nil
}

vendor/github.com/go-sql-driver/mysql/connector.go generated vendored Normal file

@ -0,0 +1,146 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"context"
"database/sql/driver"
"net"
)
type connector struct {
cfg *Config // immutable private copy.
}
// Connect implements driver.Connector interface.
// Connect returns a connection to the database.
func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
var err error
// New mysqlConn
mc := &mysqlConn{
maxAllowedPacket: maxPacketSize,
maxWriteSize: maxPacketSize - 1,
closech: make(chan struct{}),
cfg: c.cfg,
}
mc.parseTime = mc.cfg.ParseTime
// Connect to Server
dialsLock.RLock()
dial, ok := dials[mc.cfg.Net]
dialsLock.RUnlock()
if ok {
dctx := ctx
if mc.cfg.Timeout > 0 {
var cancel context.CancelFunc
dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout)
defer cancel()
}
mc.netConn, err = dial(dctx, mc.cfg.Addr)
} else {
nd := net.Dialer{Timeout: mc.cfg.Timeout}
mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr)
}
if err != nil {
return nil, err
}
// Enable TCP Keepalives on TCP connections
if tc, ok := mc.netConn.(*net.TCPConn); ok {
if err := tc.SetKeepAlive(true); err != nil {
// Don't send COM_QUIT before handshake.
mc.netConn.Close()
mc.netConn = nil
return nil, err
}
}
// Call startWatcher for context support (From Go 1.8)
mc.startWatcher()
if err := mc.watchCancel(ctx); err != nil {
mc.cleanup()
return nil, err
}
defer mc.finish()
mc.buf = newBuffer(mc.netConn)
// Set I/O timeouts
mc.buf.timeout = mc.cfg.ReadTimeout
mc.writeTimeout = mc.cfg.WriteTimeout
// Reading Handshake Initialization Packet
authData, plugin, err := mc.readHandshakePacket()
if err != nil {
mc.cleanup()
return nil, err
}
if plugin == "" {
plugin = defaultAuthPlugin
}
// Send Client Authentication Packet
authResp, err := mc.auth(authData, plugin)
if err != nil {
// try the default auth plugin, if using the requested plugin failed
errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
plugin = defaultAuthPlugin
authResp, err = mc.auth(authData, plugin)
if err != nil {
mc.cleanup()
return nil, err
}
}
if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil {
mc.cleanup()
return nil, err
}
// Handle response to auth packet, switch methods if possible
if err = mc.handleAuthResult(authData, plugin); err != nil {
// Authentication failed and MySQL has already closed the connection
// (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
// Do not send COM_QUIT, just cleanup and return the error.
mc.cleanup()
return nil, err
}
if mc.cfg.MaxAllowedPacket > 0 {
mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
} else {
// Get max allowed packet size
maxap, err := mc.getSystemVar("max_allowed_packet")
if err != nil {
mc.Close()
return nil, err
}
mc.maxAllowedPacket = stringToInt(maxap) - 1
}
if mc.maxAllowedPacket < maxPacketSize {
mc.maxWriteSize = mc.maxAllowedPacket
}
// Handle DSN Params
err = mc.handleParams()
if err != nil {
mc.Close()
return nil, err
}
return mc, nil
}
// Driver implements driver.Connector interface.
// Driver returns &MySQLDriver{}.
func (c *connector) Driver() driver.Driver {
return &MySQLDriver{}
}


@ -0,0 +1,30 @@
package mysql
import (
"context"
"net"
"testing"
"time"
)
func TestConnectorReturnsTimeout(t *testing.T) {
connector := &connector{&Config{
Net: "tcp",
Addr: "1.1.1.1:1234",
Timeout: 10 * time.Millisecond,
}}
_, err := connector.Connect(context.Background())
if err == nil {
t.Fatal("error expected")
}
if nerr, ok := err.(*net.OpError); ok {
expected := "dial tcp 1.1.1.1:1234: i/o timeout"
if nerr.Error() != expected {
t.Fatalf("expected %q, got %q", expected, nerr.Error())
}
} else {
t.Fatalf("expected %T, got %T", nerr, err)
}
}


@ -9,7 +9,9 @@
package mysql
const (
minProtocolVersion byte = 10
defaultAuthPlugin = "mysql_native_password"
defaultMaxAllowedPacket = 4 << 20 // 4 MiB
minProtocolVersion = 10
maxPacketSize = 1<<24 - 1
timeFormat = "2006-01-02 15:04:05.999999"
)
@ -19,6 +21,7 @@ const (
const (
iOK byte = 0x00
iAuthMoreData byte = 0x01
iLocalInFile byte = 0xfb
iEOF byte = 0xfe
iERR byte = 0xff
@ -87,8 +90,10 @@ const (
)
// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
type fieldType byte
const (
fieldTypeDecimal byte = iota
fieldTypeDecimal fieldType = iota
fieldTypeTiny
fieldTypeShort
fieldTypeLong
@ -107,7 +112,7 @@ const (
fieldTypeBit
)
const (
fieldTypeJSON byte = iota + 0xf5
fieldTypeJSON fieldType = iota + 0xf5
fieldTypeNewDecimal
fieldTypeEnum
fieldTypeSet
@ -161,3 +166,9 @@ const (
statusInTransReadonly
statusSessionStateChanged
)
const (
cachingSha2PasswordRequestPublicKey = 2
cachingSha2PasswordFastAuthSuccess = 3
cachingSha2PasswordPerformFullAuthentication = 4
)


@ -17,178 +17,91 @@
package mysql
import (
"context"
"database/sql"
"database/sql/driver"
"net"
"sync"
)
// watcher interface is used for context support (From Go 1.8)
type watcher interface {
startWatcher()
}
// MySQLDriver is exported to make the driver directly accessible.
// In general the driver is used via the database/sql package.
type MySQLDriver struct{}
// DialFunc is a function which can be used to establish the network connection.
// Custom dial functions must be registered with RegisterDial
//
// Deprecated: users should register a DialContextFunc instead
type DialFunc func(addr string) (net.Conn, error)
var dials map[string]DialFunc
// DialContextFunc is a function which can be used to establish the network connection.
// Custom dial functions must be registered with RegisterDialContext
type DialContextFunc func(ctx context.Context, addr string) (net.Conn, error)
// RegisterDial registers a custom dial function. It can then be used by the
var (
dialsLock sync.RWMutex
dials map[string]DialContextFunc
)
// RegisterDialContext registers a custom dial function. It can then be used by the
// network address mynet(addr), where mynet is the registered new network.
// addr is passed as a parameter to the dial function.
func RegisterDial(net string, dial DialFunc) {
// The current context for the connection and its address is passed to the dial function.
func RegisterDialContext(net string, dial DialContextFunc) {
dialsLock.Lock()
defer dialsLock.Unlock()
if dials == nil {
dials = make(map[string]DialFunc)
dials = make(map[string]DialContextFunc)
}
dials[net] = dial
}
// RegisterDial registers a custom dial function. It can then be used by the
// network address mynet(addr), where mynet is the registered new network.
// addr is passed as a parameter to the dial function.
//
// Deprecated: users should call RegisterDialContext instead
func RegisterDial(network string, dial DialFunc) {
RegisterDialContext(network, func(_ context.Context, addr string) (net.Conn, error) {
return dial(addr)
})
}
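
A minimal usage sketch for the registration API above, assuming the driver is imported as github.com/go-sql-driver/mysql; the network name "mynet", the address, and the credentials are placeholders:

package main

import (
	"context"
	"database/sql"
	"net"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Register a custom dialer under the placeholder network name "mynet".
	mysql.RegisterDialContext("mynet", func(ctx context.Context, addr string) (net.Conn, error) {
		d := net.Dialer{}
		return d.DialContext(ctx, "tcp", addr)
	})

	// The DSN selects the custom dialer with mynet(addr) instead of tcp(addr).
	db, err := sql.Open("mysql", "user:pass@mynet(db.internal:3306)/appdb")
	if err != nil {
		panic(err)
	}
	defer db.Close()
}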
// Open new Connection.
// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
// the DSN string is formated
// the DSN string is formatted
func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
var err error
// New mysqlConn
mc := &mysqlConn{
maxAllowedPacket: maxPacketSize,
maxWriteSize: maxPacketSize - 1,
closech: make(chan struct{}),
}
mc.cfg, err = ParseDSN(dsn)
cfg, err := ParseDSN(dsn)
if err != nil {
return nil, err
}
mc.parseTime = mc.cfg.ParseTime
mc.strict = mc.cfg.Strict
// Connect to Server
if dial, ok := dials[mc.cfg.Net]; ok {
mc.netConn, err = dial(mc.cfg.Addr)
} else {
nd := net.Dialer{Timeout: mc.cfg.Timeout}
mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr)
c := &connector{
cfg: cfg,
}
if err != nil {
return nil, err
}
// Enable TCP Keepalives on TCP connections
if tc, ok := mc.netConn.(*net.TCPConn); ok {
if err := tc.SetKeepAlive(true); err != nil {
// Don't send COM_QUIT before handshake.
mc.netConn.Close()
mc.netConn = nil
return nil, err
}
}
// Call startWatcher for context support (From Go 1.8)
if s, ok := interface{}(mc).(watcher); ok {
s.startWatcher()
}
mc.buf = newBuffer(mc.netConn)
// Set I/O timeouts
mc.buf.timeout = mc.cfg.ReadTimeout
mc.writeTimeout = mc.cfg.WriteTimeout
// Reading Handshake Initialization Packet
cipher, err := mc.readInitPacket()
if err != nil {
mc.cleanup()
return nil, err
}
// Send Client Authentication Packet
if err = mc.writeAuthPacket(cipher); err != nil {
mc.cleanup()
return nil, err
}
// Handle response to auth packet, switch methods if possible
if err = handleAuthResult(mc, cipher); err != nil {
// Authentication failed and MySQL has already closed the connection
// (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
// Do not send COM_QUIT, just cleanup and return the error.
mc.cleanup()
return nil, err
}
if mc.cfg.MaxAllowedPacket > 0 {
mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
} else {
// Get max allowed packet size
maxap, err := mc.getSystemVar("max_allowed_packet")
if err != nil {
mc.Close()
return nil, err
}
mc.maxAllowedPacket = stringToInt(maxap) - 1
}
if mc.maxAllowedPacket < maxPacketSize {
mc.maxWriteSize = mc.maxAllowedPacket
}
// Handle DSN Params
err = mc.handleParams()
if err != nil {
mc.Close()
return nil, err
}
return mc, nil
}
func handleAuthResult(mc *mysqlConn, oldCipher []byte) error {
// Read Result Packet
cipher, err := mc.readResultOK()
if err == nil {
return nil // auth successful
}
if mc.cfg == nil {
return err // auth failed and retry not possible
}
// Retry auth if configured to do so.
if mc.cfg.AllowOldPasswords && err == ErrOldPassword {
// Retry with old authentication method. Note: there are edge cases
// where this should work but doesn't; this is currently "wontfix":
// https://github.com/go-sql-driver/mysql/issues/184
// If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
// sent and we have to keep using the cipher sent in the init packet.
if cipher == nil {
cipher = oldCipher
}
if err = mc.writeOldAuthPacket(cipher); err != nil {
return err
}
_, err = mc.readResultOK()
} else if mc.cfg.AllowCleartextPasswords && err == ErrCleartextPassword {
// Retry with clear text password for
// http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
// http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
if err = mc.writeClearAuthPacket(); err != nil {
return err
}
_, err = mc.readResultOK()
} else if mc.cfg.AllowNativePasswords && err == ErrNativePassword {
if err = mc.writeNativeAuthPacket(cipher); err != nil {
return err
}
_, err = mc.readResultOK()
}
return err
return c.Connect(context.Background())
}
func init() {
sql.Register("mysql", &MySQLDriver{})
}
// NewConnector returns new driver.Connector.
func NewConnector(cfg *Config) (driver.Connector, error) {
cfg = cfg.Clone()
// normalize the contents of cfg so calls to NewConnector have the same
// behavior as MySQLDriver.OpenConnector
if err := cfg.normalize(); err != nil {
return nil, err
}
return &connector{cfg: cfg}, nil
}
// OpenConnector implements driver.DriverContext.
func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) {
cfg, err := ParseDSN(dsn)
if err != nil {
return nil, err
}
return &connector{
cfg: cfg,
}, nil
}


@ -1,522 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build go1.8
package mysql
import (
"context"
"database/sql"
"database/sql/driver"
"fmt"
"reflect"
"testing"
"time"
)
// static interface implementation checks of mysqlConn
var (
_ driver.ConnBeginTx = &mysqlConn{}
_ driver.ConnPrepareContext = &mysqlConn{}
_ driver.ExecerContext = &mysqlConn{}
_ driver.Pinger = &mysqlConn{}
_ driver.QueryerContext = &mysqlConn{}
)
// static interface implementation checks of mysqlStmt
var (
_ driver.StmtExecContext = &mysqlStmt{}
_ driver.StmtQueryContext = &mysqlStmt{}
)
func TestMultiResultSet(t *testing.T) {
type result struct {
values [][]int
columns []string
}
// checkRows is a helper test function to validate rows containing 3 result
// sets with specific values and columns. The basic query would look like this:
//
// SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
// SELECT 0 UNION SELECT 1;
// SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
//
// to distinguish test cases the first string argument is put in front of
// every error or fatal message.
checkRows := func(desc string, rows *sql.Rows, dbt *DBTest) {
expected := []result{
{
values: [][]int{{1, 2}, {3, 4}},
columns: []string{"col1", "col2"},
},
{
values: [][]int{{1, 2, 3}, {4, 5, 6}},
columns: []string{"col1", "col2", "col3"},
},
}
var res1 result
for rows.Next() {
var res [2]int
if err := rows.Scan(&res[0], &res[1]); err != nil {
dbt.Fatal(err)
}
res1.values = append(res1.values, res[:])
}
cols, err := rows.Columns()
if err != nil {
dbt.Fatal(desc, err)
}
res1.columns = cols
if !reflect.DeepEqual(expected[0], res1) {
dbt.Error(desc, "want =", expected[0], "got =", res1)
}
if !rows.NextResultSet() {
dbt.Fatal(desc, "expected next result set")
}
// ignoring one result set
if !rows.NextResultSet() {
dbt.Fatal(desc, "expected next result set")
}
var res2 result
cols, err = rows.Columns()
if err != nil {
dbt.Fatal(desc, err)
}
res2.columns = cols
for rows.Next() {
var res [3]int
if err := rows.Scan(&res[0], &res[1], &res[2]); err != nil {
dbt.Fatal(desc, err)
}
res2.values = append(res2.values, res[:])
}
if !reflect.DeepEqual(expected[1], res2) {
dbt.Error(desc, "want =", expected[1], "got =", res2)
}
if rows.NextResultSet() {
dbt.Error(desc, "unexpected next result set")
}
if err := rows.Err(); err != nil {
dbt.Error(desc, err)
}
}
runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
rows := dbt.mustQuery(`DO 1;
SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
DO 1;
SELECT 0 UNION SELECT 1;
SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;`)
defer rows.Close()
checkRows("query: ", rows, dbt)
})
runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
queries := []string{
`
DROP PROCEDURE IF EXISTS test_mrss;
CREATE PROCEDURE test_mrss()
BEGIN
DO 1;
SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
DO 1;
SELECT 0 UNION SELECT 1;
SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
END
`,
`
DROP PROCEDURE IF EXISTS test_mrss;
CREATE PROCEDURE test_mrss()
BEGIN
SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
SELECT 0 UNION SELECT 1;
SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
END
`,
}
defer dbt.mustExec("DROP PROCEDURE IF EXISTS test_mrss")
for i, query := range queries {
dbt.mustExec(query)
stmt, err := dbt.db.Prepare("CALL test_mrss()")
if err != nil {
dbt.Fatalf("%v (i=%d)", err, i)
}
defer stmt.Close()
for j := 0; j < 2; j++ {
rows, err := stmt.Query()
if err != nil {
dbt.Fatalf("%v (i=%d) (j=%d)", err, i, j)
}
checkRows(fmt.Sprintf("prepared stmt query (i=%d) (j=%d): ", i, j), rows, dbt)
}
}
})
}
func TestMultiResultSetNoSelect(t *testing.T) {
runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
rows := dbt.mustQuery("DO 1; DO 2;")
defer rows.Close()
if rows.Next() {
dbt.Error("unexpected row")
}
if rows.NextResultSet() {
dbt.Error("unexpected next result set")
}
if err := rows.Err(); err != nil {
dbt.Error("expected nil; got ", err)
}
})
}
// tests if rows are set in a proper state if some results were ignored before
// calling rows.NextResultSet.
func TestSkipResults(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
rows := dbt.mustQuery("SELECT 1, 2")
defer rows.Close()
if !rows.Next() {
dbt.Error("expected row")
}
if rows.NextResultSet() {
dbt.Error("unexpected next result set")
}
if err := rows.Err(); err != nil {
dbt.Error("expected nil; got ", err)
}
})
}
func TestPingContext(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
if err := dbt.db.PingContext(ctx); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
})
}
func TestContextCancelExec(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
dbt.mustExec("CREATE TABLE test (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
// Delay execution for just a bit until db.ExecContext has begun.
defer time.AfterFunc(100*time.Millisecond, cancel).Stop()
// This query will be canceled.
startTime := time.Now()
if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
if d := time.Since(startTime); d > 500*time.Millisecond {
dbt.Errorf("too long execution time: %s", d)
}
// Wait for the INSERT query to finish.
time.Sleep(time.Second)
// Check how many times the query is executed.
var v int
if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 { // TODO: need to kill the query, and v should be 0.
dbt.Errorf("expected val to be 1, got %d", v)
}
// Context is already canceled, so error should come before execution.
if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (1)"); err == nil {
dbt.Error("expected error")
} else if err.Error() != "context canceled" {
dbt.Fatalf("unexpected error: %s", err)
}
// The second insert query will fail, so the table has no changes.
if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 {
dbt.Errorf("expected val to be 1, got %d", v)
}
})
}
func TestContextCancelQuery(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
dbt.mustExec("CREATE TABLE test (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
// Delay execution for just a bit until db.ExecContext has begun.
defer time.AfterFunc(100*time.Millisecond, cancel).Stop()
// This query will be canceled.
startTime := time.Now()
if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
if d := time.Since(startTime); d > 500*time.Millisecond {
dbt.Errorf("too long execution time: %s", d)
}
// Wait for the INSERT query to finish.
time.Sleep(time.Second)
// Check how many times the query is executed.
var v int
if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 { // TODO: need to kill the query, and v should be 0.
dbt.Errorf("expected val to be 1, got %d", v)
}
// Context is already canceled, so error should come before execution.
if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (1)"); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
// The second insert query will fail, so the table has no changes.
if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 {
dbt.Errorf("expected val to be 1, got %d", v)
}
})
}
func TestContextCancelQueryRow(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
dbt.mustExec("CREATE TABLE test (v INTEGER)")
dbt.mustExec("INSERT INTO test VALUES (1), (2), (3)")
ctx, cancel := context.WithCancel(context.Background())
rows, err := dbt.db.QueryContext(ctx, "SELECT v FROM test")
if err != nil {
dbt.Fatalf("%s", err.Error())
}
// the first row will succeed.
var v int
if !rows.Next() {
dbt.Fatalf("unexpected end")
}
if err := rows.Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
cancel()
// make sure the driver receives the cancel request.
time.Sleep(100 * time.Millisecond)
if rows.Next() {
dbt.Errorf("expected end, but not")
}
if err := rows.Err(); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
})
}
func TestContextCancelPrepare(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
if _, err := dbt.db.PrepareContext(ctx, "SELECT 1"); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
})
}
func TestContextCancelStmtExec(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
dbt.mustExec("CREATE TABLE test (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
if err != nil {
dbt.Fatalf("unexpected error: %v", err)
}
// Delay execution for just a bit until db.ExecContext has begun.
defer time.AfterFunc(100*time.Millisecond, cancel).Stop()
// This query will be canceled.
startTime := time.Now()
if _, err := stmt.ExecContext(ctx); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
if d := time.Since(startTime); d > 500*time.Millisecond {
dbt.Errorf("too long execution time: %s", d)
}
// Wait for the INSERT query to finish.
time.Sleep(time.Second)
// Check how many times the query is executed.
var v int
if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 { // TODO: need to kill the query, and v should be 0.
dbt.Errorf("expected val to be 1, got %d", v)
}
})
}
func TestContextCancelStmtQuery(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
dbt.mustExec("CREATE TABLE test (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
if err != nil {
dbt.Fatalf("unexpected error: %v", err)
}
// Delay execution for just a bit until db.ExecContext has begun.
defer time.AfterFunc(100*time.Millisecond, cancel).Stop()
// This query will be canceled.
startTime := time.Now()
if _, err := stmt.QueryContext(ctx); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
if d := time.Since(startTime); d > 500*time.Millisecond {
dbt.Errorf("too long execution time: %s", d)
}
// Wait for the INSERT query to finish.
time.Sleep(time.Second)
// Check how many times the query is executed.
var v int
if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 { // TODO: need to kill the query, and v should be 0.
dbt.Errorf("expected val to be 1, got %d", v)
}
})
}
func TestContextCancelBegin(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
dbt.mustExec("CREATE TABLE test (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
tx, err := dbt.db.BeginTx(ctx, nil)
if err != nil {
dbt.Fatal(err)
}
// Delay execution for just a bit until db.ExecContext has begun.
defer time.AfterFunc(100*time.Millisecond, cancel).Stop()
// This query will be canceled.
startTime := time.Now()
if _, err := tx.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
if d := time.Since(startTime); d > 500*time.Millisecond {
dbt.Errorf("too long execution time: %s", d)
}
// Transaction is canceled, so expect an error.
switch err := tx.Commit(); err {
case sql.ErrTxDone:
// because the transaction has already been rolled back:
// the database/sql package watches ctx
// and rolls back when ctx is canceled.
case context.Canceled:
// the database/sql package rolls back on another goroutine,
// so the transaction may not have been rolled back yet, depending on goroutine scheduling.
default:
dbt.Errorf("expected sql.ErrTxDone or context.Canceled, got %v", err)
}
// Context is canceled, so cannot begin a transaction.
if _, err := dbt.db.BeginTx(ctx, nil); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
})
}
func TestContextBeginIsolationLevel(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
dbt.mustExec("CREATE TABLE test (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tx1, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
Isolation: sql.LevelRepeatableRead,
})
if err != nil {
dbt.Fatal(err)
}
tx2, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
Isolation: sql.LevelReadCommitted,
})
if err != nil {
dbt.Fatal(err)
}
_, err = tx1.ExecContext(ctx, "INSERT INTO test VALUES (1)")
if err != nil {
dbt.Fatal(err)
}
var v int
row := tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
if err := row.Scan(&v); err != nil {
dbt.Fatal(err)
}
// Because the writer transaction hasn't been committed yet, its insert should not be visible here
if v != 0 {
dbt.Errorf("expected val to be 0, got %d", v)
}
err = tx1.Commit()
if err != nil {
dbt.Fatal(err)
}
row = tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
if err := row.Scan(&v); err != nil {
dbt.Fatal(err)
}
// Data written by the writer transaction is now committed, so it should be visible
if v != 1 {
dbt.Errorf("expected val to be 1, got %d", v)
}
tx2.Commit()
})
}

File diff suppressed because it is too large


@ -10,9 +10,11 @@ package mysql
import (
"bytes"
"crypto/rsa"
"crypto/tls"
"errors"
"fmt"
"math/big"
"net"
"net/url"
"sort"
@ -28,7 +30,9 @@ var (
errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
)
// Config is a configuration parsed from a DSN string
// Config is a configuration parsed from a DSN string.
// If a new Config is created instead of being parsed from a DSN string,
// the NewConfig function should be used, which sets default values.
type Config struct {
User string // Username
Passwd string // Password (requires User)
@ -39,6 +43,8 @@ type Config struct {
Collation string // Connection collation
Loc *time.Location // Location for time.Time values
MaxAllowedPacket int // Max packet size allowed
ServerPubKey string // Server public key name
pubKey *rsa.PublicKey // Server public key
TLSConfig string // TLS configuration name
tls *tls.Config // TLS configuration
Timeout time.Duration // Dial timeout
@ -49,13 +55,112 @@ type Config struct {
AllowCleartextPasswords bool // Allows the cleartext client side plugin
AllowNativePasswords bool // Allows the native password authentication method
AllowOldPasswords bool // Allows the old insecure password method
CheckConnLiveness bool // Check connections for liveness before using them
ClientFoundRows bool // Return number of matching rows instead of rows changed
ColumnsWithAlias bool // Prepend table alias to column names
InterpolateParams bool // Interpolate placeholders into query string
MultiStatements bool // Allow multiple statements in one query
ParseTime bool // Parse time values to time.Time
RejectReadOnly bool // Reject read-only connections
Strict bool // Return warnings as errors
}
// NewConfig creates a new Config and sets default values.
func NewConfig() *Config {
return &Config{
Collation: defaultCollation,
Loc: time.UTC,
MaxAllowedPacket: defaultMaxAllowedPacket,
AllowNativePasswords: true,
CheckConnLiveness: true,
}
}
func (cfg *Config) Clone() *Config {
cp := *cfg
if cp.tls != nil {
cp.tls = cfg.tls.Clone()
}
if len(cp.Params) > 0 {
cp.Params = make(map[string]string, len(cfg.Params))
for k, v := range cfg.Params {
cp.Params[k] = v
}
}
if cfg.pubKey != nil {
cp.pubKey = &rsa.PublicKey{
N: new(big.Int).Set(cfg.pubKey.N),
E: cfg.pubKey.E,
}
}
return &cp
}
func (cfg *Config) normalize() error {
if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
return errInvalidDSNUnsafeCollation
}
// Set default network if empty
if cfg.Net == "" {
cfg.Net = "tcp"
}
// Set default address if empty
if cfg.Addr == "" {
switch cfg.Net {
case "tcp":
cfg.Addr = "127.0.0.1:3306"
case "unix":
cfg.Addr = "/tmp/mysql.sock"
default:
return errors.New("default addr for network '" + cfg.Net + "' unknown")
}
} else if cfg.Net == "tcp" {
cfg.Addr = ensureHavePort(cfg.Addr)
}
switch cfg.TLSConfig {
case "false", "":
// don't set anything
case "true":
cfg.tls = &tls.Config{}
case "skip-verify", "preferred":
cfg.tls = &tls.Config{InsecureSkipVerify: true}
default:
cfg.tls = getTLSConfigClone(cfg.TLSConfig)
if cfg.tls == nil {
return errors.New("invalid value / unknown config name: " + cfg.TLSConfig)
}
}
if cfg.tls != nil && cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify {
host, _, err := net.SplitHostPort(cfg.Addr)
if err == nil {
cfg.tls.ServerName = host
}
}
if cfg.ServerPubKey != "" {
cfg.pubKey = getServerPubKey(cfg.ServerPubKey)
if cfg.pubKey == nil {
return errors.New("invalid value / unknown server pub key name: " + cfg.ServerPubKey)
}
}
return nil
}
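
For the last branch of the TLS switch above, the named configuration must be registered before the Config is normalized. A brief sketch, with a placeholder CA path and config name:

package example

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"

	"github.com/go-sql-driver/mysql"
)

// registerCustomTLS makes "?tls=custom" resolvable by the named-config branch of normalize.
func registerCustomTLS() {
	rootCertPool := x509.NewCertPool()
	pem, err := ioutil.ReadFile("/path/to/ca-cert.pem") // placeholder CA bundle
	if err != nil {
		log.Fatal(err)
	}
	if !rootCertPool.AppendCertsFromPEM(pem) {
		log.Fatal("failed to append CA certificate")
	}
	if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool}); err != nil {
		log.Fatal(err)
	}
	// DSN example: "user:pass@tcp(db.example.com:3306)/appdb?tls=custom"
}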
func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) {
buf.Grow(1 + len(name) + 1 + len(value))
if !*hasParam {
*hasParam = true
buf.WriteByte('?')
} else {
buf.WriteByte('&')
}
buf.WriteString(name)
buf.WriteByte('=')
buf.WriteString(value)
}
// FormatDSN formats the given Config into a DSN string which can be passed to
@ -96,164 +201,75 @@ func (cfg *Config) FormatDSN() string {
}
if cfg.AllowCleartextPasswords {
if hasParam {
buf.WriteString("&allowCleartextPasswords=true")
} else {
hasParam = true
buf.WriteString("?allowCleartextPasswords=true")
}
writeDSNParam(&buf, &hasParam, "allowCleartextPasswords", "true")
}
if !cfg.AllowNativePasswords {
if hasParam {
buf.WriteString("&allowNativePasswords=false")
} else {
hasParam = true
buf.WriteString("?allowNativePasswords=false")
}
writeDSNParam(&buf, &hasParam, "allowNativePasswords", "false")
}
if cfg.AllowOldPasswords {
if hasParam {
buf.WriteString("&allowOldPasswords=true")
} else {
hasParam = true
buf.WriteString("?allowOldPasswords=true")
writeDSNParam(&buf, &hasParam, "allowOldPasswords", "true")
}
if !cfg.CheckConnLiveness {
writeDSNParam(&buf, &hasParam, "checkConnLiveness", "false")
}
if cfg.ClientFoundRows {
if hasParam {
buf.WriteString("&clientFoundRows=true")
} else {
hasParam = true
buf.WriteString("?clientFoundRows=true")
}
writeDSNParam(&buf, &hasParam, "clientFoundRows", "true")
}
if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
if hasParam {
buf.WriteString("&collation=")
} else {
hasParam = true
buf.WriteString("?collation=")
}
buf.WriteString(col)
writeDSNParam(&buf, &hasParam, "collation", col)
}
if cfg.ColumnsWithAlias {
if hasParam {
buf.WriteString("&columnsWithAlias=true")
} else {
hasParam = true
buf.WriteString("?columnsWithAlias=true")
}
writeDSNParam(&buf, &hasParam, "columnsWithAlias", "true")
}
if cfg.InterpolateParams {
if hasParam {
buf.WriteString("&interpolateParams=true")
} else {
hasParam = true
buf.WriteString("?interpolateParams=true")
}
writeDSNParam(&buf, &hasParam, "interpolateParams", "true")
}
if cfg.Loc != time.UTC && cfg.Loc != nil {
if hasParam {
buf.WriteString("&loc=")
} else {
hasParam = true
buf.WriteString("?loc=")
}
buf.WriteString(url.QueryEscape(cfg.Loc.String()))
writeDSNParam(&buf, &hasParam, "loc", url.QueryEscape(cfg.Loc.String()))
}
if cfg.MultiStatements {
if hasParam {
buf.WriteString("&multiStatements=true")
} else {
hasParam = true
buf.WriteString("?multiStatements=true")
}
writeDSNParam(&buf, &hasParam, "multiStatements", "true")
}
if cfg.ParseTime {
if hasParam {
buf.WriteString("&parseTime=true")
} else {
hasParam = true
buf.WriteString("?parseTime=true")
}
writeDSNParam(&buf, &hasParam, "parseTime", "true")
}
if cfg.ReadTimeout > 0 {
if hasParam {
buf.WriteString("&readTimeout=")
} else {
hasParam = true
buf.WriteString("?readTimeout=")
}
buf.WriteString(cfg.ReadTimeout.String())
writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String())
}
if cfg.RejectReadOnly {
if hasParam {
buf.WriteString("&rejectReadOnly=true")
} else {
hasParam = true
buf.WriteString("?rejectReadOnly=true")
}
writeDSNParam(&buf, &hasParam, "rejectReadOnly", "true")
}
if cfg.Strict {
if hasParam {
buf.WriteString("&strict=true")
} else {
hasParam = true
buf.WriteString("?strict=true")
}
if len(cfg.ServerPubKey) > 0 {
writeDSNParam(&buf, &hasParam, "serverPubKey", url.QueryEscape(cfg.ServerPubKey))
}
if cfg.Timeout > 0 {
if hasParam {
buf.WriteString("&timeout=")
} else {
hasParam = true
buf.WriteString("?timeout=")
}
buf.WriteString(cfg.Timeout.String())
writeDSNParam(&buf, &hasParam, "timeout", cfg.Timeout.String())
}
if len(cfg.TLSConfig) > 0 {
if hasParam {
buf.WriteString("&tls=")
} else {
hasParam = true
buf.WriteString("?tls=")
}
buf.WriteString(url.QueryEscape(cfg.TLSConfig))
writeDSNParam(&buf, &hasParam, "tls", url.QueryEscape(cfg.TLSConfig))
}
if cfg.WriteTimeout > 0 {
if hasParam {
buf.WriteString("&writeTimeout=")
} else {
hasParam = true
buf.WriteString("?writeTimeout=")
}
buf.WriteString(cfg.WriteTimeout.String())
writeDSNParam(&buf, &hasParam, "writeTimeout", cfg.WriteTimeout.String())
}
if cfg.MaxAllowedPacket > 0 {
if hasParam {
buf.WriteString("&maxAllowedPacket=")
} else {
hasParam = true
buf.WriteString("?maxAllowedPacket=")
}
buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket))
if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
writeDSNParam(&buf, &hasParam, "maxAllowedPacket", strconv.Itoa(cfg.MaxAllowedPacket))
}
// other params
@ -264,16 +280,7 @@ func (cfg *Config) FormatDSN() string {
}
sort.Strings(params)
for _, param := range params {
if hasParam {
buf.WriteByte('&')
} else {
hasParam = true
buf.WriteByte('?')
}
buf.WriteString(param)
buf.WriteByte('=')
buf.WriteString(url.QueryEscape(cfg.Params[param]))
writeDSNParam(&buf, &hasParam, param, url.QueryEscape(cfg.Params[param]))
}
}
@ -283,11 +290,7 @@ func (cfg *Config) FormatDSN() string {
// ParseDSN parses the DSN string to a Config
func ParseDSN(dsn string) (cfg *Config, err error) {
// New config with some default values
cfg = &Config{
Loc: time.UTC,
Collation: defaultCollation,
AllowNativePasswords: true,
}
cfg = NewConfig()
// [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
// Find the last '/' (since the password or the net addr might contain a '/')
@ -355,28 +358,9 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
return nil, errInvalidDSNNoSlash
}
if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
return nil, errInvalidDSNUnsafeCollation
if err = cfg.normalize(); err != nil {
return nil, err
}
// Set default network if empty
if cfg.Net == "" {
cfg.Net = "tcp"
}
// Set default address if empty
if cfg.Addr == "" {
switch cfg.Net {
case "tcp":
cfg.Addr = "127.0.0.1:3306"
case "unix":
cfg.Addr = "/tmp/mysql.sock"
default:
return nil, errors.New("default addr for network '" + cfg.Net + "' unknown")
}
}
return
}
@ -391,7 +375,6 @@ func parseDSNParams(cfg *Config, params string) (err error) {
// cfg params
switch value := param[1]; param[0] {
// Disable INFILE whitelist / enable all files
case "allowAllFiles":
var isBool bool
@ -424,6 +407,14 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return errors.New("invalid bool value: " + value)
}
// Check connections for Liveness before using them
case "checkConnLiveness":
var isBool bool
cfg.CheckConnLiveness, isBool = readBool(value)
if !isBool {
return errors.New("invalid bool value: " + value)
}
// Switch "rowsAffected" mode
case "clientFoundRows":
var isBool bool
@ -497,13 +488,17 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return errors.New("invalid bool value: " + value)
}
// Server public key
case "serverPubKey":
name, err := url.QueryUnescape(value)
if err != nil {
return fmt.Errorf("invalid value for server pub key name: %v", err)
}
cfg.ServerPubKey = name
// Strict mode
case "strict":
var isBool bool
cfg.Strict, isBool = readBool(value)
if !isBool {
return errors.New("invalid bool value: " + value)
}
panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
// Dial Timeout
case "timeout":
@ -518,36 +513,17 @@ func parseDSNParams(cfg *Config, params string) (err error) {
if isBool {
if boolValue {
cfg.TLSConfig = "true"
cfg.tls = &tls.Config{}
host, _, err := net.SplitHostPort(cfg.Addr)
if err == nil {
cfg.tls.ServerName = host
}
} else {
cfg.TLSConfig = "false"
}
} else if vl := strings.ToLower(value); vl == "skip-verify" {
} else if vl := strings.ToLower(value); vl == "skip-verify" || vl == "preferred" {
cfg.TLSConfig = vl
cfg.tls = &tls.Config{InsecureSkipVerify: true}
} else {
name, err := url.QueryUnescape(value)
if err != nil {
return fmt.Errorf("invalid value for TLS config name: %v", err)
}
if tlsConfig := getTLSConfigClone(name); tlsConfig != nil {
if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
host, _, err := net.SplitHostPort(cfg.Addr)
if err == nil {
tlsConfig.ServerName = host
}
}
cfg.TLSConfig = name
cfg.tls = tlsConfig
} else {
return errors.New("invalid value / unknown config name: " + name)
}
}
// I/O write Timeout
@ -575,3 +551,10 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return
}
func ensureHavePort(addr string) string {
if _, _, err := net.SplitHostPort(addr); err != nil {
return net.JoinHostPort(addr, "3306")
}
return addr
}
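
A round-trip sketch of the DSN helpers in this file, with placeholder host and credentials: FormatDSN only emits parameters that differ from the NewConfig defaults, and ParseDSN (via normalize and ensureHavePort) fills in the default port.

package main

import (
	"fmt"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	cfg := mysql.NewConfig()
	cfg.User = "user"
	cfg.Passwd = "secret"
	cfg.Net = "tcp"
	cfg.Addr = "db.example.com" // no port; ParseDSN appends :3306
	cfg.DBName = "appdb"
	cfg.ParseTime = true

	dsn := cfg.FormatDSN() // "user:secret@tcp(db.example.com)/appdb?parseTime=true"

	parsed, err := mysql.ParseDSN(dsn)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.Addr) // "db.example.com:3306"
}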


@ -22,50 +22,57 @@ var testDSNs = []struct {
out *Config
}{{
"username:password@protocol(address)/dbname?param=value",
&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, AllowNativePasswords: true},
&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true",
&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, AllowNativePasswords: true, ColumnsWithAlias: true},
&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, ColumnsWithAlias: true},
}, {
"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true&multiStatements=true",
&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, AllowNativePasswords: true, ColumnsWithAlias: true, MultiStatements: true},
&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, ColumnsWithAlias: true, MultiStatements: true},
}, {
"user@unix(/path/to/socket)/dbname?charset=utf8",
&Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, AllowNativePasswords: true},
&Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true",
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, AllowNativePasswords: true, TLSConfig: "true"},
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, TLSConfig: "true"},
}, {
"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify",
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, AllowNativePasswords: true, TLSConfig: "skip-verify"},
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, TLSConfig: "skip-verify"},
}, {
"user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci&maxAllowedPacket=16777216",
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, AllowNativePasswords: true, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, ClientFoundRows: true, MaxAllowedPacket: 16777216},
"user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci&maxAllowedPacket=16777216&tls=false&allowCleartextPasswords=true&parseTime=true&rejectReadOnly=true",
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, TLSConfig: "false", AllowCleartextPasswords: true, AllowNativePasswords: true, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, CheckConnLiveness: true, ClientFoundRows: true, MaxAllowedPacket: 16777216, ParseTime: true, RejectReadOnly: true},
}, {
"user:password@/dbname?allowNativePasswords=false",
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.UTC, AllowNativePasswords: false},
"user:password@/dbname?allowNativePasswords=false&checkConnLiveness=false&maxAllowedPacket=0",
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: 0, AllowNativePasswords: false, CheckConnLiveness: false},
}, {
"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local",
&Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.Local, AllowNativePasswords: true},
&Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.Local, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"/dbname",
&Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.UTC, AllowNativePasswords: true},
&Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"@/",
&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC, AllowNativePasswords: true},
&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"/",
&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC, AllowNativePasswords: true},
&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"",
&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC, AllowNativePasswords: true},
&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"user:p@/ssword@/",
&Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC, AllowNativePasswords: true},
&Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"unix/?arg=%2Fsome%2Fpath.ext",
&Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8_general_ci", Loc: time.UTC, AllowNativePasswords: true},
}}
&Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"tcp(127.0.0.1)/dbname",
&Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"tcp(de:ad:be:ef::ca:fe)/dbname",
&Config{Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
},
}
func TestDSNParser(t *testing.T) {
for i, tst := range testDSNs {
@ -91,6 +98,7 @@ func TestDSNParserInvalid(t *testing.T) {
"(/", // no closing brace
"net(addr)//", // unescaped
"User:pass@tcp(1.2.3.4:3306)", // no trailing slash
"net()/", // unknown default addr
//"/dbname?arg=/some/unescaped/path",
}
@ -127,11 +135,56 @@ func TestDSNReformat(t *testing.T) {
}
}
func TestDSNServerPubKey(t *testing.T) {
baseDSN := "User:password@tcp(localhost:5555)/dbname?serverPubKey="
RegisterServerPubKey("testKey", testPubKeyRSA)
defer DeregisterServerPubKey("testKey")
tst := baseDSN + "testKey"
cfg, err := ParseDSN(tst)
if err != nil {
t.Error(err.Error())
}
if cfg.ServerPubKey != "testKey" {
t.Errorf("unexpected cfg.ServerPubKey value: %v", cfg.ServerPubKey)
}
if cfg.pubKey != testPubKeyRSA {
t.Error("pub key pointer doesn't match")
}
// Key is missing
tst = baseDSN + "invalid_name"
cfg, err = ParseDSN(tst)
if err == nil {
t.Errorf("invalid name in DSN (%s) but did not error. Got config: %#v", tst, cfg)
}
}
func TestDSNServerPubKeyQueryEscape(t *testing.T) {
const name = "&%!:"
dsn := "User:password@tcp(localhost:5555)/dbname?serverPubKey=" + url.QueryEscape(name)
RegisterServerPubKey(name, testPubKeyRSA)
defer DeregisterServerPubKey(name)
cfg, err := ParseDSN(dsn)
if err != nil {
t.Error(err.Error())
}
if cfg.pubKey != testPubKeyRSA {
t.Error("pub key pointer doesn't match")
}
}
func TestDSNWithCustomTLS(t *testing.T) {
baseDSN := "User:password@tcp(localhost:5555)/dbname?tls="
tlsCfg := tls.Config{}
RegisterTLSConfig("utils_test", &tlsCfg)
defer DeregisterTLSConfig("utils_test")
// Custom TLS is missing
tst := baseDSN + "invalid_tls"
@ -165,8 +218,34 @@ func TestDSNWithCustomTLS(t *testing.T) {
} else if tlsCfg.ServerName != "" {
t.Errorf("tlsCfg was mutated ServerName (%s) should be empty parsing DSN (%s).", name, tst)
}
}
DeregisterTLSConfig("utils_test")
func TestDSNTLSConfig(t *testing.T) {
expectedServerName := "example.com"
dsn := "tcp(example.com:1234)/?tls=true"
cfg, err := ParseDSN(dsn)
if err != nil {
t.Error(err.Error())
}
if cfg.tls == nil {
t.Error("cfg.tls should not be nil")
}
if cfg.tls.ServerName != expectedServerName {
t.Errorf("cfg.tls.ServerName should be %q, got %q (host with port)", expectedServerName, cfg.tls.ServerName)
}
dsn = "tcp(example.com)/?tls=true"
cfg, err = ParseDSN(dsn)
if err != nil {
t.Error(err.Error())
}
if cfg.tls == nil {
t.Error("cfg.tls should not be nil")
}
if cfg.tls.ServerName != expectedServerName {
t.Errorf("cfg.tls.ServerName should be %q, got %q (host without port)", expectedServerName, cfg.tls.ServerName)
}
}
func TestDSNWithCustomTLSQueryEscape(t *testing.T) {
@ -176,6 +255,7 @@ func TestDSNWithCustomTLSQueryEscape(t *testing.T) {
tlsCfg := tls.Config{ServerName: name}
RegisterTLSConfig(configKey, &tlsCfg)
defer DeregisterTLSConfig(configKey)
cfg, err := ParseDSN(dsn)
@ -225,21 +305,103 @@ func TestDSNUnsafeCollation(t *testing.T) {
func TestParamsAreSorted(t *testing.T) {
expected := "/dbname?interpolateParams=true&foobar=baz&quux=loo"
dsn := &Config{
DBName: "dbname",
InterpolateParams: true,
AllowNativePasswords: true,
Params: map[string]string{
cfg := NewConfig()
cfg.DBName = "dbname"
cfg.InterpolateParams = true
cfg.Params = map[string]string{
"quux": "loo",
"foobar": "baz",
},
}
actual := dsn.FormatDSN()
actual := cfg.FormatDSN()
if actual != expected {
t.Errorf("generic Config.Params were not sorted: want %#v, got %#v", expected, actual)
}
}
func TestCloneConfig(t *testing.T) {
RegisterServerPubKey("testKey", testPubKeyRSA)
defer DeregisterServerPubKey("testKey")
expectedServerName := "example.com"
dsn := "tcp(example.com:1234)/?tls=true&foobar=baz&serverPubKey=testKey"
cfg, err := ParseDSN(dsn)
if err != nil {
t.Fatal(err.Error())
}
cfg2 := cfg.Clone()
if cfg == cfg2 {
t.Errorf("Config.Clone did not create a separate config struct")
}
if cfg2.tls.ServerName != expectedServerName {
t.Errorf("cfg.tls.ServerName should be %q, got %q (host with port)", expectedServerName, cfg.tls.ServerName)
}
cfg2.tls.ServerName = "example2.com"
if cfg.tls.ServerName == cfg2.tls.ServerName {
t.Errorf("changed cfg.tls.Server name should not propagate to original Config")
}
if _, ok := cfg2.Params["foobar"]; !ok {
t.Errorf("cloned Config is missing custom params")
}
delete(cfg2.Params, "foobar")
if _, ok := cfg.Params["foobar"]; !ok {
t.Errorf("custom params in cloned Config should not propagate to original Config")
}
if !reflect.DeepEqual(cfg.pubKey, cfg2.pubKey) {
t.Errorf("public key in Config should be identical")
}
}
func TestNormalizeTLSConfig(t *testing.T) {
tt := []struct {
tlsConfig string
want *tls.Config
}{
{"", nil},
{"false", nil},
{"true", &tls.Config{ServerName: "myserver"}},
{"skip-verify", &tls.Config{InsecureSkipVerify: true}},
{"preferred", &tls.Config{InsecureSkipVerify: true}},
{"test_tls_config", &tls.Config{ServerName: "myServerName"}},
}
RegisterTLSConfig("test_tls_config", &tls.Config{ServerName: "myServerName"})
defer func() { DeregisterTLSConfig("test_tls_config") }()
for _, tc := range tt {
t.Run(tc.tlsConfig, func(t *testing.T) {
cfg := &Config{
Addr: "myserver:3306",
TLSConfig: tc.tlsConfig,
}
cfg.normalize()
if cfg.tls == nil {
if tc.want != nil {
t.Fatal("wanted a tls config but got nil instead")
}
return
}
if cfg.tls.ServerName != tc.want.ServerName {
t.Errorf("tls.ServerName doesn't match (want: '%s', got: '%s')",
tc.want.ServerName, cfg.tls.ServerName)
}
if cfg.tls.InsecureSkipVerify != tc.want.InsecureSkipVerify {
t.Errorf("tls.InsecureSkipVerify doesn't match (want: %T, got :%T)",
tc.want.InsecureSkipVerify, cfg.tls.InsecureSkipVerify)
}
})
}
}
func BenchmarkParseDSN(b *testing.B) {
b.ReportAllocs()


@ -9,10 +9,8 @@
package mysql
import (
"database/sql/driver"
"errors"
"fmt"
"io"
"log"
"os"
)
@ -65,74 +63,3 @@ type MySQLError struct {
func (me *MySQLError) Error() string {
return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
}
// MySQLWarnings is an error type which represents a group of one or more MySQL
// warnings
type MySQLWarnings []MySQLWarning
func (mws MySQLWarnings) Error() string {
var msg string
for i, warning := range mws {
if i > 0 {
msg += "\r\n"
}
msg += fmt.Sprintf(
"%s %s: %s",
warning.Level,
warning.Code,
warning.Message,
)
}
return msg
}
// MySQLWarning is an error type which represents a single MySQL warning.
// Warnings are returned in groups only. See MySQLWarnings
type MySQLWarning struct {
Level string
Code string
Message string
}
func (mc *mysqlConn) getWarnings() (err error) {
rows, err := mc.Query("SHOW WARNINGS", nil)
if err != nil {
return
}
var warnings = MySQLWarnings{}
var values = make([]driver.Value, 3)
for {
err = rows.Next(values)
switch err {
case nil:
warning := MySQLWarning{}
if raw, ok := values[0].([]byte); ok {
warning.Level = string(raw)
} else {
warning.Level = fmt.Sprintf("%s", values[0])
}
if raw, ok := values[1].([]byte); ok {
warning.Code = string(raw)
} else {
warning.Code = fmt.Sprintf("%s", values[1])
}
if raw, ok := values[2].([]byte); ok {
warning.Message = string(raw)
} else {
warning.Message = fmt.Sprintf("%s", values[0])
}
warnings = append(warnings, warning)
case io.EOF:
return warnings
default:
rows.Close()
return
}
}
}
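
With the warnings helpers above removed, server-side error details are exposed only through the MySQLError type kept in this file. A minimal sketch of checking the error number; the statement and table name are placeholders:

package example

import (
	"database/sql"
	"fmt"

	"github.com/go-sql-driver/mysql"
)

// reportDuplicate shows how callers can branch on the MySQL server error code.
func reportDuplicate(db *sql.DB) {
	_, err := db.Exec("INSERT INTO users (id) VALUES (1)") // placeholder statement
	if me, ok := err.(*mysql.MySQLError); ok {
		// me.Number is the server error code, e.g. 1062 for a duplicate key.
		fmt.Println("server error:", me.Number, me.Message)
	}
}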

vendor/github.com/go-sql-driver/mysql/fields.go

@ -0,0 +1,194 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"database/sql"
"reflect"
)
func (mf *mysqlField) typeDatabaseName() string {
switch mf.fieldType {
case fieldTypeBit:
return "BIT"
case fieldTypeBLOB:
if mf.charSet != collations[binaryCollation] {
return "TEXT"
}
return "BLOB"
case fieldTypeDate:
return "DATE"
case fieldTypeDateTime:
return "DATETIME"
case fieldTypeDecimal:
return "DECIMAL"
case fieldTypeDouble:
return "DOUBLE"
case fieldTypeEnum:
return "ENUM"
case fieldTypeFloat:
return "FLOAT"
case fieldTypeGeometry:
return "GEOMETRY"
case fieldTypeInt24:
return "MEDIUMINT"
case fieldTypeJSON:
return "JSON"
case fieldTypeLong:
return "INT"
case fieldTypeLongBLOB:
if mf.charSet != collations[binaryCollation] {
return "LONGTEXT"
}
return "LONGBLOB"
case fieldTypeLongLong:
return "BIGINT"
case fieldTypeMediumBLOB:
if mf.charSet != collations[binaryCollation] {
return "MEDIUMTEXT"
}
return "MEDIUMBLOB"
case fieldTypeNewDate:
return "DATE"
case fieldTypeNewDecimal:
return "DECIMAL"
case fieldTypeNULL:
return "NULL"
case fieldTypeSet:
return "SET"
case fieldTypeShort:
return "SMALLINT"
case fieldTypeString:
if mf.charSet == collations[binaryCollation] {
return "BINARY"
}
return "CHAR"
case fieldTypeTime:
return "TIME"
case fieldTypeTimestamp:
return "TIMESTAMP"
case fieldTypeTiny:
return "TINYINT"
case fieldTypeTinyBLOB:
if mf.charSet != collations[binaryCollation] {
return "TINYTEXT"
}
return "TINYBLOB"
case fieldTypeVarChar:
if mf.charSet == collations[binaryCollation] {
return "VARBINARY"
}
return "VARCHAR"
case fieldTypeVarString:
if mf.charSet == collations[binaryCollation] {
return "VARBINARY"
}
return "VARCHAR"
case fieldTypeYear:
return "YEAR"
default:
return ""
}
}
var (
scanTypeFloat32 = reflect.TypeOf(float32(0))
scanTypeFloat64 = reflect.TypeOf(float64(0))
scanTypeInt8 = reflect.TypeOf(int8(0))
scanTypeInt16 = reflect.TypeOf(int16(0))
scanTypeInt32 = reflect.TypeOf(int32(0))
scanTypeInt64 = reflect.TypeOf(int64(0))
scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
scanTypeNullTime = reflect.TypeOf(NullTime{})
scanTypeUint8 = reflect.TypeOf(uint8(0))
scanTypeUint16 = reflect.TypeOf(uint16(0))
scanTypeUint32 = reflect.TypeOf(uint32(0))
scanTypeUint64 = reflect.TypeOf(uint64(0))
scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
scanTypeUnknown = reflect.TypeOf(new(interface{}))
)
type mysqlField struct {
tableName string
name string
length uint32
flags fieldFlag
fieldType fieldType
decimals byte
charSet uint8
}
func (mf *mysqlField) scanType() reflect.Type {
switch mf.fieldType {
case fieldTypeTiny:
if mf.flags&flagNotNULL != 0 {
if mf.flags&flagUnsigned != 0 {
return scanTypeUint8
}
return scanTypeInt8
}
return scanTypeNullInt
case fieldTypeShort, fieldTypeYear:
if mf.flags&flagNotNULL != 0 {
if mf.flags&flagUnsigned != 0 {
return scanTypeUint16
}
return scanTypeInt16
}
return scanTypeNullInt
case fieldTypeInt24, fieldTypeLong:
if mf.flags&flagNotNULL != 0 {
if mf.flags&flagUnsigned != 0 {
return scanTypeUint32
}
return scanTypeInt32
}
return scanTypeNullInt
case fieldTypeLongLong:
if mf.flags&flagNotNULL != 0 {
if mf.flags&flagUnsigned != 0 {
return scanTypeUint64
}
return scanTypeInt64
}
return scanTypeNullInt
case fieldTypeFloat:
if mf.flags&flagNotNULL != 0 {
return scanTypeFloat32
}
return scanTypeNullFloat
case fieldTypeDouble:
if mf.flags&flagNotNULL != 0 {
return scanTypeFloat64
}
return scanTypeNullFloat
case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
fieldTypeTime:
return scanTypeRawBytes
case fieldTypeDate, fieldTypeNewDate,
fieldTypeTimestamp, fieldTypeDateTime:
// NullTime is always returned for more consistent behavior as it can
// handle both cases of parseTime regardless if the field is nullable.
return scanTypeNullTime
default:
return scanTypeUnknown
}
}
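
These mappings surface through database/sql's ColumnType API; a short inspection sketch with a placeholder query:

package example

import (
	"database/sql"
	"fmt"
	"log"
)

// describeColumns prints the driver-reported metadata for each result column.
func describeColumns(db *sql.DB) {
	rows, err := db.Query("SELECT id, name, created_at FROM users") // placeholder table
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	cols, err := rows.ColumnTypes()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range cols {
		// DatabaseTypeName is produced by typeDatabaseName, ScanType by scanType above.
		fmt.Println(c.Name(), c.DatabaseTypeName(), c.ScanType())
	}
}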

vendor/github.com/go-sql-driver/mysql/go.mod

@ -0,0 +1,3 @@
module github.com/go-sql-driver/mysql
go 1.10


@ -174,8 +174,7 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
// read OK packet
if err == nil {
_, err = mc.readResultOK()
return err
return mc.readResultOK()
}
mc.readPacket()

vendor/github.com/go-sql-driver/mysql/nulltime.go

@ -0,0 +1,50 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"database/sql/driver"
"fmt"
"time"
)
// Scan implements the Scanner interface.
// The value type must be time.Time or string / []byte (formatted time-string),
// otherwise Scan fails.
func (nt *NullTime) Scan(value interface{}) (err error) {
if value == nil {
nt.Time, nt.Valid = time.Time{}, false
return
}
switch v := value.(type) {
case time.Time:
nt.Time, nt.Valid = v, true
return
case []byte:
nt.Time, err = parseDateTime(string(v), time.UTC)
nt.Valid = (err == nil)
return
case string:
nt.Time, err = parseDateTime(v, time.UTC)
nt.Valid = (err == nil)
return
}
nt.Valid = false
return fmt.Errorf("Can't convert %T to time.Time", value)
}
// Value implements the driver Valuer interface.
func (nt NullTime) Value() (driver.Value, error) {
if !nt.Valid {
return nil, nil
}
return nt.Time, nil
}
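The Scan and Value methods above let NullTime be used directly as a scan destination or statement argument. A hedged usage sketch, assuming the usual imports, an already-open *sql.DB named db, and an invented table (none of this is part of the commit):

// illustrative only; mirrors the example in the NullTime doc comment
var nt mysql.NullTime // sql.NullTime behaves the same on go1.13+ builds of this driver
err := db.QueryRow("SELECT deleted_at FROM example WHERE id = ?", 1).Scan(&nt)
if err != nil {
	log.Fatal(err)
}
if nt.Valid {
	fmt.Println("deleted at", nt.Time)
} else {
	fmt.Println("deleted_at is NULL")
}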

View File

@ -0,0 +1,31 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build go1.13
package mysql
import (
"database/sql"
)
// NullTime represents a time.Time that may be NULL.
// NullTime implements the Scanner interface so
// it can be used as a scan destination:
//
// var nt NullTime
// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
// ...
// if nt.Valid {
// // use nt.Time
// } else {
// // NULL value
// }
//
// This NullTime implementation is not driver-specific
type NullTime sql.NullTime

View File

@ -0,0 +1,34 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build !go1.13
package mysql
import (
"time"
)
// NullTime represents a time.Time that may be NULL.
// NullTime implements the Scanner interface so
// it can be used as a scan destination:
//
// var nt NullTime
// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
// ...
// if nt.Valid {
// // use nt.Time
// } else {
// // NULL value
// }
//
// This NullTime implementation is not driver-specific
type NullTime struct {
Time time.Time
Valid bool // Valid is true if Time is not NULL
}

62
vendor/github.com/go-sql-driver/mysql/nulltime_test.go generated vendored Normal file
View File

@ -0,0 +1,62 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"database/sql"
"database/sql/driver"
"testing"
"time"
)
var (
// Check implementation of interfaces
_ driver.Valuer = NullTime{}
_ sql.Scanner = (*NullTime)(nil)
)
func TestScanNullTime(t *testing.T) {
var scanTests = []struct {
in interface{}
error bool
valid bool
time time.Time
}{
{tDate, false, true, tDate},
{sDate, false, true, tDate},
{[]byte(sDate), false, true, tDate},
{tDateTime, false, true, tDateTime},
{sDateTime, false, true, tDateTime},
{[]byte(sDateTime), false, true, tDateTime},
{tDate0, false, true, tDate0},
{sDate0, false, true, tDate0},
{[]byte(sDate0), false, true, tDate0},
{sDateTime0, false, true, tDate0},
{[]byte(sDateTime0), false, true, tDate0},
{"", true, false, tDate0},
{"1234", true, false, tDate0},
{0, true, false, tDate0},
}
var nt = NullTime{}
var err error
for _, tst := range scanTests {
err = nt.Scan(tst.in)
if (err != nil) != tst.error {
t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil))
}
if nt.Valid != tst.valid {
t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid)
}
if nt.Time != tst.time {
t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time)
}
}
}

View File

@ -51,7 +51,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
mc.sequence++
// packets with length 0 terminate a previous packet which is a
// multiple of (2^24)1 bytes long
// multiple of (2^24)-1 bytes long
if pktLen == 0 {
// there was no previous packet
if prevData == nil {
@ -96,6 +96,35 @@ func (mc *mysqlConn) writePacket(data []byte) error {
return ErrPktTooLarge
}
// Perform a stale connection check. We only perform this check for
// the first query on a connection that has been checked out of the
// connection pool: a fresh connection from the pool is more likely
// to be stale, and it has not performed any previous writes that
// could cause data corruption, so it's safe to return ErrBadConn
// if the check fails.
if mc.reset {
mc.reset = false
conn := mc.netConn
if mc.rawConn != nil {
conn = mc.rawConn
}
var err error
// If this connection has a ReadTimeout which we've been setting on
// reads, reset it to its default value before we attempt a non-blocking
// read, otherwise the scheduler will just time us out before we can read
if mc.cfg.ReadTimeout != 0 {
err = conn.SetReadDeadline(time.Time{})
}
if err == nil && mc.cfg.CheckConnLiveness {
err = connCheck(conn)
}
if err != nil {
errLog.Print("closing bad idle connection: ", err)
mc.Close()
return driver.ErrBadConn
}
}
for {
var size int
if pktLen >= maxPacketSize {
@ -149,24 +178,29 @@ func (mc *mysqlConn) writePacket(data []byte) error {
}
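The stale-connection check added to writePacket above is gated by cfg.CheckConnLiveness, so callers can opt out through the DSN. A hedged sketch, assuming the DSN parameter follows the usual lowerCamel naming of the Config field (not verified against this exact vendored revision):

// illustrative only
db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test?checkConnLiveness=false")
if err != nil {
	log.Fatal(err)
}
defer db.Close()
// with the check left at its default (enabled), a pooled connection is probed
// before its first write and replaced via driver.ErrBadConn if it is dead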
/******************************************************************************
* Initialisation Process *
* Initialization Process *
******************************************************************************/
// Handshake Initialization Packet
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
func (mc *mysqlConn) readInitPacket() ([]byte, error) {
data, err := mc.readPacket()
func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) {
data, err = mc.readPacket()
if err != nil {
return nil, err
// for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
// in connection initialization we don't risk retrying non-idempotent actions.
if err == ErrInvalidConn {
return nil, "", driver.ErrBadConn
}
return
}
if data[0] == iERR {
return nil, mc.handleErrorPacket(data)
return nil, "", mc.handleErrorPacket(data)
}
// protocol version [1 byte]
if data[0] < minProtocolVersion {
return nil, fmt.Errorf(
return nil, "", fmt.Errorf(
"unsupported protocol version %d. Version %d or higher is required",
data[0],
minProtocolVersion,
@ -178,7 +212,7 @@ func (mc *mysqlConn) readInitPacket() ([]byte, error) {
pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4
// first part of the password cipher [8 bytes]
cipher := data[pos : pos+8]
authData := data[pos : pos+8]
// (filler) always 0x00 [1 byte]
pos += 8 + 1
@ -186,10 +220,14 @@ func (mc *mysqlConn) readInitPacket() ([]byte, error) {
// capability flags (lower 2 bytes) [2 bytes]
mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
if mc.flags&clientProtocol41 == 0 {
return nil, ErrOldProtocol
return nil, "", ErrOldProtocol
}
if mc.flags&clientSSL == 0 && mc.cfg.tls != nil {
return nil, ErrNoTLS
if mc.cfg.TLSConfig == "preferred" {
mc.cfg.tls = nil
} else {
return nil, "", ErrNoTLS
}
}
pos += 2
@ -213,32 +251,32 @@ func (mc *mysqlConn) readInitPacket() ([]byte, error) {
//
// The official Python library uses the fixed length 12
// which seems to work but technically could have a hidden bug.
cipher = append(cipher, data[pos:pos+12]...)
authData = append(authData, data[pos:pos+12]...)
pos += 13
// TODO: Verify string termination
// EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
// \NUL otherwise
//
//if data[len(data)-1] == 0 {
// return
//}
//return ErrMalformPkt
if end := bytes.IndexByte(data[pos:], 0x00); end != -1 {
plugin = string(data[pos : pos+end])
} else {
plugin = string(data[pos:])
}
// make a memory safe copy of the cipher slice
var b [20]byte
copy(b[:], cipher)
return b[:], nil
copy(b[:], authData)
return b[:], plugin, nil
}
// make a memory safe copy of the cipher slice
var b [8]byte
copy(b[:], cipher)
return b[:], nil
copy(b[:], authData)
return b[:], plugin, nil
}
// Client Authentication Packet
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error {
// Adjust client flags based on server support
clientFlags := clientProtocol41 |
clientSecureConn |
@ -262,10 +300,17 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
clientFlags |= clientMultiStatements
}
// User Password
scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd))
// encode length of the auth plugin data
var authRespLEIBuf [9]byte
authRespLen := len(authResp)
authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen))
if len(authRespLEI) > 1 {
// if the length can not be written in 1 byte, it must be written as a
// length encoded integer
clientFlags |= clientPluginAuthLenEncClientData
}
pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + 1 + len(scrambleBuff) + 21 + 1
pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1
// To specify a db name
if n := len(mc.cfg.DBName); n > 0 {
@ -274,10 +319,10 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
}
// Calculate packet length and get buffer with that size
data := mc.buf.takeSmallBuffer(pktLen + 4)
if data == nil {
data, err := mc.buf.takeSmallBuffer(pktLen + 4)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
errLog.Print(err)
return errBadConnNoWrite
}
@ -316,6 +361,7 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
if err := tlsConn.Handshake(); err != nil {
return err
}
mc.rawConn = mc.netConn
mc.netConn = tlsConn
mc.buf.nc = tlsConn
}
@ -333,9 +379,9 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
data[pos] = 0x00
pos++
// ScrambleBuffer [length encoded integer]
data[pos] = byte(len(scrambleBuff))
pos += 1 + copy(data[pos+1:], scrambleBuff)
// Auth Data [length encoded integer]
pos += copy(data[pos:], authRespLEI)
pos += copy(data[pos:], authResp)
// Databasename [null terminated string]
if len(mc.cfg.DBName) > 0 {
@ -344,76 +390,26 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
pos++
}
// Assume native client during response
pos += copy(data[pos:], "mysql_native_password")
pos += copy(data[pos:], plugin)
data[pos] = 0x00
pos++
// Send Auth packet
return mc.writePacket(data)
return mc.writePacket(data[:pos])
}
// Client old authentication packet
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error {
// User password
// https://dev.mysql.com/doc/internals/en/old-password-authentication.html
// Old password authentication only need and will need 8-byte challenge.
scrambleBuff := scrambleOldPassword(cipher[:8], []byte(mc.cfg.Passwd))
// Calculate the packet length and add a tailing 0
pktLen := len(scrambleBuff) + 1
data := mc.buf.takeSmallBuffer(4 + pktLen)
if data == nil {
func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
pktLen := 4 + len(authData)
data, err := mc.buf.takeSmallBuffer(pktLen)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
errLog.Print(err)
return errBadConnNoWrite
}
// Add the scrambled password [null terminated string]
copy(data[4:], scrambleBuff)
data[4+pktLen-1] = 0x00
return mc.writePacket(data)
}
// Client clear text authentication packet
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
func (mc *mysqlConn) writeClearAuthPacket() error {
// Calculate the packet length and add a tailing 0
pktLen := len(mc.cfg.Passwd) + 1
data := mc.buf.takeSmallBuffer(4 + pktLen)
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
return errBadConnNoWrite
}
// Add the clear password [null terminated string]
copy(data[4:], mc.cfg.Passwd)
data[4+pktLen-1] = 0x00
return mc.writePacket(data)
}
// Native password authentication method
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
func (mc *mysqlConn) writeNativeAuthPacket(cipher []byte) error {
// https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
// Native password authentication only need and will need 20-byte challenge.
scrambleBuff := scramblePassword(cipher[0:20], []byte(mc.cfg.Passwd))
// Calculate the packet length and add a tailing 0
pktLen := len(scrambleBuff)
data := mc.buf.takeSmallBuffer(4 + pktLen)
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
return errBadConnNoWrite
}
// Add the scramble
copy(data[4:], scrambleBuff)
// Add the auth data [EOF]
copy(data[4:], authData)
return mc.writePacket(data)
}
@ -425,10 +421,10 @@ func (mc *mysqlConn) writeCommandPacket(command byte) error {
// Reset Packet Sequence
mc.sequence = 0
data := mc.buf.takeSmallBuffer(4 + 1)
if data == nil {
data, err := mc.buf.takeSmallBuffer(4 + 1)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
errLog.Print(err)
return errBadConnNoWrite
}
@ -444,10 +440,10 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
mc.sequence = 0
pktLen := 1 + len(arg)
data := mc.buf.takeBuffer(pktLen + 4)
if data == nil {
data, err := mc.buf.takeBuffer(pktLen + 4)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
errLog.Print(err)
return errBadConnNoWrite
}
@ -465,10 +461,10 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
// Reset Packet Sequence
mc.sequence = 0
data := mc.buf.takeSmallBuffer(4 + 1 + 4)
if data == nil {
data, err := mc.buf.takeSmallBuffer(4 + 1 + 4)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
errLog.Print(err)
return errBadConnNoWrite
}
@ -489,45 +485,50 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
* Result Packets *
******************************************************************************/
// Returns error if Packet is not an 'Result OK'-Packet
func (mc *mysqlConn) readResultOK() ([]byte, error) {
func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
data, err := mc.readPacket()
if err == nil {
if err != nil {
return nil, "", err
}
// packet indicator
switch data[0] {
case iOK:
return nil, mc.handleOkPacket(data)
return nil, "", mc.handleOkPacket(data)
case iAuthMoreData:
return data[1:], "", err
case iEOF:
if len(data) > 1 {
pluginEndIndex := bytes.IndexByte(data, 0x00)
plugin := string(data[1:pluginEndIndex])
cipher := data[pluginEndIndex+1:]
switch plugin {
case "mysql_old_password":
// using old_passwords
return cipher, ErrOldPassword
case "mysql_clear_password":
// using clear text password
return cipher, ErrCleartextPassword
case "mysql_native_password":
// using mysql default authentication method
return cipher, ErrNativePassword
default:
return cipher, ErrUnknownPlugin
}
}
if len(data) == 1 {
// https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
return nil, ErrOldPassword
return nil, "mysql_old_password", nil
}
pluginEndIndex := bytes.IndexByte(data, 0x00)
if pluginEndIndex < 0 {
return nil, "", ErrMalformPkt
}
plugin := string(data[1:pluginEndIndex])
authData := data[pluginEndIndex+1:]
return authData, plugin, nil
default: // Error otherwise
return nil, mc.handleErrorPacket(data)
return nil, "", mc.handleErrorPacket(data)
}
}
return nil, err
// Returns error if Packet is not an 'Result OK'-Packet
func (mc *mysqlConn) readResultOK() error {
data, err := mc.readPacket()
if err != nil {
return err
}
if data[0] == iOK {
return mc.handleOkPacket(data)
}
return mc.handleErrorPacket(data)
}
// Result Set Header Packet
@ -571,7 +572,8 @@ func (mc *mysqlConn) handleErrorPacket(data []byte) error {
errno := binary.LittleEndian.Uint16(data[1:3])
// 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
if errno == 1792 && mc.cfg.RejectReadOnly {
// 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
// Oops; we are connected to a read-only connection, and won't be able
// to issue any write statements. Since RejectReadOnly is configured,
// we throw away this connection hoping this one would have write
@ -624,14 +626,7 @@ func (mc *mysqlConn) handleOkPacket(data []byte) error {
}
// warning count [2 bytes]
if !mc.strict {
return nil
}
pos := 1 + n + m + 2
if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 {
return mc.getWarnings()
}
return nil
}
@ -703,14 +698,21 @@ func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
if err != nil {
return nil, err
}
pos += n
// Filler [uint8]
pos++
// Charset [charset, collation uint8]
columns[i].charSet = data[pos]
pos += 2
// Length [uint32]
pos += n + 1 + 2 + 4
columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
pos += 4
// Field type [uint8]
columns[i].fieldType = data[pos]
columns[i].fieldType = fieldType(data[pos])
pos++
// Flags [uint16]
@ -843,14 +845,7 @@ func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
// Reserved [8 bit]
// Warning count [16 bit uint]
if !stmt.mc.strict {
return columnCount, nil
}
// Check for warnings count > 0, only available in MySQL > 4.1
if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 {
return columnCount, stmt.mc.getWarnings()
}
return columnCount, nil
}
return 0, err
@ -922,19 +917,27 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
const minPktLen = 4 + 1 + 4 + 1 + 4
mc := stmt.mc
// Determine threshold dynamically to avoid packet size shortage.
longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1)
if longDataSize < 64 {
longDataSize = 64
}
// Reset packet-sequence
mc.sequence = 0
var data []byte
var err error
if len(args) == 0 {
data = mc.buf.takeBuffer(minPktLen)
data, err = mc.buf.takeBuffer(minPktLen)
} else {
data = mc.buf.takeCompleteBuffer()
data, err = mc.buf.takeCompleteBuffer()
// In this case the len(data) == cap(data) which is used to optimise the flow below.
}
if data == nil {
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
errLog.Print(err)
return errBadConnNoWrite
}
@ -960,7 +963,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
pos := minPktLen
var nullMask []byte
if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) {
if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= cap(data) {
// buffer has to be extended but we don't know by how much so
// we depend on append after all data with known sizes fit.
// We stop at that because we deal with a lot of columns here
@ -969,10 +972,11 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
copy(tmp[:pos], data[:pos])
data = tmp
nullMask = data[pos : pos+maskLen]
// No need to clean nullMask as make ensures that.
pos += maskLen
} else {
nullMask = data[pos : pos+maskLen]
for i := 0; i < maskLen; i++ {
for i := range nullMask {
nullMask[i] = 0
}
pos += maskLen
@ -994,7 +998,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// build NULL-bitmap
if arg == nil {
nullMask[i/8] |= 1 << (uint(i) & 7)
paramTypes[i+i] = fieldTypeNULL
paramTypes[i+i] = byte(fieldTypeNULL)
paramTypes[i+i+1] = 0x00
continue
}
@ -1002,7 +1006,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// cache types and values
switch v := arg.(type) {
case int64:
paramTypes[i+i] = fieldTypeLongLong
paramTypes[i+i] = byte(fieldTypeLongLong)
paramTypes[i+i+1] = 0x00
if cap(paramValues)-len(paramValues)-8 >= 0 {
@ -1017,8 +1021,24 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
)
}
case uint64:
paramTypes[i+i] = byte(fieldTypeLongLong)
paramTypes[i+i+1] = 0x80 // type is unsigned
if cap(paramValues)-len(paramValues)-8 >= 0 {
paramValues = paramValues[:len(paramValues)+8]
binary.LittleEndian.PutUint64(
paramValues[len(paramValues)-8:],
uint64(v),
)
} else {
paramValues = append(paramValues,
uint64ToBytes(uint64(v))...,
)
}
case float64:
paramTypes[i+i] = fieldTypeDouble
paramTypes[i+i] = byte(fieldTypeDouble)
paramTypes[i+i+1] = 0x00
if cap(paramValues)-len(paramValues)-8 >= 0 {
@ -1034,7 +1054,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
case bool:
paramTypes[i+i] = fieldTypeTiny
paramTypes[i+i] = byte(fieldTypeTiny)
paramTypes[i+i+1] = 0x00
if v {
@ -1046,10 +1066,10 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
case []byte:
// Common case (non-nil value) first
if v != nil {
paramTypes[i+i] = fieldTypeString
paramTypes[i+i] = byte(fieldTypeString)
paramTypes[i+i+1] = 0x00
if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
if len(v) < longDataSize {
paramValues = appendLengthEncodedInteger(paramValues,
uint64(len(v)),
)
@ -1064,14 +1084,14 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// Handle []byte(nil) as a NULL value
nullMask[i/8] |= 1 << (uint(i) & 7)
paramTypes[i+i] = fieldTypeNULL
paramTypes[i+i] = byte(fieldTypeNULL)
paramTypes[i+i+1] = 0x00
case string:
paramTypes[i+i] = fieldTypeString
paramTypes[i+i] = byte(fieldTypeString)
paramTypes[i+i+1] = 0x00
if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
if len(v) < longDataSize {
paramValues = appendLengthEncodedInteger(paramValues,
uint64(len(v)),
)
@ -1083,7 +1103,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
case time.Time:
paramTypes[i+i] = fieldTypeString
paramTypes[i+i] = byte(fieldTypeString)
paramTypes[i+i+1] = 0x00
var a [64]byte
@ -1109,7 +1129,10 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// In that case we must build the data packet with the new values buffer
if valuesCap != cap(paramValues) {
data = append(data[:pos], paramValues...)
mc.buf.buf = data
if err = mc.buf.store(data); err != nil {
errLog.Print(err)
return errBadConnNoWrite
}
}
pos += len(paramValues)
@ -1157,10 +1180,11 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
}
return io.EOF
}
mc := rows.mc
rows.mc = nil
// Error otherwise
return rows.mc.handleErrorPacket(data)
return mc.handleErrorPacket(data)
}
// NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
@ -1278,7 +1302,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
rows.rs.columns[i].decimals,
)
}
dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true)
dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen)
case rows.mc.parseTime:
dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
default:
@ -1298,7 +1322,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
)
}
}
dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, false)
dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen)
}
if err == nil {

View File

@ -9,6 +9,7 @@
package mysql
import (
"bytes"
"errors"
"net"
"testing"
@ -26,9 +27,10 @@ type mockConn struct {
laddr net.Addr
raddr net.Addr
data []byte
written []byte
queuedReplies [][]byte
closed bool
read int
written int
reads int
writes int
maxReads int
@ -61,7 +63,12 @@ func (m *mockConn) Write(b []byte) (n int, err error) {
}
n = len(b)
m.written += n
m.written = append(m.written, b...)
if n > 0 && len(m.queuedReplies) > 0 {
m.data = m.queuedReplies[0]
m.queuedReplies = m.queuedReplies[1:]
}
return
}
func (m *mockConn) Close() error {
@ -87,6 +94,19 @@ func (m *mockConn) SetWriteDeadline(t time.Time) error {
// make sure mockConn implements the net.Conn interface
var _ net.Conn = new(mockConn)
func newRWMockConn(sequence uint8) (*mockConn, *mysqlConn) {
conn := new(mockConn)
mc := &mysqlConn{
buf: newBuffer(conn),
cfg: NewConfig(),
netConn: conn,
closech: make(chan struct{}),
maxAllowedPacket: defaultMaxAllowedPacket,
sequence: sequence,
}
return conn, mc
}
func TestReadPacketSingleByte(t *testing.T) {
conn := new(mockConn)
mc := &mysqlConn{
@ -280,3 +300,37 @@ func TestReadPacketFail(t *testing.T) {
t.Errorf("expected ErrInvalidConn, got %v", err)
}
}
// https://github.com/go-sql-driver/mysql/pull/801
// not-NUL terminated plugin_name in init packet
func TestRegression801(t *testing.T) {
conn := new(mockConn)
mc := &mysqlConn{
buf: newBuffer(conn),
cfg: new(Config),
sequence: 42,
closech: make(chan struct{}),
}
conn.data = []byte{72, 0, 0, 42, 10, 53, 46, 53, 46, 56, 0, 165, 0, 0, 0,
60, 70, 63, 58, 68, 104, 34, 97, 0, 223, 247, 33, 2, 0, 15, 128, 21, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 98, 120, 114, 47, 85, 75, 109, 99, 51, 77,
50, 64, 0, 109, 121, 115, 113, 108, 95, 110, 97, 116, 105, 118, 101, 95,
112, 97, 115, 115, 119, 111, 114, 100}
conn.maxReads = 1
authData, pluginName, err := mc.readHandshakePacket()
if err != nil {
t.Fatalf("got error: %v", err)
}
if pluginName != "mysql_native_password" {
t.Errorf("expected plugin name 'mysql_native_password', got '%s'", pluginName)
}
expectedAuthData := []byte{60, 70, 63, 58, 68, 104, 34, 97, 98, 120, 114,
47, 85, 75, 109, 99, 51, 77, 50, 64}
if !bytes.Equal(authData, expectedAuthData) {
t.Errorf("expected authData '%v', got '%v'", expectedAuthData, authData)
}
}

View File

@ -11,16 +11,10 @@ package mysql
import (
"database/sql/driver"
"io"
"math"
"reflect"
)
type mysqlField struct {
tableName string
name string
flags fieldFlag
fieldType byte
decimals byte
}
type resultSet struct {
columns []mysqlField
columnNames []string
@ -65,6 +59,44 @@ func (rows *mysqlRows) Columns() []string {
return columns
}
func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
return rows.rs.columns[i].typeDatabaseName()
}
// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
// return int64(rows.rs.columns[i].length), true
// }
func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
return rows.rs.columns[i].flags&flagNotNULL == 0, true
}
func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
column := rows.rs.columns[i]
decimals := int64(column.decimals)
switch column.fieldType {
case fieldTypeDecimal, fieldTypeNewDecimal:
if decimals > 0 {
return int64(column.length) - 2, decimals, true
}
return int64(column.length) - 1, decimals, true
case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
return decimals, decimals, true
case fieldTypeFloat, fieldTypeDouble:
if decimals == 0x1f {
return math.MaxInt64, math.MaxInt64, true
}
return math.MaxInt64, decimals, true
}
return 0, 0, false
}
func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
return rows.rs.columns[i].scanType()
}
func (rows *mysqlRows) Close() (err error) {
if f := rows.finish; f != nil {
f()
@ -79,6 +111,13 @@ func (rows *mysqlRows) Close() (err error) {
return err
}
// flip the buffer for this connection if we need to drain it.
// note that for a successful query (i.e. one where rows.next()
// has been called until it returns false), `rows.mc` will be nil
// by the time the user calls `(*Rows).Close`, so we won't reach this
// see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47
mc.buf.flip()
// Remove unread packets from stream
if !rows.rs.done {
err = mc.readUntilEOF()

View File

@ -13,7 +13,6 @@ import (
"fmt"
"io"
"reflect"
"strconv"
)
type mysqlStmt struct {
@ -132,31 +131,74 @@ func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
type converter struct{}
// ConvertValue mirrors the reference/default converter in database/sql/driver
// with _one_ exception. We support uint64 with their high bit and the default
// implementation does not. This function should be kept in sync with
// database/sql/driver defaultConverter.ConvertValue() except for that
// deliberate difference.
func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
if driver.IsValue(v) {
return v, nil
}
if vr, ok := v.(driver.Valuer); ok {
sv, err := callValuerValue(vr)
if err != nil {
return nil, err
}
if !driver.IsValue(sv) {
return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
}
return sv, nil
}
rv := reflect.ValueOf(v)
switch rv.Kind() {
case reflect.Ptr:
// indirect pointers
if rv.IsNil() {
return nil, nil
}
} else {
return c.ConvertValue(rv.Elem().Interface())
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return rv.Int(), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
return int64(rv.Uint()), nil
case reflect.Uint64:
u64 := rv.Uint()
if u64 >= 1<<63 {
return strconv.FormatUint(u64, 10), nil
}
return int64(u64), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return rv.Uint(), nil
case reflect.Float32, reflect.Float64:
return rv.Float(), nil
case reflect.Bool:
return rv.Bool(), nil
case reflect.Slice:
ek := rv.Type().Elem().Kind()
if ek == reflect.Uint8 {
return rv.Bytes(), nil
}
return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, ek)
case reflect.String:
return rv.String(), nil
}
return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
}
var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
// callValuerValue returns vr.Value(), with one exception:
// If vr.Value is an auto-generated method on a pointer type and the
// pointer is nil, it would panic at runtime in the panicwrap
// method. Treat it like nil instead.
//
// This is so people can implement driver.Value on value types and
// still use nil pointers to those types to mean nil/NULL, just like
// string/*string.
//
// This is an exact copy of the same-named unexported function from the
// database/sql package.
func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
rv.IsNil() &&
rv.Type().Elem().Implements(valuerReflectType) {
return nil, nil
}
return vr.Value()
}
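The converter above deliberately diverges from database/sql's default converter in one place: uint64 values are passed through unchanged, so the full unsigned range can be bound as a parameter (the removed code used to stringify values with the high bit set). A short sketch, assuming an open *sql.DB named db and an invented table:

// illustrative only
stmt, err := db.Prepare("INSERT INTO counters (id, value) VALUES (?, ?)")
if err != nil {
	log.Fatal(err)
}
defer stmt.Close()

// ^uint64(0) has the high bit set; with this converter it stays a uint64 and
// writeExecutePacket encodes it as an unsigned LONGLONG (flag byte 0x80 above)
if _, err := stmt.Exec(1, ^uint64(0)); err != nil {
	log.Fatal(err)
}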

126
vendor/github.com/go-sql-driver/mysql/statement_test.go generated vendored Normal file
View File

@ -0,0 +1,126 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"bytes"
"testing"
)
func TestConvertDerivedString(t *testing.T) {
type derived string
output, err := converter{}.ConvertValue(derived("value"))
if err != nil {
t.Fatal("Derived string type not convertible", err)
}
if output != "value" {
t.Fatalf("Derived string type not converted, got %#v %T", output, output)
}
}
func TestConvertDerivedByteSlice(t *testing.T) {
type derived []uint8
output, err := converter{}.ConvertValue(derived("value"))
if err != nil {
t.Fatal("Byte slice not convertible", err)
}
if bytes.Compare(output.([]byte), []byte("value")) != 0 {
t.Fatalf("Byte slice not converted, got %#v %T", output, output)
}
}
func TestConvertDerivedUnsupportedSlice(t *testing.T) {
type derived []int
_, err := converter{}.ConvertValue(derived{1})
if err == nil || err.Error() != "unsupported type mysql.derived, a slice of int" {
t.Fatal("Unexpected error", err)
}
}
func TestConvertDerivedBool(t *testing.T) {
type derived bool
output, err := converter{}.ConvertValue(derived(true))
if err != nil {
t.Fatal("Derived bool type not convertible", err)
}
if output != true {
t.Fatalf("Derived bool type not converted, got %#v %T", output, output)
}
}
func TestConvertPointer(t *testing.T) {
str := "value"
output, err := converter{}.ConvertValue(&str)
if err != nil {
t.Fatal("Pointer type not convertible", err)
}
if output != "value" {
t.Fatalf("Pointer type not converted, got %#v %T", output, output)
}
}
func TestConvertSignedIntegers(t *testing.T) {
values := []interface{}{
int8(-42),
int16(-42),
int32(-42),
int64(-42),
int(-42),
}
for _, value := range values {
output, err := converter{}.ConvertValue(value)
if err != nil {
t.Fatalf("%T type not convertible %s", value, err)
}
if output != int64(-42) {
t.Fatalf("%T type not converted, got %#v %T", value, output, output)
}
}
}
func TestConvertUnsignedIntegers(t *testing.T) {
values := []interface{}{
uint8(42),
uint16(42),
uint32(42),
uint64(42),
uint(42),
}
for _, value := range values {
output, err := converter{}.ConvertValue(value)
if err != nil {
t.Fatalf("%T type not convertible %s", value, err)
}
if output != uint64(42) {
t.Fatalf("%T type not converted, got %#v %T", value, output, output)
}
}
output, err := converter{}.ConvertValue(^uint64(0))
if err != nil {
t.Fatal("uint64 high-bit not convertible", err)
}
if output != ^uint64(0) {
t.Fatalf("uint64 high-bit converted, got %#v %T", output, output)
}
}

View File

@ -9,27 +9,31 @@
package mysql
import (
"crypto/sha1"
"crypto/tls"
"database/sql"
"database/sql/driver"
"encoding/binary"
"errors"
"fmt"
"io"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
// Registry for custom tls.Configs
var (
tlsConfigLock sync.RWMutex
tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs
tlsConfigRegistry map[string]*tls.Config
)
// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
// Use the key as a value in the DSN where tls=value.
//
// Note: The tls.Config provided to needs to be exclusively owned by the driver after registering.
// Note: The provided tls.Config is exclusively owned by the driver after
// registering it.
//
// rootCertPool := x509.NewCertPool()
// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
@ -52,16 +56,16 @@ var (
// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
//
func RegisterTLSConfig(key string, config *tls.Config) error {
if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" {
if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" || strings.ToLower(key) == "preferred" {
return fmt.Errorf("key '%s' is reserved", key)
}
tlsConfigLock.Lock()
if tlsConfigRegister == nil {
tlsConfigRegister = make(map[string]*tls.Config)
if tlsConfigRegistry == nil {
tlsConfigRegistry = make(map[string]*tls.Config)
}
tlsConfigRegister[key] = config
tlsConfigRegistry[key] = config
tlsConfigLock.Unlock()
return nil
}
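Alongside the registry rename, "preferred" becomes a reserved key here and pairs with the fallback added in readHandshakePacket above: if the server does not advertise TLS, the connection proceeds in plaintext instead of failing with ErrNoTLS. A hedged usage sketch (DSN invented):

// illustrative only; "preferred" is used directly in the DSN, never registered
db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test?tls=preferred")
if err != nil {
	log.Fatal(err)
}
defer db.Close()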
@ -69,16 +73,16 @@ func RegisterTLSConfig(key string, config *tls.Config) error {
// DeregisterTLSConfig removes the tls.Config associated with key.
func DeregisterTLSConfig(key string) {
tlsConfigLock.Lock()
if tlsConfigRegister != nil {
delete(tlsConfigRegister, key)
if tlsConfigRegistry != nil {
delete(tlsConfigRegistry, key)
}
tlsConfigLock.Unlock()
}
func getTLSConfigClone(key string) (config *tls.Config) {
tlsConfigLock.RLock()
if v, ok := tlsConfigRegister[key]; ok {
config = cloneTLSConfig(v)
if v, ok := tlsConfigRegistry[key]; ok {
config = v.Clone()
}
tlsConfigLock.RUnlock()
return
@ -98,177 +102,10 @@ func readBool(input string) (value bool, valid bool) {
return
}
/******************************************************************************
* Authentication *
******************************************************************************/
// Encrypt password using 4.1+ method
func scramblePassword(scramble, password []byte) []byte {
if len(password) == 0 {
return nil
}
// stage1Hash = SHA1(password)
crypt := sha1.New()
crypt.Write(password)
stage1 := crypt.Sum(nil)
// scrambleHash = SHA1(scramble + SHA1(stage1Hash))
// inner Hash
crypt.Reset()
crypt.Write(stage1)
hash := crypt.Sum(nil)
// outer Hash
crypt.Reset()
crypt.Write(scramble)
crypt.Write(hash)
scramble = crypt.Sum(nil)
// token = scrambleHash XOR stage1Hash
for i := range scramble {
scramble[i] ^= stage1[i]
}
return scramble
}
// Encrypt password using pre 4.1 (old password) method
// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
type myRnd struct {
seed1, seed2 uint32
}
const myRndMaxVal = 0x3FFFFFFF
// Pseudo random number generator
func newMyRnd(seed1, seed2 uint32) *myRnd {
return &myRnd{
seed1: seed1 % myRndMaxVal,
seed2: seed2 % myRndMaxVal,
}
}
// Tested to be equivalent to MariaDB's floating point variant
// http://play.golang.org/p/QHvhd4qved
// http://play.golang.org/p/RG0q4ElWDx
func (r *myRnd) NextByte() byte {
r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
return byte(uint64(r.seed1) * 31 / myRndMaxVal)
}
// Generate binary hash from byte string using insecure pre 4.1 method
func pwHash(password []byte) (result [2]uint32) {
var add uint32 = 7
var tmp uint32
result[0] = 1345345333
result[1] = 0x12345671
for _, c := range password {
// skip spaces and tabs in password
if c == ' ' || c == '\t' {
continue
}
tmp = uint32(c)
result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
result[1] += (result[1] << 8) ^ result[0]
add += tmp
}
// Remove sign bit (1<<31)-1)
result[0] &= 0x7FFFFFFF
result[1] &= 0x7FFFFFFF
return
}
// Encrypt password using insecure pre 4.1 method
func scrambleOldPassword(scramble, password []byte) []byte {
if len(password) == 0 {
return nil
}
scramble = scramble[:8]
hashPw := pwHash(password)
hashSc := pwHash(scramble)
r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
var out [8]byte
for i := range out {
out[i] = r.NextByte() + 64
}
mask := r.NextByte()
for i := range out {
out[i] ^= mask
}
return out[:]
}
/******************************************************************************
* Time related utils *
******************************************************************************/
// NullTime represents a time.Time that may be NULL.
// NullTime implements the Scanner interface so
// it can be used as a scan destination:
//
// var nt NullTime
// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
// ...
// if nt.Valid {
// // use nt.Time
// } else {
// // NULL value
// }
//
// This NullTime implementation is not driver-specific
type NullTime struct {
Time time.Time
Valid bool // Valid is true if Time is not NULL
}
// Scan implements the Scanner interface.
// The value type must be time.Time or string / []byte (formatted time-string),
// otherwise Scan fails.
func (nt *NullTime) Scan(value interface{}) (err error) {
if value == nil {
nt.Time, nt.Valid = time.Time{}, false
return
}
switch v := value.(type) {
case time.Time:
nt.Time, nt.Valid = v, true
return
case []byte:
nt.Time, err = parseDateTime(string(v), time.UTC)
nt.Valid = (err == nil)
return
case string:
nt.Time, err = parseDateTime(v, time.UTC)
nt.Valid = (err == nil)
return
}
nt.Valid = false
return fmt.Errorf("Can't convert %T to time.Time", value)
}
// Value implements the driver Valuer interface.
func (nt NullTime) Value() (driver.Value, error) {
if !nt.Valid {
return nil, nil
}
return nt.Time, nil
}
func parseDateTime(str string, loc *time.Location) (t time.Time, err error) {
base := "0000-00-00 00:00:00.0000000"
switch len(str) {
@ -339,47 +176,64 @@ var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) {
func appendMicrosecs(dst, src []byte, decimals int) []byte {
if decimals <= 0 {
return dst
}
if len(src) == 0 {
return append(dst, ".000000"[:decimals+1]...)
}
microsecs := binary.LittleEndian.Uint32(src[:4])
p1 := byte(microsecs / 10000)
microsecs -= 10000 * uint32(p1)
p2 := byte(microsecs / 100)
microsecs -= 100 * uint32(p2)
p3 := byte(microsecs)
switch decimals {
default:
return append(dst, '.',
digits10[p1], digits01[p1],
digits10[p2], digits01[p2],
digits10[p3], digits01[p3],
)
case 1:
return append(dst, '.',
digits10[p1],
)
case 2:
return append(dst, '.',
digits10[p1], digits01[p1],
)
case 3:
return append(dst, '.',
digits10[p1], digits01[p1],
digits10[p2],
)
case 4:
return append(dst, '.',
digits10[p1], digits01[p1],
digits10[p2], digits01[p2],
)
case 5:
return append(dst, '.',
digits10[p1], digits01[p1],
digits10[p2], digits01[p2],
digits10[p3],
)
}
}
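appendMicrosecs reads the 4-byte little-endian microsecond field and emits up to six fractional digits through the digits10/digits01 lookup strings. A worked sketch of the digit-pair arithmetic, with invented values:

// illustrative only
microsecs := uint32(987654)
p1 := byte(microsecs / 10000) // 98
microsecs -= 10000 * uint32(p1)
p2 := byte(microsecs / 100) // 76
microsecs -= 100 * uint32(p2)
p3 := byte(microsecs) // 54
_ = p3
// decimals == 6 appends ".987654"; decimals == 3 appends ".987"
// (the p1 pair plus the tens digit of p2), matching the switch above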
func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) {
// length expects the deterministic length of the zero value,
// negative time and 100+ hours are automatically added if needed
if len(src) == 0 {
if justTime {
return zeroDateTime[11 : 11+length], nil
}
return zeroDateTime[:length], nil
}
var dst []byte // return value
var pt, p1, p2, p3 byte // current digit pair
var zOffs byte // offset of value in zeroDateTime
if justTime {
switch length {
case
8, // time (can be up to 10 when negative and 100+ hours)
10, 11, 12, 13, 14, 15: // time with fractional seconds
default:
return nil, fmt.Errorf("illegal TIME length %d", length)
}
switch len(src) {
case 8, 12:
default:
return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
}
// +2 to enable negative time and 100+ hours
dst = make([]byte, 0, length+2)
if src[0] == 1 {
dst = append(dst, '-')
}
if src[1] != 0 {
hour := uint16(src[1])*24 + uint16(src[5])
pt = byte(hour / 100)
p1 = byte(hour - 100*uint16(pt))
dst = append(dst, digits01[pt])
} else {
p1 = src[5]
}
zOffs = 11
src = src[6:]
} else {
var p1, p2, p3 byte // current digit pair
switch length {
case 10, 19, 21, 22, 23, 24, 25, 26:
default:
@ -401,7 +255,7 @@ func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value
dst = make([]byte, 0, length)
// start with the date
year := binary.LittleEndian.Uint16(src[:2])
pt = byte(year / 100)
pt := year / 100
p1 = byte(year - 100*uint16(pt))
p2, p3 = src[2], src[3]
dst = append(dst,
@ -419,7 +273,7 @@ func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value
dst = append(dst, ' ')
p1 = src[4] // hour
src = src[5:]
}
// p1 is 2-digit hour, src is after hour
p2, p3 = src[0], src[1]
dst = append(dst,
@ -427,51 +281,49 @@ func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value
digits10[p2], digits01[p2], ':',
digits10[p3], digits01[p3],
)
if length <= byte(len(dst)) {
return dst, nil
return appendMicrosecs(dst, src[2:], int(length)-20), nil
}
src = src[2:]
func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
// length expects the deterministic length of the zero value,
// negative time and 100+ hours are automatically added if needed
if len(src) == 0 {
return append(dst, zeroDateTime[19:zOffs+length]...), nil
return zeroDateTime[11 : 11+length], nil
}
microsecs := binary.LittleEndian.Uint32(src[:4])
p1 = byte(microsecs / 10000)
microsecs -= 10000 * uint32(p1)
p2 = byte(microsecs / 100)
microsecs -= 100 * uint32(p2)
p3 = byte(microsecs)
switch decimals := zOffs + length - 20; decimals {
var dst []byte // return value
switch length {
case
8, // time (can be up to 10 when negative and 100+ hours)
10, 11, 12, 13, 14, 15: // time with fractional seconds
default:
return append(dst, '.',
digits10[p1], digits01[p1],
digits10[p2], digits01[p2],
digits10[p3], digits01[p3],
), nil
case 1:
return append(dst, '.',
digits10[p1],
), nil
case 2:
return append(dst, '.',
digits10[p1], digits01[p1],
), nil
case 3:
return append(dst, '.',
digits10[p1], digits01[p1],
digits10[p2],
), nil
case 4:
return append(dst, '.',
digits10[p1], digits01[p1],
digits10[p2], digits01[p2],
), nil
case 5:
return append(dst, '.',
digits10[p1], digits01[p1],
digits10[p2], digits01[p2],
digits10[p3],
), nil
return nil, fmt.Errorf("illegal TIME length %d", length)
}
switch len(src) {
case 8, 12:
default:
return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
}
// +2 to enable negative time and 100+ hours
dst = make([]byte, 0, length+2)
if src[0] == 1 {
dst = append(dst, '-')
}
days := binary.LittleEndian.Uint32(src[1:5])
hours := int64(days)*24 + int64(src[5])
if hours >= 100 {
dst = strconv.AppendInt(dst, hours, 10)
} else {
dst = append(dst, digits10[hours], digits01[hours])
}
min, sec := src[6], src[7]
dst = append(dst, ':',
digits10[min], digits01[min], ':',
digits10[sec], digits01[sec],
)
return appendMicrosecs(dst, src[8:], int(length)-9), nil
}
/******************************************************************************
@ -537,7 +389,7 @@ func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
// Check data length
if len(b) >= n {
return b[n-int(num) : n], false, n, nil
return b[n-int(num) : n : n], false, n, nil
}
return nil, false, n, io.EOF
}
@ -566,8 +418,8 @@ func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
if len(b) == 0 {
return 0, true, 1
}
switch b[0] {
switch b[0] {
// 251: NULL
case 0xfb:
return 0, true, 1
@ -778,7 +630,7 @@ type atomicBool struct {
value uint32
}
// IsSet returns wether the current boolean value is true
// IsSet returns whether the current boolean value is true
func (ab *atomicBool) IsSet() bool {
return atomic.LoadUint32(&ab.value) > 0
}
@ -792,7 +644,7 @@ func (ab *atomicBool) Set(value bool) {
}
}
// TrySet sets the value of the bool and returns wether the value changed
// TrySet sets the value of the bool and returns whether the value changed
func (ab *atomicBool) TrySet(value bool) bool {
if value {
return atomic.SwapUint32(&ab.value, 1) == 0
@ -800,7 +652,7 @@ func (ab *atomicBool) TrySet(value bool) bool {
return atomic.SwapUint32(&ab.value, 0) > 0
}
// atomicBool is a wrapper for atomically accessed error values
// atomicError is a wrapper for atomically accessed error values
type atomicError struct {
_noCopy noCopy
value atomic.Value
@ -820,3 +672,30 @@ func (ae *atomicError) Value() error {
}
return nil
}
func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
dargs := make([]driver.Value, len(named))
for n, param := range named {
if len(param.Name) > 0 {
// TODO: support the use of Named Parameters #561
return nil, errors.New("mysql: driver does not support the use of Named Parameters")
}
dargs[n] = param.Value
}
return dargs, nil
}
func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
switch sql.IsolationLevel(level) {
case sql.LevelRepeatableRead:
return "REPEATABLE READ", nil
case sql.LevelReadCommitted:
return "READ COMMITTED", nil
case sql.LevelReadUncommitted:
return "READ UNCOMMITTED", nil
case sql.LevelSerializable:
return "SERIALIZABLE", nil
default:
return "", fmt.Errorf("mysql: unsupported isolation level: %v", level)
}
}
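mapIsolationLevel is the piece that turns the standard sql.TxOptions isolation constants into the level string the driver sends (presumably feeding a SET TRANSACTION ISOLATION LEVEL statement in BeginTx). A short usage sketch, assuming an open *sql.DB named db and a context ctx:

// illustrative only
tx, err := db.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable})
if err != nil {
	log.Fatal(err) // unmapped levels surface as "mysql: unsupported isolation level: ..."
}
defer tx.Rollback()
// ... statements ...
if err := tx.Commit(); err != nil {
	log.Fatal(err)
}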

View File

@ -1,40 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build go1.7
// +build !go1.8
package mysql
import "crypto/tls"
func cloneTLSConfig(c *tls.Config) *tls.Config {
return &tls.Config{
Rand: c.Rand,
Time: c.Time,
Certificates: c.Certificates,
NameToCertificate: c.NameToCertificate,
GetCertificate: c.GetCertificate,
RootCAs: c.RootCAs,
NextProtos: c.NextProtos,
ServerName: c.ServerName,
ClientAuth: c.ClientAuth,
ClientCAs: c.ClientCAs,
InsecureSkipVerify: c.InsecureSkipVerify,
CipherSuites: c.CipherSuites,
PreferServerCipherSuites: c.PreferServerCipherSuites,
SessionTicketsDisabled: c.SessionTicketsDisabled,
SessionTicketKey: c.SessionTicketKey,
ClientSessionCache: c.ClientSessionCache,
MinVersion: c.MinVersion,
MaxVersion: c.MaxVersion,
CurvePreferences: c.CurvePreferences,
DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
Renegotiation: c.Renegotiation,
}
}

View File

@ -1,49 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build go1.8
package mysql
import (
"crypto/tls"
"database/sql"
"database/sql/driver"
"errors"
)
func cloneTLSConfig(c *tls.Config) *tls.Config {
return c.Clone()
}
func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
dargs := make([]driver.Value, len(named))
for n, param := range named {
if len(param.Name) > 0 {
// TODO: support the use of Named Parameters #561
return nil, errors.New("mysql: driver does not support the use of Named Parameters")
}
dargs[n] = param.Value
}
return dargs, nil
}
func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
switch sql.IsolationLevel(level) {
case sql.LevelRepeatableRead:
return "REPEATABLE READ", nil
case sql.LevelReadCommitted:
return "READ COMMITTED", nil
case sql.LevelReadUncommitted:
return "READ UNCOMMITTED", nil
case sql.LevelSerializable:
return "SERIALIZABLE", nil
default:
return "", errors.New("mysql: unsupported isolation level: " + string(level))
}
}

View File

@ -1,54 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build go1.8
package mysql
import (
"database/sql"
"database/sql/driver"
"testing"
)
func TestIsolationLevelMapping(t *testing.T) {
data := []struct {
level driver.IsolationLevel
expected string
}{
{
level: driver.IsolationLevel(sql.LevelReadCommitted),
expected: "READ COMMITTED",
},
{
level: driver.IsolationLevel(sql.LevelRepeatableRead),
expected: "REPEATABLE READ",
},
{
level: driver.IsolationLevel(sql.LevelReadUncommitted),
expected: "READ UNCOMMITTED",
},
{
level: driver.IsolationLevel(sql.LevelSerializable),
expected: "SERIALIZABLE",
},
}
for i, td := range data {
if actual, err := mapIsolationLevel(td.level); actual != td.expected || err != nil {
t.Fatal(i, td.expected, actual, err)
}
}
// check unsupported mapping
if actual, err := mapIsolationLevel(driver.IsolationLevel(sql.LevelLinearizable)); actual != "" || err == nil {
t.Fatal("Expected error on unsupported isolation level")
}
}

View File

@ -10,52 +10,12 @@ package mysql
import (
"bytes"
"database/sql"
"database/sql/driver"
"encoding/binary"
"fmt"
"testing"
"time"
)
func TestScanNullTime(t *testing.T) {
var scanTests = []struct {
in interface{}
error bool
valid bool
time time.Time
}{
{tDate, false, true, tDate},
{sDate, false, true, tDate},
{[]byte(sDate), false, true, tDate},
{tDateTime, false, true, tDateTime},
{sDateTime, false, true, tDateTime},
{[]byte(sDateTime), false, true, tDateTime},
{tDate0, false, true, tDate0},
{sDate0, false, true, tDate0},
{[]byte(sDate0), false, true, tDate0},
{sDateTime0, false, true, tDate0},
{[]byte(sDateTime0), false, true, tDate0},
{"", true, false, tDate0},
{"1234", true, false, tDate0},
{0, true, false, tDate0},
}
var nt = NullTime{}
var err error
for _, tst := range scanTests {
err = nt.Scan(tst.in)
if (err != nil) != tst.error {
t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil))
}
if nt.Valid != tst.valid {
t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid)
}
if nt.Time != tst.time {
t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time)
}
}
}
func TestLengthEncodedInteger(t *testing.T) {
var integerTests = []struct {
num uint64
@ -93,25 +53,6 @@ func TestLengthEncodedInteger(t *testing.T) {
}
}
func TestOldPass(t *testing.T) {
scramble := []byte{9, 8, 7, 6, 5, 4, 3, 2}
vectors := []struct {
pass string
out string
}{
{" pass", "47575c5a435b4251"},
{"pass ", "47575c5a435b4251"},
{"123\t456", "575c47505b5b5559"},
{"C0mpl!ca ted#PASS123", "5d5d554849584a45"},
}
for _, tuple := range vectors {
ours := scrambleOldPassword(scramble, []byte(tuple.pass))
if tuple.out != fmt.Sprintf("%x", ours) {
t.Errorf("Failed old password %q", tuple.pass)
}
}
}
func TestFormatBinaryDateTime(t *testing.T) {
rawDate := [11]byte{}
binary.LittleEndian.PutUint16(rawDate[:2], 1978) // years
@ -122,7 +63,7 @@ func TestFormatBinaryDateTime(t *testing.T) {
rawDate[6] = 23 // seconds
binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds
expect := func(expected string, inlen, outlen uint8) {
actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen, false)
actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen)
bytes, ok := actual.([]byte)
if !ok {
t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
@ -130,7 +71,7 @@ func TestFormatBinaryDateTime(t *testing.T) {
if string(bytes) != expected {
t.Errorf(
"expected %q, got %q for length in %d, out %d",
bytes, actual, inlen, outlen,
expected, actual, inlen, outlen,
)
}
}
@ -141,6 +82,41 @@ func TestFormatBinaryDateTime(t *testing.T) {
expect("1978-12-30 15:46:23.987654", 11, 26)
}
func TestFormatBinaryTime(t *testing.T) {
expect := func(expected string, src []byte, outlen uint8) {
actual, _ := formatBinaryTime(src, outlen)
bytes, ok := actual.([]byte)
if !ok {
t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
}
if string(bytes) != expected {
t.Errorf(
"expected %q, got %q for src=%q and outlen=%d",
expected, actual, src, outlen)
}
}
// binary format:
// sign (0: positive, 1: negative), days(4), hours, minutes, seconds, micro(4)
// Zeros
expect("00:00:00", []byte{}, 8)
expect("00:00:00.0", []byte{}, 10)
expect("00:00:00.000000", []byte{}, 15)
// Without micro(4)
expect("12:34:56", []byte{0, 0, 0, 0, 0, 12, 34, 56}, 8)
expect("-12:34:56", []byte{1, 0, 0, 0, 0, 12, 34, 56}, 8)
expect("12:34:56.00", []byte{0, 0, 0, 0, 0, 12, 34, 56}, 11)
expect("24:34:56", []byte{0, 1, 0, 0, 0, 0, 34, 56}, 8)
expect("-99:34:56", []byte{1, 4, 0, 0, 0, 3, 34, 56}, 8)
expect("103079215103:34:56", []byte{0, 255, 255, 255, 255, 23, 34, 56}, 8)
// With micro(4)
expect("12:34:56.00", []byte{0, 0, 0, 0, 0, 12, 34, 56, 99, 0, 0, 0}, 11)
expect("12:34:56.000099", []byte{0, 0, 0, 0, 0, 12, 34, 56, 99, 0, 0, 0}, 15)
}
func TestEscapeBackslash(t *testing.T) {
expect := func(expected, value string) {
actual := string(escapeBytesBackslash([]byte{}, []byte(value)))
@ -275,3 +251,43 @@ func TestAtomicError(t *testing.T) {
t.Fatal("Error did not match")
}
}
func TestIsolationLevelMapping(t *testing.T) {
data := []struct {
level driver.IsolationLevel
expected string
}{
{
level: driver.IsolationLevel(sql.LevelReadCommitted),
expected: "READ COMMITTED",
},
{
level: driver.IsolationLevel(sql.LevelRepeatableRead),
expected: "REPEATABLE READ",
},
{
level: driver.IsolationLevel(sql.LevelReadUncommitted),
expected: "READ UNCOMMITTED",
},
{
level: driver.IsolationLevel(sql.LevelSerializable),
expected: "SERIALIZABLE",
},
}
for i, td := range data {
if actual, err := mapIsolationLevel(td.level); actual != td.expected || err != nil {
t.Fatal(i, td.expected, actual, err)
}
}
// check unsupported mapping
expectedErr := "mysql: unsupported isolation level: 7"
actual, err := mapIsolationLevel(driver.IsolationLevel(sql.LevelLinearizable))
if actual != "" || err == nil {
t.Fatal("Expected error on unsupported isolation level")
}
if err.Error() != expectedErr {
t.Fatalf("Expected error to be %q, got %q", expectedErr, err)
}
}

View File

@ -193,6 +193,16 @@ func (s *upgradeController) upgradeOne(app v1.AppService) error {
return fmt.Errorf("upgrade deployment %s failure %s", app.ServiceAlias, err.Error())
}
}
// create claims
for _, claim := range app.GetClaimsManually() {
logrus.Debugf("create claim: %s", claim.Name)
_, err := s.manager.client.CoreV1().PersistentVolumeClaims(app.TenantID).Create(claim)
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("create claims: %v", err)
}
}
if statefulset := app.GetStatefulSet(); statefulset != nil {
_, err = s.manager.client.AppsV1().StatefulSets(statefulset.Namespace).Patch(statefulset.Name, types.MergePatchType, app.UpgradePatch["statefulset"])
if err != nil {

View File

@ -87,8 +87,7 @@ func (p *rainbondssscProvisioner) Provision(options controller.VolumeOptions) (*
return nil, err
}
// new volume path
newPath := strings.Replace(hostpath, "/grdata", options.NFS.Path, 1)
persistentVolumeSource, err := updatePathForPersistentVolumeSource(&options.PersistentVolumeSource, newPath)
persistentVolumeSource, err := updatePathForPersistentVolumeSource(&options.PersistentVolumeSource, hostpath)
if err != nil {
return nil, err
}
@ -147,19 +146,27 @@ func getVolumeIDByPVCName(pvcName string) int {
return 0
}
func updatePathForPersistentVolumeSource(persistentVolumeSource *v1.PersistentVolumeSource, newPath string) (*v1.PersistentVolumeSource, error) {
func updatePathForPersistentVolumeSource(persistentVolumeSource *v1.PersistentVolumeSource, hostpath string) (*v1.PersistentVolumeSource, error) {
newPath := func(new string) string {
p := strings.Replace(hostpath, "/grdata", "", 1)
return path.Join(new, p)
}
source := &v1.PersistentVolumeSource{}
switch {
case persistentVolumeSource.NFS != nil:
persistentVolumeSource.NFS.Path = newPath
source.NFS = persistentVolumeSource.NFS
source.NFS.Path = newPath(persistentVolumeSource.NFS.Path)
case persistentVolumeSource.CSI != nil:
if persistentVolumeSource.CSI.VolumeAttributes != nil {
persistentVolumeSource.CSI.VolumeAttributes["path"] = newPath
source.CSI = persistentVolumeSource.CSI
source.CSI.VolumeAttributes["path"] = newPath(persistentVolumeSource.CSI.VolumeAttributes["path"])
}
case persistentVolumeSource.Glusterfs != nil:
//glusterfs:
// endpoints: glusterfs-cluster
// path: myVol1
persistentVolumeSource.Glusterfs.Path = newPath
source.Glusterfs.Path = persistentVolumeSource.Glusterfs.Path
source.Glusterfs = persistentVolumeSource.Glusterfs
source.Glusterfs.Path = newPath(persistentVolumeSource.Glusterfs.Path)
default:
return nil, fmt.Errorf("unsupported persistent volume source")
}
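For reference, a worked example of the path rewrite above as a standalone sketch; the rebase helper and the /exports/grdata base path are illustrative, not part of the change:
package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	// mirrors the newPath closure: strip the local /grdata prefix,
	// then re-root the remainder onto the volume source's own base path
	rebase := func(base, hostpath string) string {
		p := strings.Replace(hostpath, "/grdata", "", 1)
		return path.Join(base, p)
	}
	// e.g. an NFS export mounted at /exports/grdata (hypothetical)
	fmt.Println(rebase("/exports/grdata", "/grdata/tenant/service/data"))
	// prints: /exports/grdata/tenant/service/data
}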

View File

@ -499,35 +499,43 @@ func (r *RuntimeServer) GetAppVolumeStatus(ctx context.Context, re *pb.ServiceRe
if as == nil {
return ret, nil
}
appPodList, err := r.GetAppPods(ctx, re)
if err != nil {
logrus.Warnf("get volume status error : %s", err.Error())
return ret, nil
// get all pods of the component
pods := as.GetPods(false)
for _, pod := range pods {
// if the pod is terminated, its volume status is NOT_READY; skip it
if v1.IsPodTerminated(pod) {
continue
}
for _, pod := range appPodList.GetNewPods() {
for _, volumeName := range pod.PodVolumes {
prefix := "manual"
if strings.HasPrefix(volumeName, prefix) {
volumeName = strings.TrimPrefix(volumeName, prefix)
if pod.GetPodStatus() != pb.PodStatus_RUNNING.String() {
ret.Status[volumeName] = pb.ServiceVolumeStatus_NOT_READY // volumeName translates to the serviceVolume id in the db
} else {
ret.Status[volumeName] = pb.ServiceVolumeStatus_READY // volumeName translates to the serviceVolume id in the db
// exception pod information lost along with its node is no longer displayed, so the volume status is NOT_READY; skip it
if v1.IsPodNodeLost(pod) {
continue
}
}
}
}
podStatus := &pb.PodStatus{}
wutil.DescribePodStatus(r.clientset, pod, podStatus, k8s.DefListEventsByPod)
for _, pod := range appPodList.GetOldPods() {
for _, volumeName := range pod.PodVolumes {
prefix := "manual"
for _, volume := range pod.Spec.Volumes {
volumeName := volume.Name
prefix := "manual" // all volume name start with manual but config file, volume name style: fmt.Sprintf("manual%d", TenantServiceVolume.ID)
if strings.HasPrefix(volumeName, prefix) {
volumeName = strings.TrimPrefix(volumeName, prefix)
if pod.GetPodStatus() != pb.PodStatus_RUNNING.String() {
ret.Status[volumeName] = pb.ServiceVolumeStatus_NOT_READY // volumeName translates to the serviceVolume id in the db
} else {
ret.Status[volumeName] = pb.ServiceVolumeStatus_READY // volumeName translates to the serviceVolume id in the db
switch podStatus.Type {
case pb.PodStatus_SCHEDULING:
// the pod has not been scheduled yet, so its volume cannot be bound
ret.Status[volumeName] = pb.ServiceVolumeStatus_NOT_READY
case pb.PodStatus_UNKNOWN:
// pod status is unknown
ret.Status[volumeName] = pb.ServiceVolumeStatus_NOT_READY
case pb.PodStatus_INITIATING:
// pod is initializing; its volumes are ready unless the pod phase is still pending
ret.Status[volumeName] = pb.ServiceVolumeStatus_READY
if pod.Status.Phase == corev1.PodPending {
ret.Status[volumeName] = pb.ServiceVolumeStatus_NOT_READY
}
case pb.PodStatus_RUNNING, pb.PodStatus_ABNORMAL, pb.PodStatus_NOTREADY, pb.PodStatus_UNHEALTHY:
// the pod has started (running, abnormal, not ready, or unhealthy), so its volumes are reported ready
ret.Status[volumeName] = pb.ServiceVolumeStatus_READY
}
}
}
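For reference, a minimal sketch of the mapping applied above: the DB volume id is recovered by trimming the manual prefix from the pod volume name, and readiness follows the pod status switch (the helper and values below are illustrative only):
package main

import (
	"fmt"
	"strings"
)

// volumeReady condenses the switch above: scheduling and unknown pods are not
// ready, initiating pods are ready unless still pending, started pods are ready.
func volumeReady(podStatus string, pending bool) bool {
	switch podStatus {
	case "SCHEDULING", "UNKNOWN":
		return false
	case "INITIATING":
		return !pending
	default: // RUNNING, ABNORMAL, NOTREADY, UNHEALTHY
		return true
	}
}

func main() {
	// volume name style: fmt.Sprintf("manual%d", TenantServiceVolume.ID)
	volumeName := "manual1"
	id := strings.TrimPrefix(volumeName, "manual") // "1", the serviceVolume id in the db
	fmt.Println(id, volumeReady("RUNNING", false)) // 1 true
}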

View File

@ -5,14 +5,11 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"reflect"
"testing"
"time"
"github.com/eapache/channels"
"github.com/golang/mock/gomock"
"github.com/goodrain/rainbond/cmd/worker/option"
"github.com/goodrain/rainbond/db"
"github.com/goodrain/rainbond/db/config"
"github.com/goodrain/rainbond/worker/appm/store"
v1 "github.com/goodrain/rainbond/worker/appm/types/v1"
"github.com/goodrain/rainbond/worker/server/pb"
@ -158,93 +155,289 @@ func TestListEvents(t *testing.T) {
t.Logf("pod events: %v", podEvents)
}
func TestGetStorageClass(t *testing.T) {
c, err := clientcmd.BuildConfigFromFlags("", "/Users/fanyangyang/Documents/company/goodrain/admin.kubeconfig")
if err != nil {
t.Fatalf("read kube config file error: %v", err)
}
clientset, err := kubernetes.NewForConfig(c)
if err != nil {
t.Fatalf("create kube api client error: %v", err)
}
s := store.NewStore(clientset, nil, option.Config{}, nil, nil)
stes := s.GetStorageClasses()
storageclasses := new(pb.StorageClasses)
if stes != nil {
for _, st := range stes {
var allowTopologies []*pb.TopologySelectorTerm
for _, topologySelectorTerm := range st.AllowedTopologies {
var expressions []*pb.TopologySelectorLabelRequirement
for _, value := range topologySelectorTerm.MatchLabelExpressions {
expressions = append(expressions, &pb.TopologySelectorLabelRequirement{Key: value.Key, Values: value.Values})
}
allowTopologies = append(allowTopologies, &pb.TopologySelectorTerm{MatchLabelExpressions: expressions})
}
var allowVolumeExpansion bool
if st.AllowVolumeExpansion == nil {
allowVolumeExpansion = false
} else {
allowVolumeExpansion = *st.AllowVolumeExpansion
}
storageclasses.List = append(storageclasses.List, &pb.StorageClassDetail{
Name: st.Name,
Provisioner: st.Provisioner,
ReclaimPolicy: st.ReclaimPolicy,
AllowVolumeExpansion: allowVolumeExpansion,
VolumeBindingMode: st.VolumeBindingMode,
AllowedTopologies: allowTopologies,
func TestRuntimeServer_GetAppVolumeStatus(t *testing.T) {
tests := []struct {
name string
re *pb.ServiceRequest
as *v1.AppService
want *pb.ServiceVolumeStatusMessage
wantErr bool
}{
{
name: "running pod",
re: &pb.ServiceRequest{
ServiceId: "serviceID",
},
as: func() *v1.AppService {
as := &v1.AppService{}
as.SetPods(&corev1.Pod{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: "manual1",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{},
},
},
{
Name: "default-token-xwxgv",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{},
},
},
},
},
Status: corev1.PodStatus{
Phase: corev1.PodRunning,
},
})
t.Logf("allowVolumeExpansion is : %v", allowVolumeExpansion)
}
}
return as
}(),
want: &pb.ServiceVolumeStatusMessage{
Status: map[string]pb.ServiceVolumeStatus{"1": pb.ServiceVolumeStatus_READY},
},
wantErr: false,
},
{
name: "PodStatus_PENDING",
re: &pb.ServiceRequest{
ServiceId: "serviceID",
},
as: func() *v1.AppService {
as := &v1.AppService{}
as.SetPods(&corev1.Pod{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: "manual1",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{},
},
},
{
Name: "default-token-xwxgv",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{},
},
},
},
},
Status: corev1.PodStatus{
Phase: corev1.PodPending,
ContainerStatuses: []corev1.ContainerStatus{
{
Ready: false,
},
},
},
})
return as
}(),
want: &pb.ServiceVolumeStatusMessage{
Status: map[string]pb.ServiceVolumeStatus{"1": pb.ServiceVolumeStatus_NOT_READY},
},
wantErr: false,
},
{
name: "PodStatus_INITIATING",
re: &pb.ServiceRequest{
ServiceId: "serviceID",
},
as: func() *v1.AppService {
as := &v1.AppService{}
as.SetPods(&corev1.Pod{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: "manual1",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{},
},
},
{
Name: "default-token-xwxgv",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{},
},
},
},
},
Status: corev1.PodStatus{
Phase: corev1.PodPending,
Conditions: []corev1.PodCondition{
{
Type: corev1.PodInitialized,
Status: corev1.ConditionTrue,
},
},
ContainerStatuses: []corev1.ContainerStatus{
{
Ready: false,
},
},
},
})
return as
}(),
want: &pb.ServiceVolumeStatusMessage{
Status: map[string]pb.ServiceVolumeStatus{"1": pb.ServiceVolumeStatus_READY},
},
wantErr: false,
},
{
name: "PodStatus_ABNORMAL",
re: &pb.ServiceRequest{
ServiceId: "serviceID",
},
as: func() *v1.AppService {
as := &v1.AppService{}
as.SetPods(&corev1.Pod{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: "manual1",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{},
},
},
{
Name: "default-token-xwxgv",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{},
},
},
},
},
Status: corev1.PodStatus{
Phase: corev1.PodRunning,
Conditions: []corev1.PodCondition{
{
Type: corev1.PodReady,
Status: corev1.ConditionTrue,
},
},
ContainerStatuses: []corev1.ContainerStatus{
{
Ready: false,
State: corev1.ContainerState{
Waiting: nil,
Running: nil,
Terminated: &corev1.ContainerStateTerminated{},
},
},
},
},
})
return as
}(),
want: &pb.ServiceVolumeStatusMessage{
Status: map[string]pb.ServiceVolumeStatus{"1": pb.ServiceVolumeStatus_READY},
},
wantErr: false,
},
{
name: "PodStatus_NOTREADY",
re: &pb.ServiceRequest{
ServiceId: "serviceID",
},
as: func() *v1.AppService {
as := &v1.AppService{}
as.SetPods(&corev1.Pod{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: "manual1",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{},
},
},
{
Name: "default-token-xwxgv",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{},
},
},
},
},
Status: corev1.PodStatus{
Phase: corev1.PodRunning,
Conditions: []corev1.PodCondition{
{
Type: corev1.PodReady,
Status: corev1.ConditionTrue,
},
},
ContainerStatuses: []corev1.ContainerStatus{},
},
})
return as
}(),
want: &pb.ServiceVolumeStatusMessage{
Status: map[string]pb.ServiceVolumeStatus{"1": pb.ServiceVolumeStatus_READY},
},
wantErr: false,
},
{
name: "PodStatus_SCHEDULING",
re: &pb.ServiceRequest{
ServiceId: "serviceID",
},
as: func() *v1.AppService {
as := &v1.AppService{}
as.SetPods(&corev1.Pod{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: "manual1",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{},
},
},
{
Name: "default-token-xwxgv",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{},
},
},
},
},
Status: corev1.PodStatus{
Phase: corev1.PodPending,
Conditions: []corev1.PodCondition{
{
Type: corev1.PodScheduled,
Status: corev1.ConditionFalse,
},
},
},
})
return as
}(),
want: &pb.ServiceVolumeStatusMessage{
Status: map[string]pb.ServiceVolumeStatus{"1": pb.ServiceVolumeStatus_NOT_READY},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
func TestGetAppVolumeStatus(t *testing.T) {
ocfg := option.Config{
DBType: "mysql",
MysqlConnectionInfo: "oc6Poh:noot6Mea@tcp(192.168.2.203:3306)/region",
EtcdEndPoints: []string{"http://192.168.2.203:2379"},
EtcdTimeout: 5,
KubeConfig: "/Users/fanyangyang/Documents/company/goodrain/admin.kubeconfig",
LeaderElectionNamespace: "rainbond",
}
dbconfig := config.Config{
DBType: ocfg.DBType,
MysqlConnectionInfo: ocfg.MysqlConnectionInfo,
EtcdEndPoints: ocfg.EtcdEndPoints,
EtcdTimeout: ocfg.EtcdTimeout,
}
//step 1:db manager init ,event log client init
if err := db.CreateManager(dbconfig); err != nil {
t.Fatalf("error creating db manager: %v", err)
}
defer db.CloseManager()
c, err := clientcmd.BuildConfigFromFlags("", ocfg.KubeConfig)
if err != nil {
t.Fatalf("read kube config file error: %v", err)
}
clientset, err := kubernetes.NewForConfig(c)
if err != nil {
t.Fatalf("create kube api client error: %v", err)
}
startCh := channels.NewRingChannel(1024)
probeCh := channels.NewRingChannel(1024)
storer := store.NewStore(clientset, db.GetManager(), option.Config{LeaderElectionNamespace: ocfg.LeaderElectionNamespace, KubeClient: clientset}, startCh, probeCh)
if err := storer.Start(); err != nil {
t.Fatalf("error starting store: %v", err)
}
server := &RuntimeServer{
storer := store.NewMockStorer(ctrl)
r := &RuntimeServer{
store: storer,
clientset: clientset,
}
statusList, err := server.GetAppVolumeStatus(context.Background(), &pb.ServiceRequest{ServiceId: "c5802cd0276018209ff1f9b52bc04ec1"})
if err != nil {
t.Fatal(err)
storer.EXPECT().GetAppService(tt.re.ServiceId).Return(tt.as)
got, err := r.GetAppVolumeStatus(context.Background(), tt.re)
if (err != nil) != tt.wantErr {
t.Errorf("GetAppVolumeStatus() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("GetAppVolumeStatus() got = %v, want %v", got, tt.want)
}
})
}
t.Log(statusList.GetStatus())
t.Log("end")
time.Sleep(20 * time.Second) // db would close
}