add panic if internal watcher creation fails for gfsnotify; improve code and change comments from Chinese to English for gdb

This commit is contained in:
John 2020-02-10 20:37:53 +08:00
parent 784983806a
commit 88684ca00a
12 changed files with 344 additions and 188 deletions

View File

@ -184,7 +184,7 @@ var (
// The parameter <name> specifies the configuration group name,
// which is DEFAULT_GROUP_NAME in default.
func New(name ...string) (db DB, err error) {
group := configs.defaultGroup
group := configs.group
if len(name) > 0 && name[0] != "" {
group = name[0]
}
@ -232,7 +232,7 @@ func New(name ...string) (db DB, err error) {
// The parameter <name> specifies the configuration group name,
// which is DEFAULT_GROUP_NAME in default.
func Instance(name ...string) (db DB, err error) {
group := configs.defaultGroup
group := configs.group
if len(name) > 0 && name[0] != "" {
group = name[0]
}
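As a usage sketch for the group lookup refactored above (configs.defaultGroup renamed to configs.group), assuming a configuration group named "user" has been registered first; all node values below are illustrative, not part of this commit:

package main

import (
	"github.com/gogf/gf/database/gdb"
)

func main() {
	// Register an illustrative configuration group before creating DB objects.
	gdb.SetConfigGroup("user", gdb.ConfigGroup{
		gdb.ConfigNode{Host: "127.0.0.1", Port: "3306", User: "root", Pass: "12345678", Name: "user", Type: "mysql"},
	})
	// New creates a new DB object for the given group.
	db, err := gdb.New("user")
	if err != nil {
		panic(err)
	}
	// Instance returns a shared singleton object for the same group.
	instance, err := gdb.Instance("user")
	if err != nil {
		panic(err)
	}
	_, _ = db, instance
}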

View File

@ -192,7 +192,9 @@ func (bs *dbBase) GetStructs(pointer interface{}, query string, args ...interfac
return all.Structs(pointer)
}
// GetScan queries one or more records from database and converts them to given struct.
// GetScan queries one or more records from database and converts them to given struct or
// struct array.
//
// If parameter <pointer> is type of struct pointer, it calls GetStruct internally for
// the conversion. If parameter <pointer> is type of slice, it calls GetStructs internally
// for conversion.
@ -240,7 +242,7 @@ func (bs *dbBase) GetCount(query string, args ...interface{}) (int, error) {
return value.Int(), nil
}
// Pings to check or keep the database connection alive (master).
// PingMaster pings the master node to check authentication or keeps the connection alive.
func (bs *dbBase) PingMaster() error {
if master, err := bs.db.Master(); err != nil {
return err
@ -249,7 +251,7 @@ func (bs *dbBase) PingMaster() error {
}
}
// Pings to check or keep the database connection alive (slave).
// PingSlave pings the slave node to check authentication or keeps the connection alive.
func (bs *dbBase) PingSlave() error {
if slave, err := bs.db.Slave(); err != nil {
return err
@ -258,8 +260,10 @@ func (bs *dbBase) PingSlave() error {
}
}
// Transaction operation: begin. It returns an underlying transaction object connection; if nested transactions are needed, that object can be used, otherwise ignore it.
// The connection is closed automatically only on tx.Commit/tx.Rollback.
// Begin starts and returns the transaction object.
// You should call Commit or Rollback functions of the transaction object
// if you no longer use the transaction. Commit or Rollback functions will also
// close the transaction automatically.
func (bs *dbBase) Begin() (*TX, error) {
if master, err := bs.db.Master(); err != nil {
return nil, err
@ -276,46 +280,76 @@ func (bs *dbBase) Begin() (*TX, error) {
}
}
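A minimal sketch of the Commit/Rollback contract documented above; the table and data below are illustrative:

package example

import (
	"github.com/gogf/gf/database/gdb"
	"github.com/gogf/gf/frame/g"
)

// transferSketch shows the Begin/Commit/Rollback contract of the transaction object.
func transferSketch(db gdb.DB) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	if _, err = tx.Insert("user", g.Map{"uid": 10000, "name": "john"}); err != nil {
		// Rollback aborts and closes the underlying transaction connection.
		tx.Rollback()
		return err
	}
	// Commit also closes the underlying transaction connection.
	return tx.Commit()
}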
// CURD operation: single record insert. It only performs the insert; if a conflicting primary key or unique index exists, it returns an error.
// The parameter data supports the map/struct/*struct/slice types.
// When data is a slice (e.g. []map/[]struct/[]*struct), the batch parameter takes effect and the call automatically switches to a batch operation.
// Insert does "INSERT INTO ..." statement for the table.
// If there's already one unique record of the data in the table, it returns error.
//
// The parameter <data> can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
// Eg:
// Data(g.Map{"uid": 10000, "name":"john"})
// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"}})
//
// The parameter <batch> specifies the batch operation count when given data is slice.
func (bs *dbBase) Insert(table string, data interface{}, batch ...int) (sql.Result, error) {
return bs.db.doInsert(nil, table, data, gINSERT_OPTION_DEFAULT, batch...)
}
// InsertIgnore does "INSERT IGNORE INTO ..." statement for the table.
// If there's already one unique record of the data in the table, it ignores the inserting.
//
// The parameter <data> can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
// Eg:
// Data(g.Map{"uid": 10000, "name":"john"})
// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"}})
//
// The parameter <batch> specifies the batch operation count when given data is slice.
func (bs *dbBase) InsertIgnore(table string, data interface{}, batch ...int) (sql.Result, error) {
return bs.db.doInsert(nil, table, data, gINSERT_OPTION_IGNORE, batch...)
}
// CURD operation: single record insert. If the data already exists (primary key or unique index), it is deleted and re-inserted.
// The parameter data supports the map/struct/*struct/slice types.
// When data is a slice (e.g. []map/[]struct/[]*struct), the batch parameter takes effect and the call automatically switches to a batch operation.
// Replace does "REPLACE INTO ..." statement for the table.
// If there's already one unique record of the data in the table, it deletes the record
// and inserts a new one.
//
// The parameter <data> can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
// Eg:
// Data(g.Map{"uid": 10000, "name":"john"})
// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"}})
//
// If given data is type of slice, it then does batch replacing, and the optional parameter
// <batch> specifies the batch operation count.
func (bs *dbBase) Replace(table string, data interface{}, batch ...int) (sql.Result, error) {
return bs.db.doInsert(nil, table, data, gINSERT_OPTION_REPLACE, batch...)
}
// CURD operation: single record insert. If the data already exists (primary key or unique index), it is updated; otherwise a new record is inserted.
// The parameter data supports the map/struct/*struct/slice types.
// When data is a slice (e.g. []map/[]struct/[]*struct), the batch parameter takes effect and the call automatically switches to a batch operation.
// Save does "INSERT INTO ... ON DUPLICATE KEY UPDATE..." statement for the table.
// It updates the record if there's primary or unique index in the saving data,
// or else it inserts a new record into the table.
//
// The parameter <data> can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
// Eg:
// Data(g.Map{"uid": 10000, "name":"john"})
// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"}})
//
// If given data is type of slice, it then does batch saving, and the optional parameter
// <batch> specifies the batch operation count.
func (bs *dbBase) Save(table string, data interface{}, batch ...int) (sql.Result, error) {
return bs.db.doInsert(nil, table, data, gINSERT_OPTION_SAVE, batch...)
}
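A sketch of the data forms listed above, showing single-record and batch (slice) writing; the table, columns and batch size are illustrative:

package example

import (
	"github.com/gogf/gf/database/gdb"
	"github.com/gogf/gf/frame/g"
)

// writeSketch inserts one record, then saves a slice in batches of 10.
func writeSketch(db gdb.DB) error {
	// Single record: map data.
	if _, err := db.Insert("user", g.Map{"uid": 10000, "name": "john"}); err != nil {
		return err
	}
	// Slice data switches Save to batch mode; the optional parameter is the batch count.
	_, err := db.Save("user", g.Slice{
		g.Map{"uid": 10000, "name": "john"},
		g.Map{"uid": 20000, "name": "smith"},
	}, 10)
	return err
}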
// Supports the insert, replace, save and ignore operations.
// 0: insert: only performs the insert; if a conflicting primary key or unique index exists, it returns an error;
// 1: replace: if the data already exists (primary key or unique index), it is deleted and re-inserted;
// 2: save: if the data already exists (primary key or unique index), it is updated; otherwise a new record is inserted;
// 3: ignore: if the data already exists (primary key or unique index), nothing is done;
// doInsert inserts or updates data for given table.
//
// The parameter data supports the map/struct/*struct/slice types.
// When data is a slice (e.g. []map/[]struct/[]*struct), the batch parameter takes effect and the call automatically switches to a batch operation.
// The parameter <option> values are as follows:
// 0: insert: just insert, if there's unique/primary key in the data, it returns error;
// 1: replace: if there's unique/primary key in the data, it deletes it from table and inserts a new one;
// 2: save: if there's unique/primary key in the data, it updates it or else inserts a new one;
// 3: ignore: if there's unique/primary key in the data, it ignores the inserting;
func (bs *dbBase) doInsert(link dbLink, table string, data interface{}, option int, batch ...int) (result sql.Result, err error) {
var fields []string
var values []string
var params []interface{}
var dataMap Map
table = bs.db.handleTableName(table)
// Uses reflection to determine the type of data; if it is a slice, the call automatically switches to a batch operation.
rv := reflect.ValueOf(data)
kind := rv.Kind()
if kind == reflect.Ptr {
@ -364,22 +398,31 @@ func (bs *dbBase) doInsert(link dbLink, table string, data interface{}, option i
params...)
}
// CURD operation: batch insert with the specified batch size.
// BatchInsert batch inserts data.
// The parameter <list> must be type of slice of map or struct.
func (bs *dbBase) BatchInsert(table string, list interface{}, batch ...int) (sql.Result, error) {
return bs.db.doBatchInsert(nil, table, list, gINSERT_OPTION_DEFAULT, batch...)
}
// CURD operation: batch insert with the specified batch size; if the data already exists (primary key or unique index), it is deleted and re-inserted.
// BatchInsertIgnore batch inserts data with ignore option.
// The parameter <list> must be type of slice of map or struct.
func (bs *dbBase) BatchInsertIgnore(table string, list interface{}, batch ...int) (sql.Result, error) {
return bs.db.doBatchInsert(nil, table, list, gINSERT_OPTION_IGNORE, batch...)
}
// BatchReplace batch replaces data.
// The parameter <list> must be type of slice of map or struct.
func (bs *dbBase) BatchReplace(table string, list interface{}, batch ...int) (sql.Result, error) {
return bs.db.doBatchInsert(nil, table, list, gINSERT_OPTION_REPLACE, batch...)
}
// CURD operation: batch insert with the specified batch size; if the data already exists (primary key or unique index), it is updated; otherwise a new record is inserted.
// BatchSave batch saves data.
// The parameter <list> must be type of slice of map or struct.
func (bs *dbBase) BatchSave(table string, list interface{}, batch ...int) (sql.Result, error) {
return bs.db.doBatchInsert(nil, table, list, gINSERT_OPTION_SAVE, batch...)
}
// Batch writes data. The parameter list supports slice types, e.g.: []map/[]struct/[]*struct.
// doBatchInsert batch inserts/replaces/saves data.
func (bs *dbBase) doBatchInsert(link dbLink, table string, list interface{}, option int, batch ...int) (result sql.Result, err error) {
var keys, values []string
var params []interface{}
@ -448,7 +491,6 @@ func (bs *dbBase) doBatchInsert(link dbLink, table string, list interface{}, opt
}
updateStr = fmt.Sprintf("ON DUPLICATE KEY UPDATE %s", updateStr)
}
// Constructs the batch writing data format (note that map iteration is unordered).
batchNum := gDEFAULT_BATCH_NUM
if len(batch) > 0 && batch[0] > 0 {
batchNum = batch[0]
@ -490,8 +532,20 @@ func (bs *dbBase) doBatchInsert(link dbLink, table string, list interface{}, opt
return batchResult, nil
}
// CURD operation: data update, uniformly using SQL prepared statements.
// The data parameter supports the string/map/struct/*struct types.
// Update does "UPDATE ... " statement for the table.
//
// The parameter <data> can be type of string/map/gmap/struct/*struct, etc.
// Eg: "uid=10000", "uid", 10000, g.Map{"uid": 10000, "name":"john"}
//
// The parameter <condition> can be type of string/map/gmap/slice/struct/*struct, etc.
// It is commonly used with parameter <args>.
// Eg:
// "uid=10000",
// "uid", 10000
// "money>? AND name like ?", 99999, "vip_%"
// "status IN (?)", g.Slice{1,2,3}
// "age IN(?,?)", 18, 50
// User{ Id : 1, UserName : "john"}
func (bs *dbBase) Update(table string, data interface{}, condition interface{}, args ...interface{}) (sql.Result, error) {
newWhere, newArgs := formatWhere(bs.db, condition, args, false)
if newWhere != "" {
@ -500,12 +554,11 @@ func (bs *dbBase) Update(table string, data interface{}, condition interface{},
return bs.db.doUpdate(nil, table, data, newWhere, newArgs...)
}
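A sketch of the condition/args forms documented above for Update and Delete; the table, columns and the User type are illustrative:

package example

import (
	"github.com/gogf/gf/database/gdb"
	"github.com/gogf/gf/frame/g"
)

// User is an illustrative struct used as a condition value.
type User struct {
	Id       int
	UserName string
}

// conditionSketch updates with a string condition and deletes with a struct condition.
func conditionSketch(db gdb.DB) error {
	// Placeholder arguments, including slice expansion for IN(?).
	if _, err := db.Update(
		"user",
		g.Map{"name": "john guo"},
		"uid=? AND status IN(?)",
		10000, g.Slice{1, 2, 3},
	); err != nil {
		return err
	}
	// Struct condition.
	_, err := db.Delete("user", User{Id: 1, UserName: "john"})
	return err
}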
// CURD operation: data update, uniformly using SQL prepared statements.
// The data parameter supports the string/map/struct/*struct types.
// doUpdate does "UPDATE ... " statement for the table.
// Also see Update.
func (bs *dbBase) doUpdate(link dbLink, table string, data interface{}, condition string, args ...interface{}) (result sql.Result, err error) {
table = bs.db.handleTableName(table)
updates := ""
// Uses reflection for type checking.
rv := reflect.ValueOf(data)
kind := rv.Kind()
if kind == reflect.Ptr {
@ -539,7 +592,17 @@ func (bs *dbBase) doUpdate(link dbLink, table string, data interface{}, conditio
return bs.db.doExec(link, fmt.Sprintf("UPDATE %s SET %s%s", table, updates, condition), args...)
}
// CURD operation: deletes data.
// Delete does "DELETE FROM ... " statement for the table.
//
// The parameter <condition> can be type of string/map/gmap/slice/struct/*struct, etc.
// It is commonly used with parameter <args>.
// Eg:
// "uid=10000",
// "uid", 10000
// "money>? AND name like ?", 99999, "vip_%"
// "status IN (?)", g.Slice{1,2,3}
// "age IN(?,?)", 18, 50
// User{ Id : 1, UserName : "john"}
func (bs *dbBase) Delete(table string, condition interface{}, args ...interface{}) (result sql.Result, err error) {
newWhere, newArgs := formatWhere(bs.db, condition, args, false)
if newWhere != "" {
@ -548,7 +611,8 @@ func (bs *dbBase) Delete(table string, condition interface{}, args ...interface{
return bs.db.doDelete(nil, table, newWhere, newArgs...)
}
// CURD operation: deletes data.
// doDelete does "DELETE FROM ... " statement for the table.
// Also see Delete.
func (bs *dbBase) doDelete(link dbLink, table string, condition string, args ...interface{}) (result sql.Result, err error) {
if link == nil {
if link, err = bs.db.Master(); err != nil {
@ -559,36 +623,35 @@ func (bs *dbBase) doDelete(link dbLink, table string, condition string, args ...
return bs.db.doExec(link, fmt.Sprintf("DELETE FROM %s%s", table, condition), args...)
}
// Gets the cache object.
// getCache returns the internal cache object.
func (bs *dbBase) getCache() *gcache.Cache {
return bs.cache
}
// Gets the table name prefix.
// getPrefix returns the table prefix string configured.
func (bs *dbBase) getPrefix() string {
return bs.prefix
}
// Converts the queried list data *sql.Rows to the Result type.
// rowsToResult converts underlying data record type sql.Rows to Result type.
func (bs *dbBase) rowsToResult(rows *sql.Rows) (Result, error) {
if !rows.Next() {
return nil, nil
}
// Column information list: names and types.
columnTypes, err := rows.ColumnTypes()
// Column names and types.
columns, err := rows.ColumnTypes()
if err != nil {
return nil, err
}
types := make([]string, len(columnTypes))
columns := make([]string, len(columnTypes))
for k, v := range columnTypes {
types[k] = v.DatabaseTypeName()
columns[k] = v.Name()
columnTypes := make([]string, len(columns))
columnNames := make([]string, len(columns))
for k, v := range columns {
columnTypes[k] = v.DatabaseTypeName()
columnNames[k] = v.Name()
}
// Assembles the returned structure.
values := make([]sql.RawBytes, len(columns))
scanArgs := make([]interface{}, len(values))
values := make([]sql.RawBytes, len(columnNames))
records := make(Result, 0)
scanArgs := make([]interface{}, len(values))
for i := range values {
scanArgs[i] = &values[i]
}
@ -596,17 +659,19 @@ func (bs *dbBase) rowsToResult(rows *sql.Rows) (Result, error) {
if err := rows.Scan(scanArgs...); err != nil {
return records, err
}
// Creates a new row object.
row := make(Record)
// Note that the column value is of type []byte (a slice is itself a reference type);
// when looping over multiple records this variable points to the same memory address.
for i, column := range values {
if column == nil {
row[columns[i]] = gvar.New(nil)
// Note that the internal looping variable <value> is type of []byte,
// which points to the same memory address. So it should do a copy.
for i, value := range values {
if value == nil {
row[columnNames[i]] = gvar.New(nil)
} else {
// As sql.RawBytes is a slice type, a value copy must be used here.
v := make([]byte, len(column))
copy(v, column)
row[columns[i]] = gvar.New(bs.db.convertValue(v, types[i]))
// As sql.RawBytes is type of slice,
// it should do a copy of it.
v := make([]byte, len(value))
copy(v, value)
row[columnNames[i]] = gvar.New(bs.db.convertValue(v, columnTypes[i]))
}
}
records = append(records, row)
@ -642,7 +707,8 @@ func (bs *dbBase) quoteString(s string) string {
return doQuoteString(s, charLeft, charRight)
}
// Prints the SQL object (only effective when debug=true).
// printSql outputs the sql object to logger.
// It is enabled when configuration "debug" is true.
func (bs *dbBase) printSql(v *Sql) {
s := fmt.Sprintf("[%d ms] %s", v.End-v.Start, v.Format)
if v.Error != nil {

View File

@ -8,7 +8,7 @@ package gdb
import "database/sql"
// Result object for batch execution.
// batchSqlResult is the execution result for batch operations.
type batchSqlResult struct {
rowsAffected int64
lastResult sql.Result

View File

@ -15,48 +15,48 @@ import (
)
const (
DEFAULT_GROUP_NAME = "default" // Default configuration name.
DEFAULT_GROUP_NAME = "default" // Default group name.
)
// Database group configuration.
// Config is the configuration management object.
type Config map[string]ConfigGroup
// Database cluster configuration.
// ConfigGroup is a slice of configuration nodes for a specified named group.
type ConfigGroup []ConfigNode
// Database single-node configuration.
// ConfigNode is configuration for one node.
type ConfigNode struct {
Host string // Address.
Port string // Port.
User string // Account.
Pass string // Password.
Name string // Database name.
Type string // Database type: mysql, sqlite, mssql, pgsql, oracle.
Role string // (Optional, master by default) Database role, used for separating master-slave operations; at least one master is required. Values: master, slave.
Debug bool // (Optional) Enables debug mode.
Prefix string // (Optional) Table name prefix.
Weight int // (Optional) Weight for load balancing calculation; meaningless when the cluster has only one node.
Charset string // (Optional, utf8 by default) Charset.
LinkInfo string // (Optional) Custom connection information; when set, the connection fields above (Host,Port,User,Pass,Name) are ignored (extended feature).
MaxIdleConnCount int // (Optional) Maximum number of idle connections in the connection pool.
MaxOpenConnCount int // (Optional) Maximum number of open connections in the connection pool.
MaxConnLifetime time.Duration // (Optional) Duration for which a connection object can be reused.
Host string // Host of server, ip or domain like: 127.0.0.1, localhost
Port string // Port, it's commonly 3306.
User string // Authentication username.
Pass string // Authentication password.
Name string // Default used database name.
Type string // Database type: mysql, sqlite, mssql, pgsql, oracle.
Role string // (Optional, "master" in default) Node role, used for master-slave mode: master, slave.
Debug bool // (Optional) Debug mode enables debug information logging and output.
Prefix string // (Optional) Table prefix.
Weight int // (Optional) Weight for load balance calculating, it's useless if there's just one node.
Charset string // (Optional, "utf8mb4" in default) Custom charset when operating on database.
LinkInfo string // (Optional) Custom link information, when it is used, configuration Host/Port/User/Pass/Name are ignored.
MaxIdleConnCount int // (Optional) Max idle connection configuration for underlying connection pool.
MaxOpenConnCount int // (Optional) Max open connection configuration for underlying connection pool.
MaxConnLifetime time.Duration // (Optional) Max connection TTL configuration for underlying connection pool.
}
// Internal configuration object of the database package.
// configs is the internally used configuration object.
var configs struct {
sync.RWMutex // Concurrent-safe mutex.
config Config // Database group configurations.
defaultGroup string // Default database group name.
sync.RWMutex
config Config // All configurations.
group string // Default configuration group.
}
// Package initialization.
func init() {
configs.config = make(Config)
configs.defaultGroup = DEFAULT_GROUP_NAME
configs.group = DEFAULT_GROUP_NAME
}
// Sets the database configuration for the current application, overwriting the global database configuration.
// SetConfig sets the global configuration for package.
// It will overwrite the old configuration of package.
func SetConfig(config Config) {
defer instances.Clear()
configs.Lock()
@ -64,7 +64,7 @@ func SetConfig(config Config) {
configs.config = config
}
// Sets the database server cluster configuration for the given configuration group.
// SetConfigGroup sets the configuration for given group.
func SetConfigGroup(group string, nodes ConfigGroup) {
defer instances.Clear()
configs.Lock()
@ -72,7 +72,7 @@ func SetConfigGroup(group string, nodes ConfigGroup) {
configs.config[group] = nodes
}
// Adds one database server node configuration for the given configuration group.
// AddConfigNode adds one node configuration to configuration of given group.
func AddConfigNode(group string, node ConfigNode) {
defer instances.Clear()
configs.Lock()
@ -80,66 +80,66 @@ func AddConfigNode(group string, node ConfigNode) {
configs.config[group] = append(configs.config[group], node)
}
// Adds one database server node configuration for the default group.
// AddDefaultConfigNode adds one node configuration to configuration of default group.
func AddDefaultConfigNode(node ConfigNode) {
AddConfigNode(DEFAULT_GROUP_NAME, node)
}
// Adds the database server cluster configuration for the default group.
// AddDefaultConfigGroup adds multiple node configurations to configuration of default group.
func AddDefaultConfigGroup(nodes ConfigGroup) {
SetConfigGroup(DEFAULT_GROUP_NAME, nodes)
}
// Adds one database server configuration.
// GetConfig retrieves and returns the configuration of given group.
func GetConfig(group string) ConfigGroup {
configs.RLock()
defer configs.RUnlock()
return configs.config[group]
}
// Sets the default database configuration group (default is "default").
// SetDefaultGroup sets the group name for default configuration.
func SetDefaultGroup(name string) {
defer instances.Clear()
configs.Lock()
defer configs.Unlock()
configs.defaultGroup = name
configs.group = name
}
// Gets the default database configuration group (default is "default").
// GetDefaultGroup returns the name of the default configuration group.
func GetDefaultGroup() string {
defer instances.Clear()
configs.RLock()
defer configs.RUnlock()
return configs.defaultGroup
return configs.group
}
// Sets the log management object for the database.
// SetLogger sets the logger for orm.
func (bs *dbBase) SetLogger(logger *glog.Logger) {
bs.logger = logger
}
// Gets the database Logger object.
// GetLogger returns the logger of the orm.
func (bs *dbBase) GetLogger() *glog.Logger {
return bs.logger
}
// Sets the maximum number of idle connections in the database connection pool.
// SetMaxIdleConnCount sets the max idle connection count for underlying connection pool.
func (bs *dbBase) SetMaxIdleConnCount(n int) {
bs.maxIdleConnCount = n
}
// Sets the maximum number of open connections in the database connection pool.
// SetMaxOpenConnCount sets the max open connection count for underlying connection pool.
func (bs *dbBase) SetMaxOpenConnCount(n int) {
bs.maxOpenConnCount = n
}
// Sets how long a database connection can be reused; after that it is closed and discarded.
// If d <= 0, the connection is reused forever.
// SetMaxConnLifetime sets the connection TTL for underlying connection pool.
// If parameter <d> <= 0, it means the connection never expires.
func (bs *dbBase) SetMaxConnLifetime(d time.Duration) {
bs.maxConnLifetime = d
}
// Converts the node configuration to a string.
// String returns the node as string.
func (node *ConfigNode) String() string {
if node.LinkInfo != "" {
return node.LinkInfo
@ -148,11 +148,13 @@ func (node *ConfigNode) String() string {
`%s@%s:%s,%s,%s,%s,%s,%v,%d-%d-%d`,
node.User, node.Host, node.Port,
node.Name, node.Type, node.Role, node.Charset, node.Debug,
node.MaxIdleConnCount, node.MaxOpenConnCount, node.MaxConnLifetime,
node.MaxIdleConnCount,
node.MaxOpenConnCount,
node.MaxConnLifetime,
)
}
// Sets whether the debug service is enabled.
// SetDebug enables/disables the debug mode.
func (bs *dbBase) SetDebug(debug bool) {
if bs.debug.Val() == debug {
return
@ -160,7 +162,7 @@ func (bs *dbBase) SetDebug(debug bool) {
bs.debug.Set(debug)
}
// Gets whether the debug service is enabled.
// getDebug returns the debug value.
func (bs *dbBase) getDebug() bool {
return bs.debug.Val()
}
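A configuration sketch using the ConfigNode fields documented above with the default group; all values are illustrative:

package example

import (
	"time"

	"github.com/gogf/gf/database/gdb"
)

func init() {
	// Register one illustrative node for the default group.
	gdb.AddDefaultConfigNode(gdb.ConfigNode{
		Host:             "127.0.0.1",
		Port:             "3306",
		User:             "root",
		Pass:             "12345678",
		Name:             "test",
		Type:             "mysql",
		Role:             "master",
		Weight:           100,
		Charset:          "utf8",
		MaxIdleConnCount: 10,
		MaxOpenConnCount: 100,
		MaxConnLifetime:  30 * time.Second,
	})
}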

View File

@ -174,15 +174,6 @@ func GetPrimaryKeyCondition(primary string, where ...interface{}) (newWhereCondi
return where
}
// Gets the mapping between orm tags and struct attributes.
func GetOrmMappingOfStruct(pointer interface{}) map[string]string {
mapping := make(map[string]string)
for tag, attr := range structs.TagMapName(pointer, []string{ORM_TAG_FOR_STRUCT}, true) {
mapping[strings.Split(tag, ",")[0]] = attr
}
return mapping
}
// formatQuery formats the query string and its arguments before executing.
// The internal handleArguments function might be called twice during the SQL procedure,
// but do not worry about it, it's safe and efficient.
@ -483,5 +474,10 @@ func bindArgsToQuery(query string, args []interface{}) string {
// mapToStruct maps the <data> to given struct.
// Note that the given parameter <pointer> should be a pointer to a struct.
func mapToStruct(data map[string]interface{}, pointer interface{}) error {
return gconv.StructDeep(data, pointer, GetOrmMappingOfStruct(pointer))
// Retrieves the mapping between the orm tag and the struct attribute name.
mapping := make(map[string]string)
for tag, attr := range structs.TagMapName(pointer, []string{ORM_TAG_FOR_STRUCT}, true) {
mapping[strings.Split(tag, ",")[0]] = attr
}
return gconv.StructDeep(data, pointer, mapping)
}
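An illustrative struct showing the orm tag mapping that mapToStruct now builds inline; the type, columns and query below are hypothetical:

package example

import "github.com/gogf/gf/database/gdb"

// User maps columns to attributes through the orm tag; the part before the
// comma is the column name used in the tag-to-attribute mapping.
type User struct {
	Id   int    `orm:"uid,primary"`
	Name string `orm:"name"`
}

// getUserSketch queries one record into the struct; the conversion eventually
// goes through mapToStruct and the tag mapping built above.
func getUserSketch(db gdb.DB, uid int) (*User, error) {
	user := new(User)
	if err := db.GetStruct(user, "SELECT * FROM user WHERE uid=?", uid); err != nil {
		return nil, err
	}
	return user, nil
}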

View File

@ -151,7 +151,6 @@ func (db *dbOracle) TableFields(table string, schema ...string) (fields map[stri
return
}
// Queries the table's primary key and unique indexes and stores them in the cache.
func (db *dbOracle) getTableUniqueIndex(table string) (fields map[string]map[string]string, err error) {
table = strings.ToUpper(table)
v := db.cache.GetOrSetFunc("table_unique_index_"+table, func() interface{} {
@ -164,7 +163,6 @@ func (db *dbOracle) getTableUniqueIndex(table string) (fields map[string]map[str
if err != nil {
return nil
}
fields := make(map[string]map[string]string)
for _, v := range res {
mm := make(map[string]string)
@ -179,23 +177,11 @@ func (db *dbOracle) getTableUniqueIndex(table string) (fields map[string]map[str
return
}
// Supports the insert, replace, save and ignore operations.
// 0: insert: only performs the insert; if a conflicting primary key or unique index exists, it returns an error;
// 1: replace: if the data already exists (primary key or unique index), it is deleted and re-inserted;
// 2: save: if the data already exists (primary key or unique index), it is updated; otherwise a new record is inserted;
// 3: ignore: if the data already exists (primary key or unique index), nothing is done; requires ORACLE 11G or above.
//
// Note that the replace/save/ignore operations only support tables that have exactly one unique index or primary key.
// If multiple unique indexes or primary keys exist, execution may fail, because these three operations require a specific
// unique index; without one, a plain insert is performed.
//
// The parameter data supports the map/struct/*struct/slice types.
// When data is a slice (e.g. []map/[]struct/[]*struct), the batch parameter takes effect and the call automatically switches to a batch operation.
func (db *dbOracle) doInsert(link dbLink, table string, data interface{}, option int, batch ...int) (result sql.Result, err error) {
var fields []string
var values []string
var params []interface{}
var dataMap Map
// Uses reflection to determine the type of data; if it is a slice, the call automatically switches to a batch operation.
rv := reflect.ValueOf(data)
kind := rv.Kind()
if kind == reflect.Ptr {
@ -245,7 +231,7 @@ func (db *dbOracle) doInsert(link dbLink, table string, data interface{}, option
for k, v := range dataMap {
k = strings.ToUpper(k)
// Use MERGE only when the operation type is REPLACE/SAVE and a unique index exists; otherwise use INSERT.
if (option == gINSERT_OPTION_REPLACE || option == gINSERT_OPTION_SAVE) && indexExists {
fields = append(fields, tableAlias1+"."+charL+k+charR)
values = append(values, tableAlias2+"."+charL+k+charR)
@ -277,23 +263,31 @@ func (db *dbOracle) doInsert(link dbLink, table string, data interface{}, option
case gINSERT_OPTION_REPLACE:
fallthrough
case gINSERT_OPTION_SAVE:
tmp := fmt.Sprintf("MERGE INTO %s %s USING(SELECT %s FROM DUAL) %s ON(%s) WHEN MATCHED THEN UPDATE SET %s WHEN NOT MATCHED THEN INSERT (%s) VALUES(%s)",
tmp := fmt.Sprintf(
"MERGE INTO %s %s USING(SELECT %s FROM DUAL) %s ON(%s) WHEN MATCHED THEN UPDATE SET %s WHEN NOT MATCHED THEN INSERT (%s) VALUES(%s)",
table, tableAlias1, strings.Join(subSqlStr, ","), tableAlias2,
strings.Join(onStr, "AND"), strings.Join(updateStr, ","), strings.Join(fields, ","), strings.Join(values, ","))
strings.Join(onStr, "AND"), strings.Join(updateStr, ","), strings.Join(fields, ","), strings.Join(values, ","),
)
return db.db.doExec(link, tmp, params...)
case gINSERT_OPTION_IGNORE:
return db.db.doExec(link,
fmt.Sprintf("INSERT /*+ IGNORE_ROW_ON_DUPKEY_INDEX(%s(%s)) */ INTO %s(%s) VALUES(%s)",
table, strings.Join(indexs, ","), table, strings.Join(fields, ","), strings.Join(values, ",")),
fmt.Sprintf(
"INSERT /*+ IGNORE_ROW_ON_DUPKEY_INDEX(%s(%s)) */ INTO %s(%s) VALUES(%s)",
table, strings.Join(indexs, ","), table, strings.Join(fields, ","), strings.Join(values, ","),
),
params...)
}
}
return db.db.doExec(link, fmt.Sprintf("INSERT INTO %s(%s) VALUES(%s)",
table, strings.Join(fields, ","), strings.Join(values, ",")), params...)
return db.db.doExec(
link,
fmt.Sprintf(
"INSERT INTO %s(%s) VALUES(%s)",
table, strings.Join(fields, ","), strings.Join(values, ","),
),
params...)
}
// Batch writes data. The parameter list supports slice types, e.g.: []map/[]struct/[]*struct.
func (db *dbOracle) doBatchInsert(link dbLink, table string, list interface{}, option int, batch ...int) (result sql.Result, err error) {
var keys []string
var values []string

View File

@ -14,39 +14,45 @@ import (
"github.com/gogf/gf/text/gregex"
)
// Database transaction object.
// TX is the struct for transaction management.
type TX struct {
db DB
tx *sql.Tx
master *sql.DB
}
// Transaction operation: commit.
// Commit commits the transaction.
func (tx *TX) Commit() error {
return tx.tx.Commit()
}
// Transaction operation: rollback.
// Rollback aborts the transaction.
func (tx *TX) Rollback() error {
return tx.tx.Rollback()
}
// (Transaction) database SQL query operation, mainly for query statements.
// Query does query operation on transaction.
// See dbBase.Query.
func (tx *TX) Query(query string, args ...interface{}) (rows *sql.Rows, err error) {
return tx.db.doQuery(tx.tx, query, args...)
}
// (Transaction) executes a SQL statement and returns the execution result, mainly for non-query operations.
// Exec does none query operation on transaction.
// See dbBase.Exec.
func (tx *TX) Exec(query string, args ...interface{}) (sql.Result, error) {
return tx.db.doExec(tx.tx, query, args...)
}
// SQL prepared statement; after preparing, call Exec on the returned sql.Stmt to complete the SQL operation.
// Prepare creates a prepared statement for later queries or executions.
// Multiple queries or executions may be run concurrently from the
// returned statement.
// The caller must call the statement's Close method
// when the statement is no longer needed.
func (tx *TX) Prepare(query string) (*sql.Stmt, error) {
return tx.db.doPrepare(tx.tx, query)
}
// Database query that returns the query result set as a list structure.
// GetAll queries and returns data records from database.
func (tx *TX) GetAll(query string, args ...interface{}) (Result, error) {
rows, err := tx.Query(query, args...)
if err != nil || rows == nil {
@ -56,7 +62,7 @@ func (tx *TX) GetAll(query string, args ...interface{}) (Result, error) {
return tx.db.rowsToResult(rows)
}
// Database query that returns one result record as an associative array structure.
// GetOne queries and returns one record from database.
func (tx *TX) GetOne(query string, args ...interface{}) (Record, error) {
list, err := tx.GetAll(query, args...)
if err != nil {
@ -68,7 +74,8 @@ func (tx *TX) GetOne(query string, args ...interface{}) (Record, error) {
return nil, nil
}
// Database query that returns one result record and automatically maps the data into the given struct object.
// GetStruct queries one record from database and converts it to given struct.
// The parameter <pointer> should be a pointer to struct.
func (tx *TX) GetStruct(obj interface{}, query string, args ...interface{}) error {
one, err := tx.GetOne(query, args...)
if err != nil {
@ -77,7 +84,8 @@ func (tx *TX) GetStruct(obj interface{}, query string, args ...interface{}) erro
return one.Struct(obj)
}
// Database query that queries multiple records and automatically converts them to the specified slice object, e.g. []struct/[]*struct.
// GetStructs queries records from database and converts them to given struct.
// The parameter <pointer> should be type of struct slice: []struct/[]*struct.
func (tx *TX) GetStructs(objPointerSlice interface{}, query string, args ...interface{}) error {
all, err := tx.GetAll(query, args...)
if err != nil {
@ -86,9 +94,12 @@ func (tx *TX) GetStructs(objPointerSlice interface{}, query string, args ...inte
return all.Structs(objPointerSlice)
}
// Converts the result to the specified struct/*struct/[]struct/[]*struct;
// the parameter must be a pointer type, otherwise the call fails.
// This method automatically detects the parameter type and calls Struct/Structs accordingly.
// GetScan queries one or more records from database and converts them to given struct or
// struct array.
//
// If parameter <pointer> is type of struct pointer, it calls GetStruct internally for
// the conversion. If parameter <pointer> is type of slice, it calls GetStructs internally
// for conversion.
func (tx *TX) GetScan(objPointer interface{}, query string, args ...interface{}) error {
t := reflect.TypeOf(objPointer)
k := t.Kind()
@ -97,8 +108,7 @@ func (tx *TX) GetScan(objPointer interface{}, query string, args ...interface{})
}
k = t.Elem().Kind()
switch k {
case reflect.Array:
case reflect.Slice:
case reflect.Array, reflect.Slice:
return tx.db.GetStructs(objPointer, query, args...)
case reflect.Struct:
return tx.db.GetStruct(objPointer, query, args...)
@ -108,7 +118,9 @@ func (tx *TX) GetScan(objPointer interface{}, query string, args ...interface{})
return nil
}
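A sketch of the two GetScan branches merged above (struct pointer vs. slice pointer); the Account type and queries are illustrative:

package example

import "github.com/gogf/gf/database/gdb"

// Account is an illustrative result struct.
type Account struct {
	Uid  int
	Name string
}

// scanSketch demonstrates both branches of tx.GetScan.
func scanSketch(tx *gdb.TX) error {
	var one Account // Struct pointer -> GetStruct branch.
	if err := tx.GetScan(&one, "SELECT * FROM user WHERE uid=?", 10000); err != nil {
		return err
	}
	var many []Account // Slice pointer -> GetStructs branch.
	return tx.GetScan(&many, "SELECT * FROM user WHERE status=?", 1)
}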
// Database query that returns the value of the queried field.
// GetValue queries and returns the field value from database.
// The sql should query only one field from the database, or else it returns only one
// field of the result.
func (tx *TX) GetValue(query string, args ...interface{}) (Value, error) {
one, err := tx.GetOne(query, args...)
if err != nil {
@ -120,7 +132,7 @@ func (tx *TX) GetValue(query string, args ...interface{}) (Value, error) {
return nil, nil
}
// Database query that returns the query count.
// GetCount queries and returns the count from database.
func (tx *TX) GetCount(query string, args ...interface{}) (int, error) {
if !gregex.IsMatchString(`(?i)SELECT\s+COUNT\(.+\)\s+FROM`, query) {
query, _ = gregex.ReplaceString(`(?i)(SELECT)\s+(.+)\s+(FROM)`, `$1 COUNT($2) $3`, query)
@ -132,65 +144,124 @@ func (tx *TX) GetCount(query string, args ...interface{}) (int, error) {
return value.Int(), nil
}
// CURD operation: single record insert. It only performs the insert; if a conflicting primary key or unique index exists, it returns an error.
// Insert does "INSERT INTO ..." statement for the table.
// If there's already one unique record of the data in the table, it returns error.
//
// The parameter <data> can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
// Eg:
// Data(g.Map{"uid": 10000, "name":"john"})
// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"}})
//
// The parameter <batch> specifies the batch operation count when given data is slice.
func (tx *TX) Insert(table string, data interface{}, batch ...int) (sql.Result, error) {
return tx.db.doInsert(tx.tx, table, data, gINSERT_OPTION_DEFAULT, batch...)
}
// InsertIgnore does "INSERT IGNORE INTO ..." statement for the table.
// If there's already one unique record of the data in the table, it ignores the inserting.
//
// The parameter <data> can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
// Eg:
// Data(g.Map{"uid": 10000, "name":"john"})
// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"}})
//
// The parameter <batch> specifies the batch operation count when given data is slice.
func (tx *TX) InsertIgnore(table string, data interface{}, batch ...int) (sql.Result, error) {
return tx.db.doInsert(tx.tx, table, data, gINSERT_OPTION_IGNORE, batch...)
}
// CURD operation: single record insert. If the data already exists (primary key or unique index), it is deleted and re-inserted.
// Replace does "REPLACE INTO ..." statement for the table.
// If there's already one unique record of the data in the table, it deletes the record
// and inserts a new one.
//
// The parameter <data> can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
// Eg:
// Data(g.Map{"uid": 10000, "name":"john"})
// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"}})
//
// If given data is type of slice, it then does batch replacing, and the optional parameter
// <batch> specifies the batch operation count.
func (tx *TX) Replace(table string, data interface{}, batch ...int) (sql.Result, error) {
return tx.db.doInsert(tx.tx, table, data, gINSERT_OPTION_REPLACE, batch...)
}
// CURD operation: single record insert. If the data already exists (primary key or unique index), it is updated; otherwise a new record is inserted.
// Save does "INSERT INTO ... ON DUPLICATE KEY UPDATE..." statement for the table.
// It updates the record if there's primary or unique index in the saving data,
// or else it inserts a new record into the table.
//
// The parameter <data> can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
// Eg:
// Data(g.Map{"uid": 10000, "name":"john"})
// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"}})
//
// If given data is type of slice, it then does batch saving, and the optional parameter
// <batch> specifies the batch operation count.
func (tx *TX) Save(table string, data interface{}, batch ...int) (sql.Result, error) {
return tx.db.doInsert(tx.tx, table, data, gINSERT_OPTION_SAVE, batch...)
}
// CURD operation: batch insert with the specified batch size.
// BatchInsert batch inserts data.
// The parameter <list> must be type of slice of map or struct.
func (tx *TX) BatchInsert(table string, list interface{}, batch ...int) (sql.Result, error) {
return tx.db.doBatchInsert(tx.tx, table, list, gINSERT_OPTION_DEFAULT, batch...)
}
// CURD operation: batch insert with the specified batch size; if the data already exists (primary key or unique index), it is deleted and re-inserted.
// BatchInsertIgnore batch inserts data with ignore option.
// The parameter <list> must be type of slice of map or struct.
func (tx *TX) BatchInsertIgnore(table string, list interface{}, batch ...int) (sql.Result, error) {
return tx.db.doBatchInsert(tx.tx, table, list, gINSERT_OPTION_IGNORE, batch...)
}
// BatchReplace batch replaces data.
// The parameter <list> must be type of slice of map or struct.
func (tx *TX) BatchReplace(table string, list interface{}, batch ...int) (sql.Result, error) {
return tx.db.doBatchInsert(tx.tx, table, list, gINSERT_OPTION_REPLACE, batch...)
}
// CURD operation: batch insert with the specified batch size; if the data already exists (primary key or unique index), it is updated; otherwise a new record is inserted.
// BatchSave batch saves data.
// The parameter <list> must be type of slice of map or struct.
func (tx *TX) BatchSave(table string, list interface{}, batch ...int) (sql.Result, error) {
return tx.db.doBatchInsert(tx.tx, table, list, gINSERT_OPTION_SAVE, batch...)
}
// CURD operation: data update, uniformly using SQL prepared statements.
// The data parameter supports string or associative array types, which are handled internally.
// Update does "UPDATE ... " statement for the table.
//
// The parameter <data> can be type of string/map/gmap/struct/*struct, etc.
// Eg: "uid=10000", "uid", 10000, g.Map{"uid": 10000, "name":"john"}
//
// The parameter <condition> can be type of string/map/gmap/slice/struct/*struct, etc.
// It is commonly used with parameter <args>.
// Eg:
// "uid=10000",
// "uid", 10000
// "money>? AND name like ?", 99999, "vip_%"
// "status IN (?)", g.Slice{1,2,3}
// "age IN(?,?)", 18, 50
// User{ Id : 1, UserName : "john"}
func (tx *TX) Update(table string, data interface{}, condition interface{}, args ...interface{}) (sql.Result, error) {
newWhere, newArgs := formatWhere(tx.db, condition, args, false)
if newWhere != "" {
newWhere = " WHERE " + newWhere
}
return tx.doUpdate(table, data, newWhere, newArgs...)
return tx.db.doUpdate(tx.tx, table, data, newWhere, newArgs...)
}
// The difference from the Update method is that it does not process the condition parameters.
func (tx *TX) doUpdate(table string, data interface{}, condition string, args ...interface{}) (sql.Result, error) {
return tx.db.doUpdate(tx.tx, table, data, condition, args...)
}
// CURD operation: deletes data.
// Delete does "DELETE FROM ... " statement for the table.
//
// The parameter <condition> can be type of string/map/gmap/slice/struct/*struct, etc.
// It is commonly used with parameter <args>.
// Eg:
// "uid=10000",
// "uid", 10000
// "money>? AND name like ?", 99999, "vip_%"
// "status IN (?)", g.Slice{1,2,3}
// "age IN(?,?)", 18, 50
// User{ Id : 1, UserName : "john"}
func (tx *TX) Delete(table string, condition interface{}, args ...interface{}) (sql.Result, error) {
newWhere, newArgs := formatWhere(tx.db, condition, args, false)
if newWhere != "" {
newWhere = " WHERE " + newWhere
}
return tx.doDelete(table, newWhere, newArgs...)
}
// The difference from the Delete method is that it does not process the condition parameters.
func (tx *TX) doDelete(table string, condition string, args ...interface{}) (sql.Result, error) {
return tx.db.doDelete(tx.tx, table, condition, args...)
return tx.db.doDelete(tx.tx, table, newWhere, newArgs...)
}
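A sketch exercising the refactored tx.Update/tx.Delete paths above, which now call db.doUpdate/db.doDelete directly with the formatted where clause; the table and values are illustrative:

package example

import (
	"github.com/gogf/gf/database/gdb"
	"github.com/gogf/gf/frame/g"
)

// cleanupSketch updates and deletes inside one transaction.
func cleanupSketch(db gdb.DB) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	if _, err = tx.Update("user", g.Map{"status": 2}, "uid", 10000); err != nil {
		tx.Rollback()
		return err
	}
	if _, err = tx.Delete("user", "status IN(?)", g.Slice{3, 4}); err != nil {
		tx.Rollback()
		return err
	}
	return tx.Commit()
}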

View File

@ -11,7 +11,7 @@ import (
"io/ioutil"
)
// bodyReadCloser implements the io.ReadCloser interface
// BodyReadCloser implements the io.ReadCloser interface
// which is used for reading request body content multiple times.
type BodyReadCloser struct {
*bytes.Reader

View File

@ -61,16 +61,25 @@ const (
)
const (
REPEAT_EVENT_FILTER_DURATION = time.Millisecond // Duration for repeated event filter.
gFSNOTIFY_EVENT_EXIT = "exit" // Custom exit event for internal usage.
repeatEventFilterDuration = time.Millisecond // Duration for repeated event filter.
callbackExitEventPanicStr = "exit" // Custom exit event for internal usage.
)
var (
defaultWatcher, _ = New() // Default watcher.
defaultWatcher *Watcher // Default watcher.
callbackIdMap = gmap.NewIntAnyMap(true) // Id to callback mapping.
callbackIdGenerator = gtype.NewInt() // Atomic id generator for callback.
)
func init() {
var err error
defaultWatcher, err = New()
if err != nil {
// Default watcher object must be created, or else it panics.
panic(fmt.Sprintf(`creating default fsnotify watcher failed: %s`, err.Error()))
}
}
// New creates and returns a new watcher.
// Note that the watcher number is limited by the file handle setting of the system.
// Eg: fs.inotify.max_user_instances system variable in linux systems.
@ -125,7 +134,8 @@ func RemoveCallback(callbackId int) error {
return nil
}
// Exit is only used in the callback function, which can be used to remove current callback from the watcher.
// Exit is only used in the callback function; it can be used to remove the current callback
// itself from the watcher.
func Exit() {
panic(gFSNOTIFY_EVENT_EXIT)
panic(callbackExitEventPanicStr)
}
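A sketch of the one-shot callback pattern that the renamed exit panic string supports; the watched path is illustrative:

package main

import (
	"fmt"

	"github.com/gogf/gf/os/gfsnotify"
)

func main() {
	_, err := gfsnotify.Add("/tmp/watched-dir", func(event *gfsnotify.Event) {
		fmt.Println("received:", event.String())
		// Exit panics with the internal exit string; the event loop recovers
		// it and removes this callback, so it fires only once.
		gfsnotify.Exit()
	})
	if err != nil {
		panic(err)
	}
	select {} // Block so the watcher keeps running in this sketch.
}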

View File

@ -22,7 +22,6 @@ func (w *Watcher) startWatchLoop() {
// Event listening.
case ev := <-w.watcher.Events:
//intlog.Print(ev.String())
// Filter the repeated event in custom duration.
w.cache.SetIfNotExist(ev.String(), func() interface{} {
w.events.Push(&Event{
@ -32,7 +31,7 @@ func (w *Watcher) startWatchLoop() {
Watcher: w,
})
return struct{}{}
}, REPEAT_EVENT_FILTER_DURATION)
}, repeatEventFilterDuration)
case err := <-w.watcher.Errors:
intlog.Error(err)
@ -148,7 +147,7 @@ func (w *Watcher) startEventLoop() {
defer func() {
if err := recover(); err != nil {
switch err {
case gFSNOTIFY_EVENT_EXIT:
case callbackExitEventPanicStr:
w.RemoveCallback(callback.Id)
default:
panic(err)

View File

@ -28,7 +28,7 @@ func New(t ...time.Time) *Time {
}
}
// Now returns a time object for now.
// Now creates and returns a time object of now.
func Now() *Time {
return &Time{
time.Now(),
@ -50,7 +50,8 @@ func NewFromStr(str string) *Time {
return nil
}
// NewFromStrFormat creates and returns a Time object with given string and custom format like: Y-m-d H:i:s.
// NewFromStrFormat creates and returns a Time object with given string and
// custom format like: Y-m-d H:i:s.
func NewFromStrFormat(str string, format string) *Time {
if t, err := StrToTimeFormat(str, format); err == nil {
return t
@ -58,7 +59,8 @@ func NewFromStrFormat(str string, format string) *Time {
return nil
}
// NewFromStrLayout creates and returns a Time object with given string and stdlib layout like: 2006-01-02 15:04:05.
// NewFromStrLayout creates and returns a Time object with given string and
// stdlib layout like: 2006-01-02 15:04:05.
func NewFromStrLayout(str string, layout string) *Time {
if t, err := StrToTimeLayout(str, layout); err == nil {
return t
@ -66,7 +68,8 @@ func NewFromStrLayout(str string, layout string) *Time {
return nil
}
// NewFromTimeStamp creates and returns a Time object with given timestamp, which can be in seconds to nanoseconds.
// NewFromTimeStamp creates and returns a Time object with given timestamp,
// which can be in seconds to nanoseconds.
func NewFromTimeStamp(timestamp int64) *Time {
if timestamp == 0 {
return &Time{}

View File

@ -832,6 +832,21 @@ func Test_In(t *testing.T) {
}
func Test_NotIn(t *testing.T) {
gtest.Case(t, func() {
rule := "not-in:100"
val1 := ""
val2 := "1"
val3 := "100"
val4 := "200"
err1 := gvalid.Check(val1, rule, nil)
err2 := gvalid.Check(val2, rule, nil)
err3 := gvalid.Check(val3, rule, nil)
err4 := gvalid.Check(val4, rule, nil)
gtest.Assert(err1, nil)
gtest.Assert(err2, nil)
gtest.AssertNE(err3, nil)
gtest.Assert(err4, nil)
})
gtest.Case(t, func() {
rule := "not-in:100,200"
val1 := ""