feat: complete the MySQL backup import feature
parent 8431f49c47
commit c49d2ef243
@@ -47,6 +47,50 @@ func (b *BaseApi) UpdateMysql(c *gin.Context) {
	helper.SuccessWithData(c, nil)
}

func (b *BaseApi) UploadMysqlFiles(c *gin.Context) {
	form, err := c.MultipartForm()
	if err != nil {
		helper.ErrorWithDetail(c, constant.CodeErrBadRequest, constant.ErrTypeInvalidParams, err)
		return
	}
	files := form.File["file"]

	mysqlName, ok := c.Params.Get("mysqlName")
	if !ok {
		helper.ErrorWithDetail(c, constant.CodeErrBadRequest, constant.ErrTypeInvalidParams, errors.New("error mysqlName in path"))
		return
	}
	if err := mysqlService.UpFile(mysqlName, files); err != nil {
		helper.ErrorWithDetail(c, constant.CodeErrInternalServer, constant.ErrTypeInternalServer, err)
		return
	}

	helper.SuccessWithData(c, nil)
}

func (b *BaseApi) MysqlUpList(c *gin.Context) {
	var req dto.SearchDBWithPage
	if err := c.ShouldBindJSON(&req); err != nil {
		helper.ErrorWithDetail(c, constant.CodeErrBadRequest, constant.ErrTypeInvalidParams, err)
		return
	}
	if err := global.VALID.Struct(req); err != nil {
		helper.ErrorWithDetail(c, constant.CodeErrBadRequest, constant.ErrTypeInvalidParams, err)
		return
	}

	total, list, err := mysqlService.SearchUpListWithPage(req)
	if err != nil {
		helper.ErrorWithDetail(c, constant.CodeErrInternalServer, constant.ErrTypeInternalServer, err)
		return
	}

	helper.SuccessWithData(c, dto.PageResult{
		Items: list,
		Total: total,
	})
}

func (b *BaseApi) UpdateMysqlVariables(c *gin.Context) {
	var req []dto.MysqlVariablesUpdate
	if err := c.ShouldBindJSON(&req); err != nil {
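For reference, the new UploadMysqlFiles handler expects a multipart form with the dump under the "file" field and the instance name as the :mysqlName path parameter. A minimal client sketch, assuming the router group shown later in this diff is mounted under an /api/v1/databases prefix; the host, prefix, instance name "mysql-demo", and local file "test.sql" are illustrative assumptions, not confirmed by this commit:

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"mime/multipart"
		"net/http"
		"os"
	)

	func main() {
		// Build a multipart body with the SQL dump under the "file" field,
		// matching form.File["file"] read by UploadMysqlFiles.
		var body bytes.Buffer
		writer := multipart.NewWriter(&body)
		part, err := writer.CreateFormFile("file", "test.sql") // illustrative local dump
		if err != nil {
			panic(err)
		}
		f, err := os.Open("test.sql")
		if err != nil {
			panic(err)
		}
		defer f.Close()
		if _, err := io.Copy(part, f); err != nil {
			panic(err)
		}
		writer.Close()

		// ":mysqlName" in the route is the MySQL app name; URL prefix and name
		// below are assumptions for illustration only.
		url := "http://localhost:9999/api/v1/databases/uplist/upload/mysql-demo"
		resp, err := http.Post(url, writer.FormDataContentType(), &body)
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()
		fmt.Println("status:", resp.Status)
	}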
@@ -66,6 +66,15 @@ func (c *BackupRepo) WithByDetailName(detailName string) DBOption {
	}
}

func (c *BackupRepo) WithByFileName(fileName string) DBOption {
	return func(g *gorm.DB) *gorm.DB {
		if len(fileName) == 0 {
			return g
		}
		return g.Where("file_name = ?", fileName)
	}
}

func (u *BackupRepo) List(opts ...DBOption) ([]model.BackupAccount, error) {
	var ops []model.BackupAccount
	db := global.DB.Model(&model.BackupAccount{})
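WithByFileName follows the repo's functional-option style: each DBOption narrows a *gorm.DB query, and repo methods such as DeleteRecord apply whichever options the caller passes. A short standalone sketch of that pattern, using generic gorm calls rather than the project's exact repo code:

	package repo

	import "gorm.io/gorm"

	// DBOption mirrors the repo pattern: a function that scopes a gorm query.
	type DBOption func(*gorm.DB) *gorm.DB

	func withByFileName(fileName string) DBOption {
		return func(g *gorm.DB) *gorm.DB {
			if len(fileName) == 0 {
				return g // empty filter is a no-op, as in the diff
			}
			return g.Where("file_name = ?", fileName)
		}
	}

	// applyOptions chains every option onto the base query before it executes,
	// which is how List-style repo methods consume opts ...DBOption.
	func applyOptions(db *gorm.DB, opts ...DBOption) *gorm.DB {
		for _, opt := range opts {
			db = opt(db)
		}
		return db
	}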
@@ -78,6 +78,10 @@ func (u *CronjobService) HandleBackup(cronjob *model.Cronjob, startTime time.Tim
	if err != nil {
		return "", err
	}
	app, err := mysqlRepo.LoadBaseInfoByName(cronjob.Database)
	if err != nil {
		return "", err
	}
	if cronjob.KeepLocal || cronjob.Type != "LOCAL" {
		backupLocal, err := backupRepo.Get(commonRepo.WithByType("LOCAL"))
		if err != nil {
@@ -94,7 +98,7 @@ func (u *CronjobService) HandleBackup(cronjob *model.Cronjob, startTime time.Tim

	if cronjob.Type == "database" {
		fileName = fmt.Sprintf("db_%s_%s.sql.gz", cronjob.DBName, time.Now().Format("20060102150405"))
		backupDir = fmt.Sprintf("database/%s/%s", cronjob.Database, cronjob.DBName)
		backupDir = fmt.Sprintf("database/%s/%s/%s", app.Key, cronjob.Database, cronjob.DBName)
		err = backupMysql(backup.Type, baseDir, backupDir, cronjob.Database, cronjob.DBName, fileName)
		if err != nil {
			return "", err
@@ -161,8 +165,24 @@ func (u *CronjobService) HandleRmExpired(backType, baseDir, backupDir string, cr
		global.LOG.Errorf("read dir %s failed, err: %v", baseDir+"/"+backupDir, err)
		return
	}
	for i := 0; i < len(files)-int(cronjob.RetainCopies); i++ {
		_ = os.Remove(baseDir + "/" + backupDir + "/" + files[i].Name())
	if len(files) == 0 {
		return
	}
	if cronjob.Type == "database" {
		dbCopies := uint64(0)
		for i := len(files) - 1; i >= 0; i-- {
			if strings.HasPrefix(files[i].Name(), "db_") {
				dbCopies++
				if dbCopies > cronjob.RetainCopies {
					_ = os.Remove(baseDir + "/" + backupDir + "/" + files[i].Name())
					_ = backupRepo.DeleteRecord(backupRepo.WithByFileName(files[i].Name()))
				}
			}
		}
	} else {
		for i := 0; i < len(files)-int(cronjob.RetainCopies); i++ {
			_ = os.Remove(baseDir + "/" + backupDir + "/" + files[i].Name())
		}
	}
	records, _ := cronjobRepo.ListRecord(cronjobRepo.WithByJobID(int(cronjob.ID)))
	if len(records) > int(cronjob.RetainCopies) {
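The reworked HandleRmExpired keeps only the newest RetainCopies database dumps: directory entries are assumed to sort oldest-first thanks to their timestamped names, so walking the list from the end counts db_-prefixed files newest-first and removes (and de-registers) anything past the limit. A small self-contained sketch of that decision rule, using plain strings instead of os.FileInfo:

	package main

	import (
		"fmt"
		"strings"
	)

	// pruneDBBackups returns the db_ dumps that would be deleted when keeping
	// only the newest `retain` copies. `files` is assumed sorted oldest-first,
	// which timestamped names like db_app_20060102150405.sql.gz guarantee.
	func pruneDBBackups(files []string, retain uint64) []string {
		var toDelete []string
		dbCopies := uint64(0)
		for i := len(files) - 1; i >= 0; i-- { // newest first
			if strings.HasPrefix(files[i], "db_") {
				dbCopies++
				if dbCopies > retain {
					toDelete = append(toDelete, files[i])
				}
			}
		}
		return toDelete
	}

	func main() {
		files := []string{
			"db_app_20221008090000.sql.gz",
			"db_app_20221009090000.sql.gz",
			"db_app_20221010090000.sql.gz",
			"directory.json", // non-dump entries are ignored
		}
		// With retain=2, only the oldest dump is selected for removal.
		fmt.Println(pruneDBBackups(files, 2)) // [db_app_20221008090000.sql.gz]
	}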
@@ -5,8 +5,10 @@ import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"mime/multipart"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
@@ -32,6 +34,8 @@ type IMysqlService interface {
	ChangeInfo(info dto.ChangeDBInfo) error
	UpdateVariables(mysqlName string, updatas []dto.MysqlVariablesUpdate) error

	UpFile(mysqlName string, files []*multipart.FileHeader) error
	SearchUpListWithPage(req dto.SearchDBWithPage) (int64, interface{}, error)
	Backup(db dto.BackupDB) error
	Recover(db dto.RecoverDB) error
@@ -59,8 +63,88 @@ func (u *MysqlService) SearchWithPage(search dto.SearchDBWithPage) (int64, inter
	return total, dtoMysqls, err
}

func (u *MysqlService) SearchUpListWithPage(req dto.SearchDBWithPage) (int64, interface{}, error) {
	var (
		list      []dto.RedisBackupRecords
		backDatas []dto.RedisBackupRecords
	)
	redisInfo, err := mysqlRepo.LoadBaseInfoByName(req.MysqlName)
	if err != nil {
		return 0, nil, err
	}
	backupLocal, err := backupRepo.Get(commonRepo.WithByType("LOCAL"))
	if err != nil {
		return 0, nil, err
	}
	localDir, err := loadLocalDir(backupLocal)
	if err != nil {
		return 0, nil, err
	}
	uploadDir := fmt.Sprintf("%s/database/%s/%s/upload", localDir, redisInfo.Key, redisInfo.Name)
	if _, err := os.Stat(uploadDir); err != nil {
		return 0, list, nil
	}
	_ = filepath.Walk(uploadDir, func(path string, info os.FileInfo, err error) error {
		if !info.IsDir() {
			list = append(list, dto.RedisBackupRecords{
				CreatedAt: info.ModTime().Format("2006-01-02 15:04:05"),
				Size:      int(info.Size()),
				FileDir:   uploadDir,
				FileName:  info.Name(),
			})
		}
		return nil
	})
	total, start, end := len(list), (req.Page-1)*req.PageSize, req.Page*req.PageSize
	if start > total {
		backDatas = make([]dto.RedisBackupRecords, 0)
	} else {
		if end >= total {
			end = total
		}
		backDatas = list[start:end]
	}
	return int64(total), backDatas, nil
}

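SearchUpListWithPage paginates in memory: it walks the upload directory into a flat list, then slices that list by page and pageSize, clamping the bounds so an out-of-range page returns an empty result rather than panicking. The slicing rule in isolation, as a small sketch:

	package main

	import "fmt"

	// paginate returns the page'th slice of size pageSize, mirroring the
	// clamping used in SearchUpListWithPage (1-based page numbers).
	func paginate(items []string, page, pageSize int) []string {
		total, start, end := len(items), (page-1)*pageSize, page*pageSize
		if start > total {
			return []string{}
		}
		if end >= total {
			end = total
		}
		return items[start:end]
	}

	func main() {
		files := []string{"a.sql", "b.sql", "c.sql", "d.sql", "e.sql"}
		fmt.Println(paginate(files, 1, 2)) // [a.sql b.sql]
		fmt.Println(paginate(files, 3, 2)) // [e.sql]
		fmt.Println(paginate(files, 4, 2)) // []
	}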
func (u *MysqlService) UpFile(mysqlName string, files []*multipart.FileHeader) error {
	backupLocal, err := backupRepo.Get(commonRepo.WithByType("LOCAL"))
	if err != nil {
		return err
	}
	app, err := mysqlRepo.LoadBaseInfoByName(mysqlName)
	if err != nil {
		return err
	}
	localDir, err := loadLocalDir(backupLocal)
	if err != nil {
		return err
	}
	dstDir := fmt.Sprintf("%s/database/%s/%s/upload", localDir, app.Key, mysqlName)
	if _, err := os.Stat(dstDir); err != nil && os.IsNotExist(err) {
		if err = os.MkdirAll(dstDir, os.ModePerm); err != nil {
			if err != nil {
				return fmt.Errorf("mkdir %s failed, err: %v", dstDir, err)
			}
		}
	}
	for _, file := range files {
		src, err := file.Open()
		if err != nil {
			return err
		}
		defer src.Close()
		out, err := os.Create(dstDir + "/" + file.Filename)
		if err != nil {
			return err
		}
		defer out.Close()
	}
	return nil
}

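As written, the loop in UpFile opens the uploaded part and creates the destination file but never transfers the bytes, and the inner `if err != nil` after MkdirAll is redundant. A minimal sketch of the per-file save step, assuming io.Copy is the intended transfer; saveUploadedFile is a hypothetical helper, not part of this commit:

	package service

	import (
		"io"
		"mime/multipart"
		"os"
		"path/filepath"
	)

	// saveUploadedFile copies one multipart upload into dstDir; a sketch of the
	// transfer step the loop in UpFile appears to be missing.
	func saveUploadedFile(file *multipart.FileHeader, dstDir string) error {
		src, err := file.Open()
		if err != nil {
			return err
		}
		defer src.Close()

		out, err := os.Create(filepath.Join(dstDir, file.Filename))
		if err != nil {
			return err
		}
		defer out.Close()

		// Without this copy the destination file is created but left empty.
		_, err = io.Copy(out, src)
		return err
	}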
func (u *MysqlService) ListDBByVersion(name string) ([]string, error) {
	mysqls, err := mysqlRepo.List(commonRepo.WithByName(name))
	mysqls, err := mysqlRepo.List(mysqlRepo.WithByMysqlName(name))
	var dbNames []string
	for _, mysql := range mysqls {
		dbNames = append(dbNames, mysql.Name)
@@ -128,11 +212,15 @@ func (u *MysqlService) Backup(db dto.BackupDB) error {
	if err != nil {
		return err
	}
	app, err := mysqlRepo.LoadBaseInfoByName(db.MysqlName)
	if err != nil {
		return err
	}
	localDir, err := loadLocalDir(backupLocal)
	if err != nil {
		return err
	}
	backupDir := fmt.Sprintf("database/%s/%s", db.MysqlName, db.DBName)
	backupDir := fmt.Sprintf("database/%s/%s/%s", app.Key, db.MysqlName, db.DBName)
	fileName := fmt.Sprintf("%s_%s.sql.gz", db.DBName, time.Now().Format("20060102150405"))
	if err := backupMysql("LOCAL", localDir, backupDir, db.MysqlName, db.DBName, fileName); err != nil {
		return err
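Both the cron path and the manual Backup path now nest dumps under the app key, and name files with Go's reference-time layout 20060102150405 (yyyyMMddHHmmss). A quick sketch of the resulting relative path, with illustrative values for the app key, instance, and database names:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		// Illustrative values; the real ones come from mysqlRepo and the request.
		appKey, mysqlName, dbName := "mysql", "mysql-demo", "shop"

		backupDir := fmt.Sprintf("database/%s/%s/%s", appKey, mysqlName, dbName)
		fileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102150405"))

		// e.g. database/mysql/mysql-demo/shop/shop_20221011142305.sql.gz
		fmt.Println(backupDir + "/" + fileName)
	}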
@@ -24,6 +24,8 @@ func (s *DatabaseRouter) InitDatabaseRouter(Router *gin.RouterGroup) {
		withRecordRouter.POST("", baseApi.CreateMysql)
		withRecordRouter.PUT("/:id", baseApi.UpdateMysql)
		withRecordRouter.POST("/backup", baseApi.BackupMysql)
		withRecordRouter.POST("/uplist", baseApi.MysqlUpList)
		withRecordRouter.POST("/uplist/upload/:mysqlName", baseApi.UploadMysqlFiles)
		withRecordRouter.POST("/recover", baseApi.RecoverMysql)
		withRecordRouter.POST("/backups/search", baseApi.SearchDBBackups)
		withRecordRouter.POST("/del", baseApi.DeleteMysql)
@@ -90,7 +90,7 @@ class RequestHttp {
    download<BlobPart>(url: string, params?: object, _object = {}): Promise<BlobPart> {
        return this.service.post(url, params, _object);
    }
    upload<T>(url: string, params: object = {}, config: AxiosRequestConfig): Promise<T> {
    upload<T>(url: string, params: object = {}, config?: AxiosRequestConfig): Promise<T> {
        return this.service.post(url, params, config);
    }
}
@@ -161,7 +161,7 @@ export namespace Database {
        appendfsync: string;
        save: string;
    }
    export interface RedisBackupRecord {
    export interface FileRecord {
        fileName: string;
        fileDir: string;
        createdAt: string;
@@ -2,14 +2,21 @@ import http from '@/api';
import { ReqPage, ResPage } from '../interface';
import { Backup } from '../interface/backup';
import { Database } from '../interface/database';
import { File } from '@/api/interface/file';

export const searchMysqlDBs = (params: Database.Search) => {
    return http.post<ResPage<Database.MysqlDBInfo>>(`databases/search`, params);
    return http.post<ResPage<Database.MysqlDBInfo>>(`/databases/search`, params);
};
export const listDBByVersion = (params: string) => {
    return http.get(`databases/dbs/${params}`);
    return http.get(`/databases/dbs/${params}`);
};

export const searchUpList = (params: Database.Search) => {
    return http.post<ResPage<Database.FileRecord>>(`/databases/uplist`, params);
};
export const uploadFile = (mysqlName: string, params: FormData) => {
    return http.upload<File.File>(`/databases/uplist/upload/${mysqlName}`, params);
};
export const backup = (params: Database.Backup) => {
    return http.post(`/databases/backup`, params);
};
@@ -75,7 +82,7 @@ export const recoverRedis = (param: Database.RedisRecover) => {
    return http.post(`/databases/redis/recover`, param);
};
export const redisBackupRedisRecords = (param: ReqPage) => {
    return http.post<ResPage<Database.RedisBackupRecord>>(`/databases/redis/backup/records`, param);
    return http.post<ResPage<Database.FileRecord>>(`/databases/redis/backup/records`, param);
};
export const deleteBackupRedis = (param: Database.RedisBackupDelete) => {
    return http.post(`/databases/redis/backup/del`, param);
@@ -152,6 +152,120 @@ export default {
    header: {
        logout: 'Logout',
    },
    database: {
        source: 'Source',
        backup: 'Database backup',
        permission: 'Permission',
        permissionLocal: 'Local server',
        permissionForIP: 'IP',
        permissionAll: 'All of them (unsafe)',
        rootPassword: 'Root password',
        backupList: 'Backup list',
        backList: 'Return',
        loadBackup: 'Import the backup',
        setting: 'Database Settings',
        remoteAccess: 'Remote access',
        changePassword: 'Password change',

        baseSetting: 'infrastructure',
        remoteConnHelper:
            'Remote connection to mysql as user root may have security risks. Therefore, perform this operation with caution.',
        confChange: 'Configuration change',
        portHelper:
            'This port is the exposed port of the container. You need to save the modification separately and restart the container!',

        currentStatus: 'Current state',
        runTime: 'Startup time',
        connections: 'Total connections',
        bytesSent: 'Send bytes',
        bytesReceived: 'Received bytes',
        queryPerSecond: 'Query per second',
        txPerSecond: 'Tx per second',
        connInfo: 'active/peak connections',
        connInfoHelper: 'If the value is too large, increase max_connections',
        threadCacheHit: 'Thread cache hit',
        threadCacheHitHelper: 'If it is too low, increase thread_cache_size',
        indexHit: 'Index hit',
        indexHitHelper: 'If it is too low, increase key_buffer_size',
        innodbIndexHit: 'Innodb index hit rate',
        innodbIndexHitHelper: 'If it is too low, increase innodb_buffer_pool_size',
        cacheHit: 'Querying the Cache Hit',
        cacheHitHelper: 'If it is too low, increase query_cache_size',
        tmpTableToDB: 'Temporary table to disk',
        tmpTableToDBHelper: 'If it is too large, try increasing tmp_table_size',
        openTables: 'Open tables',
        openTablesHelper: 'The configuration value of table_open_cache must be greater than or equal to this value',
        selectFullJoin: 'Select full join',
        selectFullJoinHelper: 'If the value is not 0, check whether the index of the data table is correct',
        selectRangeCheck: 'The number of joins with no index',
        selectRangeCheckHelper: 'If the value is not 0, check whether the index of the data table is correct',
        sortMergePasses: 'Number of sorted merges',
        sortMergePassesHelper: 'If the value is too large, increase sort_buffer_size',
        tableLocksWaited: 'Lock table number',
        tableLocksWaitedHelper: 'If the value is too large, consider increasing your database performance',

        performanceTuning: 'Performance tuning',
        optimizationScheme: 'Optimization scheme',
        keyBufferSizeHelper: 'Buffer size for index',
        queryCacheSizeHelper: 'Query cache. If this function is disabled, set this parameter to 0',
        tmpTableSizeHelper: 'Temporary table cache size',
        innodbBufferPoolSizeHelper: 'Innodb buffer size',
        innodbLogBufferSizeHelper: 'Innodb log buffer size',
        sortBufferSizeHelper: '* connections, buffer size per thread sort',
        readBufferSizeHelper: '* connections, read buffer size',
        readRndBufferSizeHelper: '* connections, random read buffer size',
        joinBufferSizeHelper: '* connections, association table cache size',
        threadStackelper: '* connections, stack size per thread',
        binlogCacheSizeHelper: '* connections, binary log cache size (multiples of 4096)',
        threadCacheSizeHelper: 'Thread pool size',
        tableOpenCacheHelper: 'Table cache',
        maxConnectionsHelper: 'Max connections',
        restart: 'Restart',

        isOn: 'Is on',
        longQueryTime: 'Slow query threshold',

        status: 'The current state',
        terminal: 'Terminal mode',
        second: 'Second',
        timeout: 'Timeout',
        timeoutHelper: 'Idle connection timeout period. 0 indicates that the connection is on continuously',
        maxclients: 'Max clients',
        requirepass: 'Password',
        requirepassHelper: 'Leaving a blank indicates that no password has been set',
        databases: 'Number of databases',
        maxmemory: 'Maximum memory usage',
        maxmemoryHelper: '0 indicates no restriction',
        tcpPort: 'Current listening port',
        uptimeInDays: 'Days in operation',
        connectedClients: 'Number of connected clients',
        usedMemory: 'Redis indicates the peak value of memory allocated historically',
        usedMemoryRss: 'Total system memory used by Redis',
        memFragmentationRatio: 'Memory fragmentation ratio',
        totalConnectionsReceived: 'Total number of clients connected since run',
        totalCommandsProcessed: 'The total number of commands executed since the run',
        instantaneousOpsPerSec: 'Number of commands executed by the server per second',
        keyspaceHits: 'The number of times a database key was successfully found',
        keyspaceMisses: 'Number of failed attempts to find the database key',
        hit: 'Find the database key hit ratio',
        latestForkUsec: 'The number of microseconds spent on the last fork() operation',

        recoverHelper: 'Data is about to be overwritten with [{0}]. Do you want to continue?',
        submitIt: 'Overwrite the data',

        baseConf: 'Basic configuration',
        allConf: 'All configuration',
        restartNow: 'Restart now',
        restartNowHelper1:
            'You need to restart the system after the configuration changes take effect. If your data needs to be persisted, perform the save operation first.',
        restartNowHelper: 'The modification takes effect only after the system restarts.',

        persistence: 'Persistence',
        rdbHelper1: 'In seconds, insert',
        rdbHelper2: 'The data',
        rdbHelper3: 'Meeting either condition triggers RDB persistence',
        rdbInfo: 'Rule list has 0 value, please confirm and try again!',
    },
    container: {
        operatorHelper: '{0} will be performed on the selected container. Do you want to continue?',
        start: 'Start',
@@ -163,6 +163,7 @@ export default {
        permissionAll: '所有人(不安全)',
        rootPassword: 'root 密码',
        backupList: '备份列表',
        backList: '返回列表',
        loadBackup: '导入备份',
        setting: '数据库设置',
        remoteAccess: '远程访问',
@@ -173,6 +174,8 @@ export default {
        confChange: '配置修改',
        portHelper: '该端口为容器对外暴露端口,修改需要单独保存并且重启容器!',

        unSupportType: '不支持当前文件类型',

        currentStatus: '当前状态',
        runTime: '启动时间',
        connections: '总连接数',
@@ -27,7 +27,7 @@
                    icon="Back"
                    @click="onBacklist"
                >
                    {{ $t('commons.button.back') }}列表
                    {{ $t('database.backList') }}
                </el-button>

        <Setting ref="settingRef"></Setting>
@@ -106,6 +106,7 @@
            </template>
        </el-dialog>

        <UploadDialog ref="uploadRef" />
        <OperatrDialog @search="search" ref="dialogRef" />
        <BackupRecords ref="dialogBackupRef" />
    </div>
@@ -115,6 +116,7 @@
import ComplexTable from '@/components/complex-table/index.vue';
import OperatrDialog from '@/views/database/mysql/create/index.vue';
import BackupRecords from '@/views/database/mysql/backup/index.vue';
import UploadDialog from '@/views/database/mysql/upload/index.vue';
import Setting from '@/views/database/mysql/setting/index.vue';
import Submenu from '@/views/database/index.vue';
import { dateFromat } from '@/utils/util';
@@ -155,6 +157,8 @@ const onOpenBackupDialog = async (dbName: string) => {
    dialogBackupRef.value!.acceptParams(params);
};

const uploadRef = ref();

const settingRef = ref();
const onSetting = async () => {
    isOnSetting.value = true;
@@ -270,8 +274,11 @@ const buttons = [
    },
    {
        label: i18n.global.t('database.loadBackup'),
        click: (row: Database.MysqlDBInfo) => {
            onBatchDelete(row);
        click: () => {
            let params = {
                mysqlName: mysqlName.value,
            };
            uploadRef.value!.acceptParams(params);
        },
    },
    {
frontend/src/views/database/mysql/upload/index.vue (new file, 179 lines)
@@ -0,0 +1,179 @@
<template>
    <div>
        <el-dialog v-model="upVisiable" :destroy-on-close="true" :close-on-click-modal="false" width="70%">
            <template #header>
                <div class="card-header">
                    <span>{{ $t('database.backup') }}</span>
                </div>
            </template>
            <el-upload
                ref="uploadRef"
                :on-change="fileOnChange"
                :before-upload="beforeAvatarUpload"
                class="upload-demo"
                :auto-upload="false"
            >
                <template #trigger>
                    <el-button>选择文件</el-button>
                </template>
                <el-button style="margin-left: 10px" @click="onSubmit">上传</el-button>
            </el-upload>
            <div style="margin-left: 10px">
                <span class="input-help">仅支持sql、zip、sql.gz、(tar.gz|gz|tgz)</span>
                <span class="input-help">zip、tar.gz压缩包结构:test.zip或test.tar.gz压缩包内,必需包含test.sql</span>
            </div>
            <el-divider />
            <ComplexTable :pagination-config="paginationConfig" v-model:selects="selects" :data="data">
                <template #toolbar>
                    <el-button
                        style="margin-left: 10px"
                        type="danger"
                        plain
                        :disabled="selects.length === 0"
                        @click="onBatchDelete(null)"
                    >
                        {{ $t('commons.button.delete') }}
                    </el-button>
                </template>
                <el-table-column type="selection" fix />
                <el-table-column :label="$t('commons.table.name')" show-overflow-tooltip prop="fileName" />
                <el-table-column :label="$t('file.dir')" show-overflow-tooltip prop="fileDir" />
                <el-table-column :label="$t('file.size')" prop="size">
                    <template #default="{ row }">
                        {{ computeSize(row.size) }}
                    </template>
                </el-table-column>
                <el-table-column :label="$t('commons.table.createdAt')" prop="createdAt" />
                <fu-table-operations
                    width="300px"
                    :buttons="buttons"
                    :ellipsis="10"
                    :label="$t('commons.table.operate')"
                    fix
                />
            </ComplexTable>
        </el-dialog>
    </div>
</template>

<script lang="ts" setup>
import ComplexTable from '@/components/complex-table/index.vue';
import { reactive, ref } from 'vue';
import { computeSize } from '@/utils/util';
import { useDeleteData } from '@/hooks/use-delete-data';
import { recover, searchUpList, uploadFile } from '@/api/modules/database';
import i18n from '@/lang';
import { ElMessage, UploadFile, UploadFiles, UploadInstance, UploadProps } from 'element-plus';
import { deleteBackupRecord } from '@/api/modules/backup';
import { Backup } from '@/api/interface/backup';

const selects = ref<any>([]);

const data = ref();
const paginationConfig = reactive({
    currentPage: 1,
    pageSize: 10,
    total: 0,
});

const upVisiable = ref(false);
const mysqlName = ref();
const dbName = ref();
interface DialogProps {
    mysqlName: string;
    dbName: string;
}
const acceptParams = (params: DialogProps): void => {
    mysqlName.value = params.mysqlName;
    dbName.value = params.dbName;
    upVisiable.value = true;
    search();
};

const search = async () => {
    let params = {
        page: paginationConfig.currentPage,
        pageSize: paginationConfig.pageSize,
        mysqlName: mysqlName.value,
    };
    const res = await searchUpList(params);
    data.value = res.data.items || [];
    paginationConfig.total = res.data.total;
};

const onRecover = async (row: Backup.RecordInfo) => {
    let params = {
        mysqlName: mysqlName.value,
        dbName: dbName.value,
        backupName: row.fileDir + row.fileName,
    };
    await recover(params);
    ElMessage.success(i18n.global.t('commons.msg.operationSuccess'));
};

const uploaderFiles = ref<UploadFiles>([]);
const uploadRef = ref<UploadInstance>();

const beforeAvatarUpload: UploadProps['beforeUpload'] = (rawFile) => {
    if (rawFile.name.endsWith('.sql') || rawFile.name.endsWith('gz') || rawFile.name.endsWith('.zip')) {
        ElMessage.error(i18n.global.t('database.unSupportType'));
        return false;
    }
    return true;
};

const fileOnChange = (_uploadFile: UploadFile, uploadFiles: UploadFiles) => {
    uploaderFiles.value = uploadFiles;
};

const handleClose = () => {
    uploadRef.value!.clearFiles();
};

const onSubmit = () => {
    const formData = new FormData();
    if (uploaderFiles.value.length !== 1) {
        return;
    }
    if (uploaderFiles.value[0]!.raw != undefined) {
        formData.append('file', uploaderFiles.value[0]!.raw);
    }
    uploadFile(mysqlName.value, formData).then(() => {
        ElMessage.success(i18n.global.t('file.uploadSuccess'));
        handleClose();
        search();
    });
};

const onBatchDelete = async (row: Backup.RecordInfo | null) => {
    let ids: Array<number> = [];
    if (row) {
        ids.push(row.id);
    } else {
        selects.value.forEach((item: Backup.RecordInfo) => {
            ids.push(item.id);
        });
    }
    await useDeleteData(deleteBackupRecord, { ids: ids }, 'commons.msg.delete', true);
    search();
};

const buttons = [
    {
        label: i18n.global.t('commons.button.recover'),
        click: (row: Backup.RecordInfo) => {
            onRecover(row);
        },
    },
    {
        label: i18n.global.t('commons.button.delete'),
        click: (row: Backup.RecordInfo) => {
            onBatchDelete(row);
        },
    },
];

defineExpose({
    acceptParams,
});
</script>