commit 17fe748d30
MS-279 Merge from Branch-0.3.1

Former-commit-id: cd9e051d00423ae8a25f100366b1050f1604fca2
@@ -17,3 +17,5 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-1 - Add CHANGELOG.md
- MS-161 - Add CI / CD Module to Milvus Project
- MS-202 - Add Milvus Jenkins project email notification
- MS-215 - Add Milvus cluster CI/CD groovy file
- MS-277 - Update CUDA Version to V10.1

@@ -1,12 +1,13 @@
try {
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"

if (currentBuild.result == 'ABORTED') {
throw new hudson.AbortException("Dev Test Aborted !")
} else if (currentBuild.result == 'FAILURE') {
error("Dev Test Failure !")
def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}", returnStatus: true
if (!result) {
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
}
} catch (exc) {
updateGitlabCommitStatus name: 'Cleanup Dev', state: 'failed'
def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}", returnStatus: true
if (!result) {
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
}
throw exc
}

ci/jenkinsfile/cluster_cleanup_dev.groovy (new file, 13 lines)
@@ -0,0 +1,13 @@
try {
def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster", returnStatus: true
if (!result) {
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster"
}
} catch (exc) {
def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster", returnStatus: true
if (!result) {
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster"
}
throw exc
}
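
Like the single-node script above, this new cluster cleanup script probes `helm status` with returnStatus and deletes the release only when the probe exits 0, so repeated cleanup runs stay idempotent. A minimal shell sketch of the same idiom, assuming Helm 2 semantics and a hypothetical release name:

    #!/bin/sh
    # Delete a Helm 2 release only if it exists; harmless to re-run.
    RELEASE="my-job-42-cluster"   # hypothetical release name
    if helm status "$RELEASE" >/dev/null 2>&1; then
        helm del --purge "$RELEASE"
    fi
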
ci/jenkinsfile/cluster_deploy2dev.groovy (new file, 24 lines)
@@ -0,0 +1,24 @@
try {
sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
sh 'helm repo add milvus https://registry.zilliz.com/chartrepo/milvus'
sh 'helm repo update'
dir ("milvus-helm") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
dir ("milvus/milvus-cluster") {
sh "helm install --wait --timeout 300 --set roServers.image.tag=${DOCKER_VERSION} --set woServers.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP -f ci/values.yaml --name ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster --namespace milvus-cluster --version 0.3.1 . "
}
}
/*
timeout(time: 2, unit: 'MINUTES') {
waitUntil {
def result = sh script: "nc -z -w 3 ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster-milvus-cluster-proxy.milvus-cluster.svc.cluster.local 19530", returnStatus: true
return !result
}
}
*/
} catch (exc) {
echo 'Helm running failed!'
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster"
throw exc
}

ci/jenkinsfile/cluster_dev_test.groovy (new file, 12 lines)
@@ -0,0 +1,12 @@
timeout(time: 10, unit: 'MINUTES') {
try {
dir ("${PROJECT_NAME}_test") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
sh 'python3 -m pip install -r requirements_cluster.txt'
sh "pytest . --alluredir=cluster_test_out --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster-milvus-cluster-proxy.milvus-cluster.svc.cluster.local"
}
} catch (exc) {
echo 'Milvus Cluster Test Failed !'
throw exc
}
}

@@ -2,10 +2,15 @@ try {
sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
sh 'helm repo add milvus https://registry.zilliz.com/chartrepo/milvus'
sh 'helm repo update'
sh "helm install --set engine.image.repository=registry.zilliz.com/${PROJECT_NAME}/engine --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} --version 0.3.0 milvus/milvus-gpu"
dir ("milvus-helm") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
dir ("milvus/milvus-gpu") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/values.yaml --namespace milvus-1 --version 0.3.1 ."
}
}
} catch (exc) {
updateGitlabCommitStatus name: 'Deloy to Dev', state: 'failed'
echo 'Helm running failed!'
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
throw exc
}

@@ -1,17 +1,29 @@
container('milvus-testframework') {
timeout(time: 10, unit: 'MINUTES') {
gitlabCommitStatus(name: 'Dev Test') {
try {
dir ("${PROJECT_NAME}_test") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git"]]])
sh 'python3 -m pip install -r requirements.txt'
sh "pytest . --alluredir=test_out --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.kube-opt.svc.cluster.local"
}
} catch (exc) {
updateGitlabCommitStatus name: 'Dev Test', state: 'failed'
currentBuild.result = 'FAILURE'
echo 'Milvus Test Failed !'
timeout(time: 20, unit: 'MINUTES') {
try {
dir ("${PROJECT_NAME}_test") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
sh 'python3 -m pip install -r requirements.txt'
sh "pytest . --alluredir=test_out --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.milvus-1.svc.cluster.local"
}

// mysql database backend test
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"

if (!fileExists('milvus-helm')) {
dir ("milvus-helm") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
}
}
dir ("milvus-helm") {
dir ("milvus/milvus-gpu") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/db_backend/mysql_values.yaml --namespace milvus-2 --version 0.3.1 ."
}
}
dir ("${PROJECT_NAME}_test") {
sh "pytest . --alluredir=test_out --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.milvus-2.svc.cluster.local"
}
} catch (exc) {
echo 'Milvus Test Failed !'
throw exc
}
}

@@ -1,9 +1,9 @@
container('milvus-build-env') {
timeout(time: 20, unit: 'MINUTES') {
timeout(time: 30, unit: 'MINUTES') {
gitlabCommitStatus(name: 'Build Engine') {
dir ("milvus_engine") {
try {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'SubmoduleOption',disableSubmodules: false,parentCredentials: true,recursiveSubmodules: true,reference: '',trackingSubmodules: false]], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus.git"]]])
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'SubmoduleOption',disableSubmodules: false,parentCredentials: true,recursiveSubmodules: true,reference: '',trackingSubmodules: false]], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
dir ("cpp") {
sh "git config --global user.email \"test@zilliz.com\""
sh "git config --global user.name \"test\""

@@ -1,10 +1,12 @@
container('milvus-build-env') {
timeout(time: 20, unit: 'MINUTES') {
timeout(time: 30, unit: 'MINUTES') {
gitlabCommitStatus(name: 'Build Engine') {
dir ("milvus_engine") {
try {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'SubmoduleOption',disableSubmodules: false,parentCredentials: true,recursiveSubmodules: true,reference: '',trackingSubmodules: false]], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus.git"]]])
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'SubmoduleOption',disableSubmodules: false,parentCredentials: true,recursiveSubmodules: true,reference: '',trackingSubmodules: false]], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
dir ("cpp") {
sh "git config --global user.email \"test@zilliz.com\""
sh "git config --global user.name \"test\""
sh "./build.sh -t ${params.BUILD_TYPE}"
}
} catch (exc) {

@@ -3,7 +3,7 @@ container('publish-docker') {
gitlabCommitStatus(name: 'Publish Engine Docker') {
try {
dir ("${PROJECT_NAME}_build") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:build/milvus_build.git"]]])
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:build/milvus_build.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
dir ("docker/deploy/ubuntu16.04/free_version") {
sh "curl -O -u anonymous: ftp://192.168.1.126/data/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
sh "tar zxvf ${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
@@ -12,7 +12,10 @@ container('publish-docker') {
def customImage = docker.build("${PROJECT_NAME}/engine:${DOCKER_VERSION}")
customImage.push()
}
echo "Docker Pull Command: docker pull registry.zilliz.com/${PROJECT_NAME}/engine:${DOCKER_VERSION}"
if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'success'
echo "Docker Pull Command: docker pull registry.zilliz.com/${PROJECT_NAME}/engine:${DOCKER_VERSION}"
}
} catch (exc) {
updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'canceled'
throw exc
@@ -29,3 +32,4 @@ container('publish-docker') {
}
}
}

ci/jenkinsfile/upload_dev_cluster_test_out.groovy (new file, 14 lines)
@@ -0,0 +1,14 @@
timeout(time: 5, unit: 'MINUTES') {
dir ("${PROJECT_NAME}_test") {
if (fileExists('cluster_test_out')) {
def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
fileTransfer.FileTransfer("cluster_test_out/", "${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}", 'nas storage')
if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
echo "Milvus Dev Test Out Viewer \"ftp://192.168.1.126/data/${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}\""
}
} else {
error("Milvus Dev Test Out directory don't exists!")
}
}
}

@@ -1,26 +1,13 @@
container('milvus-testframework') {
timeout(time: 5, unit: 'MINUTES') {
dir ("${PROJECT_NAME}_test") {
gitlabCommitStatus(name: 'Upload Dev Test Out') {
if (fileExists('test_out')) {
try {
def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
fileTransfer.FileTransfer("test_out/", "${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}", 'nas storage')
if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
echo "Milvus Dev Test Out Viewer \"ftp://192.168.1.126/data/${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}\""
}
} catch (hudson.AbortException ae) {
updateGitlabCommitStatus name: 'Upload Dev Test Out', state: 'canceled'
currentBuild.result = 'ABORTED'
} catch (exc) {
updateGitlabCommitStatus name: 'Upload Dev Test Out', state: 'failed'
currentBuild.result = 'FAILURE'
}
} else {
updateGitlabCommitStatus name: 'Upload Dev Test Out', state: 'failed'
echo "Milvus Dev Test Out directory don't exists!"
}
timeout(time: 5, unit: 'MINUTES') {
dir ("${PROJECT_NAME}_test") {
if (fileExists('test_out')) {
def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
fileTransfer.FileTransfer("test_out/", "${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}", 'nas storage')
if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
echo "Milvus Dev Test Out Viewer \"ftp://192.168.1.126/data/${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}\""
}
} else {
error("Milvus Dev Test Out directory don't exists!")
}
}
}

@@ -35,7 +35,7 @@ pipeline {
defaultContainer 'jnlp'
containerTemplate {
name 'milvus-build-env'
image 'registry.zilliz.com/milvus/milvus-build-env:v0.10'
image 'registry.zilliz.com/milvus/milvus-build-env:v0.12'
ttyEnabled true
command 'cat'
}
@@ -130,97 +130,187 @@ spec:
}

stage("Deploy to Development") {
stages {
stage("Deploy to Dev") {
parallel {
stage("Single Node") {
agent {
kubernetes {
label 'jenkins-slave'
label 'dev-test'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
componet: test
spec:
containers:
- name: milvus-testframework
image: registry.zilliz.com/milvus/milvus-test:v0.2
command:
- cat
tty: true
volumeMounts:
- name: kubeconf
mountPath: /root/.kube/
readOnly: true
volumes:
- name: kubeconf
secret:
secretName: test-cluster-config
"""
}
}

stages {
stage('Deploy') {
stage("Deploy to Dev") {
steps {
gitlabCommitStatus(name: 'Deloy to Dev') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
}
}
}
}
}
}
post {
aborted {
script {
updateGitlabCommitStatus name: 'Deloy to Dev', state: 'canceled'
echo "Milvus Deloy to Dev aborted !"
}
}

failure {
script {
updateGitlabCommitStatus name: 'Deloy to Dev', state: 'failed'
echo "Milvus Deloy to Dev failure !"
}
}
}
}

stage("Dev Test") {
agent {
kubernetes {
label 'test'
defaultContainer 'jnlp'
containerTemplate {
name 'milvus-testframework'
image 'registry.zilliz.com/milvus/milvus-test:v0.1'
ttyEnabled true
command 'cat'
}
}
}
stages {
stage('Test') {
stage("Dev Test") {
steps {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy"
load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
gitlabCommitStatus(name: 'Deloy Test') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy"
load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
}
}
}
}
}
}
}

stage ("Cleanup Dev") {
agent {
kubernetes {
label 'jenkins-slave'
defaultContainer 'jnlp'
}
}
stages {
stage('Cleanup') {
stage ("Cleanup Dev") {
steps {
gitlabCommitStatus(name: 'Cleanup Dev') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
}
}
}
}
}
}
post {
aborted {
script {
updateGitlabCommitStatus name: 'Cleanup Dev', state: 'canceled'
echo "Milvus Cleanup Dev aborted !"
always {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
}
}
}
success {
script {
echo "Milvus Single Node CI/CD success !"
}
}
aborted {
script {
echo "Milvus Single Node CI/CD aborted !"
}
}

failure {
script {
updateGitlabCommitStatus name: 'Cleanup Dev', state: 'failed'
echo "Milvus Cleanup Dev failure !"
echo "Milvus Single Node CI/CD failure !"
}
}
}
}

stage("Cluster") {
agent {
kubernetes {
label 'dev-test'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
componet: test
spec:
containers:
- name: milvus-testframework
image: registry.zilliz.com/milvus/milvus-test:v0.2
command:
- cat
tty: true
volumeMounts:
- name: kubeconf
mountPath: /root/.kube/
readOnly: true
volumes:
- name: kubeconf
secret:
secretName: test-cluster-config
"""
}
}
stages {
stage("Deploy to Dev") {
steps {
gitlabCommitStatus(name: 'Deloy to Dev') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cluster_deploy2dev.groovy"
}
}
}
}
}
stage("Dev Test") {
steps {
gitlabCommitStatus(name: 'Deloy Test') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cluster_dev_test.groovy"
load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_cluster_test_out.groovy"
}
}
}
}
}
stage ("Cleanup Dev") {
steps {
gitlabCommitStatus(name: 'Cleanup Dev') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
}
}
}
}
}
}
post {
always {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
}
}
}
success {
script {
echo "Milvus Cluster CI/CD success !"
}
}
aborted {
script {
echo "Milvus Cluster CI/CD aborted !"
}
}
failure {
script {
echo "Milvus Cluster CI/CD failure !"
}
}
}
@@ -234,6 +324,7 @@ spec:
post {
always {
script {
<<<<<<< HEAD
if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
// Send an email only if the build status has changed from green/unstable to red
emailext subject: '$DEFAULT_SUBJECT',
@@ -244,6 +335,20 @@ spec:
],
replyTo: '$DEFAULT_REPLYTO',
to: '$DEFAULT_RECIPIENTS'
=======
if (env.gitlabAfter != null) {
if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
// Send an email only if the build status has changed from green/unstable to red
emailext subject: '$DEFAULT_SUBJECT',
body: '$DEFAULT_CONTENT',
recipientProviders: [
[$class: 'DevelopersRecipientProvider'],
[$class: 'RequesterRecipientProvider']
],
replyTo: '$DEFAULT_REPLYTO',
to: '$DEFAULT_RECIPIENTS'
}
>>>>>>> branch-0.3.1
}
}
}
@@ -270,3 +375,4 @@ spec:
}
}
}

@@ -35,7 +35,7 @@ pipeline {
defaultContainer 'jnlp'
containerTemplate {
name 'milvus-build-env'
image 'registry.zilliz.com/milvus/milvus-build-env:v0.10'
image 'registry.zilliz.com/milvus/milvus-build-env:v0.12'
ttyEnabled true
command 'cat'
}
@@ -130,97 +130,187 @@ spec:
}

stage("Deploy to Development") {
stages {
stage("Deploy to Dev") {
parallel {
stage("Single Node") {
agent {
kubernetes {
label 'jenkins-slave'
label 'dev-test'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
componet: test
spec:
containers:
- name: milvus-testframework
image: registry.zilliz.com/milvus/milvus-test:v0.2
command:
- cat
tty: true
volumeMounts:
- name: kubeconf
mountPath: /root/.kube/
readOnly: true
volumes:
- name: kubeconf
secret:
secretName: test-cluster-config
"""
}
}

stages {
stage('Deploy') {
stage("Deploy to Dev") {
steps {
gitlabCommitStatus(name: 'Deloy to Dev') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
}
}
}
}
}
}
post {
aborted {
script {
updateGitlabCommitStatus name: 'Deloy to Dev', state: 'canceled'
echo "Milvus Deloy to Dev aborted !"
}
}

failure {
script {
updateGitlabCommitStatus name: 'Deloy to Dev', state: 'failed'
echo "Milvus Deloy to Dev failure !"
}
}
}
}

stage("Dev Test") {
agent {
kubernetes {
label 'test'
defaultContainer 'jnlp'
containerTemplate {
name 'milvus-testframework'
image 'registry.zilliz.com/milvus/milvus-test:v0.1'
ttyEnabled true
command 'cat'
}
}
}
stages {
stage('Test') {
stage("Dev Test") {
steps {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy"
load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
gitlabCommitStatus(name: 'Deloy Test') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy"
load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
}
}
}
}
}
}
}

stage ("Cleanup Dev") {
agent {
kubernetes {
label 'jenkins-slave'
defaultContainer 'jnlp'
}
}
stages {
stage('Cleanup') {
stage ("Cleanup Dev") {
steps {
gitlabCommitStatus(name: 'Cleanup Dev') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
}
}
}
}
}
}
post {
aborted {
script {
updateGitlabCommitStatus name: 'Cleanup Dev', state: 'canceled'
echo "Milvus Cleanup Dev aborted !"
always {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
}
}
}
success {
script {
echo "Milvus Single Node CI/CD success !"
}
}
aborted {
script {
echo "Milvus Single Node CI/CD aborted !"
}
}

failure {
script {
updateGitlabCommitStatus name: 'Cleanup Dev', state: 'failed'
echo "Milvus Cleanup Dev failure !"
echo "Milvus Single Node CI/CD failure !"
}
}
}
}

stage("Cluster") {
agent {
kubernetes {
label 'dev-test'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
componet: test
spec:
containers:
- name: milvus-testframework
image: registry.zilliz.com/milvus/milvus-test:v0.2
command:
- cat
tty: true
volumeMounts:
- name: kubeconf
mountPath: /root/.kube/
readOnly: true
volumes:
- name: kubeconf
secret:
secretName: test-cluster-config
"""
}
}
stages {
stage("Deploy to Dev") {
steps {
gitlabCommitStatus(name: 'Deloy to Dev') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cluster_deploy2dev.groovy"
}
}
}
}
}
stage("Dev Test") {
steps {
gitlabCommitStatus(name: 'Deloy Test') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cluster_dev_test.groovy"
load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_cluster_test_out.groovy"
}
}
}
}
}
stage ("Cleanup Dev") {
steps {
gitlabCommitStatus(name: 'Cleanup Dev') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
}
}
}
}
}
}
post {
always {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
}
}
}
success {
script {
echo "Milvus Cluster CI/CD success !"
}
}
aborted {
script {
echo "Milvus Cluster CI/CD aborted !"
}
}
failure {
script {
echo "Milvus Cluster CI/CD failure !"
}
}
}
@@ -232,6 +322,24 @@ spec:
}

post {
always {
script {
if (env.gitlabAfter != null) {
if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
// Send an email only if the build status has changed from green/unstable to red
emailext subject: '$DEFAULT_SUBJECT',
body: '$DEFAULT_CONTENT',
recipientProviders: [
[$class: 'DevelopersRecipientProvider'],
[$class: 'RequesterRecipientProvider']
],
replyTo: '$DEFAULT_REPLYTO',
to: '$DEFAULT_RECIPIENTS'
}
}
}
}

success {
script {
updateGitlabCommitStatus name: 'CI/CD', state: 'success'
@@ -254,3 +362,4 @@ spec:
}
}
}

@@ -35,7 +35,11 @@ pipeline {
defaultContainer 'jnlp'
containerTemplate {
name 'milvus-build-env'
<<<<<<< HEAD
image 'registry.zilliz.com/milvus/milvus-build-env:v0.10'
=======
image 'registry.zilliz.com/milvus/milvus-build-env:v0.12'
>>>>>>> branch-0.3.1
ttyEnabled true
command 'cat'
}
@@ -130,6 +134,7 @@ spec:
}

stage("Deploy to Development") {
<<<<<<< HEAD
stages {
stage("Deploy to Dev") {
agent {
@@ -144,12 +149,78 @@ spec:
gitlabCommitStatus(name: 'Deloy to Dev') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
=======
parallel {
stage("Single Node") {
agent {
kubernetes {
label 'dev-test'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
componet: test
spec:
containers:
- name: milvus-testframework
image: registry.zilliz.com/milvus/milvus-test:v0.2
command:
- cat
tty: true
volumeMounts:
- name: kubeconf
mountPath: /root/.kube/
readOnly: true
volumes:
- name: kubeconf
secret:
secretName: test-cluster-config
"""
}
}

stages {
stage("Deploy to Dev") {
steps {
gitlabCommitStatus(name: 'Deloy to Dev') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
}
}
}
}
}
stage("Dev Test") {
steps {
gitlabCommitStatus(name: 'Deloy Test') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy"
load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
}
}
}
}
}
stage ("Cleanup Dev") {
steps {
gitlabCommitStatus(name: 'Cleanup Dev') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
}
>>>>>>> branch-0.3.1
}
}
}
}
}
post {
<<<<<<< HEAD
aborted {
script {
updateGitlabCommitStatus name: 'Deloy to Dev', state: 'canceled'
@@ -161,11 +232,34 @@ spec:
script {
updateGitlabCommitStatus name: 'Deloy to Dev', state: 'failed'
echo "Milvus Deloy to Dev failure !"
=======
always {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
}
}
}
success {
script {
echo "Milvus Single Node CI/CD success !"
}
}
aborted {
script {
echo "Milvus Single Node CI/CD aborted !"
}
}
failure {
script {
echo "Milvus Single Node CI/CD failure !"
>>>>>>> branch-0.3.1
}
}
}
}

<<<<<<< HEAD
stage("Dev Test") {
agent {
kubernetes {
@@ -204,12 +298,76 @@ spec:
gitlabCommitStatus(name: 'Cleanup Dev') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
=======
stage("Cluster") {
agent {
kubernetes {
label 'dev-test'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
componet: test
spec:
containers:
- name: milvus-testframework
image: registry.zilliz.com/milvus/milvus-test:v0.2
command:
- cat
tty: true
volumeMounts:
- name: kubeconf
mountPath: /root/.kube/
readOnly: true
volumes:
- name: kubeconf
secret:
secretName: test-cluster-config
"""
}
}
stages {
stage("Deploy to Dev") {
steps {
gitlabCommitStatus(name: 'Deloy to Dev') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cluster_deploy2dev.groovy"
}
}
}
}
}
stage("Dev Test") {
steps {
gitlabCommitStatus(name: 'Deloy Test') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cluster_dev_test.groovy"
load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_cluster_test_out.groovy"
}
}
}
}
}
stage ("Cleanup Dev") {
steps {
gitlabCommitStatus(name: 'Cleanup Dev') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
}
>>>>>>> branch-0.3.1
}
}
}
}
}
post {
<<<<<<< HEAD
aborted {
script {
updateGitlabCommitStatus name: 'Cleanup Dev', state: 'canceled'
@@ -221,6 +379,28 @@ spec:
script {
updateGitlabCommitStatus name: 'Cleanup Dev', state: 'failed'
echo "Milvus Cleanup Dev failure !"
=======
always {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
}
}
}
success {
script {
echo "Milvus Cluster CI/CD success !"
}
}
aborted {
script {
echo "Milvus Cluster CI/CD aborted !"
}
}
failure {
script {
echo "Milvus Cluster CI/CD failure !"
>>>>>>> branch-0.3.1
}
}
}
@@ -232,6 +412,25 @@ spec:
}

post {
<<<<<<< HEAD
=======
always {
script {
if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
// Send an email only if the build status has changed from green/unstable to red
emailext subject: '$DEFAULT_SUBJECT',
body: '$DEFAULT_CONTENT',
recipientProviders: [
[$class: 'DevelopersRecipientProvider'],
[$class: 'RequesterRecipientProvider']
],
replyTo: '$DEFAULT_REPLYTO',
to: '$DEFAULT_RECIPIENTS'
}
}
}

>>>>>>> branch-0.3.1
success {
script {
updateGitlabCommitStatus name: 'CI/CD', state: 'success'
@@ -254,3 +453,7 @@ spec:
}
}
}
<<<<<<< HEAD
=======

>>>>>>> branch-0.3.1

@@ -9,18 +9,40 @@ Please mark all change in change log and use the ticket from JIRA.

- MS-148 - Disable cleanup if mode is read only
- MS-149 - Fixed searching only one index file issue in distributed mode
- MS-153 - fix c_str error when connecting to MySQL
- MS-157 - fix changelog
- MS-190 - use env variable to switch mem manager and fix cmake
- MS-153 - Fix c_str error when connecting to MySQL
- MS-157 - Fix changelog
- MS-190 - Use env variable to switch mem manager and fix cmake
- MS-217 - Fix SQ8 row count bug
- MS-224 - Return AlreadyExist status in MySQLMetaImpl::CreateTable if table already exists
- MS-232 - Add MySQLMetaImpl::UpdateTableFilesToIndex and set maximum_memory to default if config value = 0
- MS-233 - Remove mem manager log
- MS-230 - Change parameter name: Maximum_memory to insert_buffer_size
- MS-234 - Some case cause background merge thread stop
- MS-235 - Some test cases random fail
- MS-236 - Add MySQLMetaImpl::HasNonIndexFiles
- MS-257 - Update bzip2 download url

## Improvement
- MS-156 - Add unittest for merge result functions

- MS-152 - Delete assert in MySQLMetaImpl and change MySQLConnectionPool impl
- MS-204 - Support multi db_path
- MS-206 - Support SQ8 index type
- MS-208 - Add buildinde interface for C++ SDK
- MS-212 - Support Inner product metric type
- MS-241 - Build Faiss with MKL if using Intel CPU; else build with OpenBlas
- MS-242 - Clean up cmake and change MAKE_BUILD_ARGS to be user defined variable
- MS-245 - Improve search result transfer performance
- MS-248 - Support AddVector/SearchVector profiling
- MS-256 - Add more cache config
- MS-260 - Refine log
- MS-249 - Check machine hardware during initialize
- MS-261 - Update faiss version to 1.5.3 and add BUILD_FAISS_WITH_MKL as an option
- MS-278 - add IndexStatsHelper

## New Feature
- MS-137 - Integrate knowhere
- MS-180 - Add new mem manager
- MS-195 - Add nlist and use_blas_threshold conf
- MS-137 - Integrate knowhere

## Task

@@ -75,6 +97,7 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-144 - Add nprobe config
- MS-147 - Enable IVF

- MS-130 - Add prometheus_test
## Task
- MS-74 - Change README.md in cpp
- MS-88 - Add support for arm architecture

@@ -116,6 +116,11 @@ set(MILVUS_ENGINE_SRC ${PROJECT_SOURCE_DIR}/src)

add_compile_definitions(PROFILER=${PROFILER})

message("MILVUS_ENABLE_PROFILING = ${MILVUS_ENABLE_PROFILING}")
if (MILVUS_ENABLE_PROFILING STREQUAL "ON")
ADD_DEFINITIONS(-DMILVUS_ENABLE_PROFILING)
endif()

include_directories(${MILVUS_ENGINE_INCLUDE})
include_directories(${MILVUS_ENGINE_SRC})

@@ -1,12 +1,13 @@
### Compilation
#### Step 1: install necessary tools

centos7 :
yum install gfortran qt4 flex bison mysql-devel mysql

ubuntu16.04 :
sudo apt-get install gfortran qt4-qmake flex bison libmysqlclient-dev mysql-client

cd scripts && sudo ./requirements.sh

If `libmysqlclient_r.so` does not exist after installing MySQL Development Files, you need to create a symbolic link:
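
The link command itself is not shown in this hunk; a typical fix, assuming the client library was installed under /usr/lib/x86_64-linux-gnu (the path varies by distribution), would be:

    # Create the _r alias some clients still expect (library path is an assumption).
    sudo ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so
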
cpp/build.sh
@@ -7,8 +7,10 @@ INSTALL_PREFIX=$(pwd)/milvus
MAKE_CLEAN="OFF"
BUILD_COVERAGE="OFF"
DB_PATH="/opt/milvus"
PROFILING="OFF"
BUILD_FAISS_WITH_MKL="OFF"

while getopts "p:d:t:uhlrc" arg
while getopts "p:d:t:uhlrcgm" arg
do
case $arg in
t)
@@ -36,6 +38,12 @@ do
c)
BUILD_COVERAGE="ON"
;;
g)
PROFILING="ON"
;;
m)
BUILD_FAISS_WITH_MKL="ON"
;;
h) # help
echo "

@@ -47,9 +55,11 @@ parameter:
-l: build license version(default: OFF)
-r: remove previous build directory(default: OFF)
-c: code coverage(default: OFF)
-g: profiling(default: OFF)
-m: build faiss with MKL(default: OFF)

usage:
./build.sh -t \${BUILD_TYPE} [-u] [-h] [-g] [-r] [-c]
./build.sh -t \${BUILD_TYPE} [-u] [-h] [-g] [-r] [-c] [-m]
"
exit 0
;;
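
With the two new flags, a profiling-enabled build that links Faiss against MKL becomes a one-liner; for example (the build type value is illustrative):

    # Release build with gperftools profiling (-g) and MKL-backed Faiss (-m).
    ./build.sh -t Release -g -m
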
@@ -77,6 +87,8 @@ if [[ ${MAKE_CLEAN} == "ON" ]]; then
-DCMAKE_LICENSE_CHECK=${LICENSE_CHECK} \
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
-DMILVUS_DB_PATH=${DB_PATH} \
-DMILVUS_ENABLE_PROFILING=${PROFILING} \
-DBUILD_FAISS_WITH_MKL=${BUILD_FAISS_WITH_MKL} \
$@ ../"
echo ${CMAKE_CMD}

@@ -57,8 +57,6 @@ define_option(MILVUS_VERBOSE_THIRDPARTY_BUILD

define_option(MILVUS_WITH_ARROW "Build with ARROW" OFF)

define_option(MILVUS_BOOST_USE_SHARED "Rely on boost shared libraries where relevant" OFF)

define_option(MILVUS_BOOST_VENDORED "Use vendored Boost instead of existing Boost. \
Note that this requires linking Boost statically" ON)

@@ -110,6 +108,11 @@ define_option(MILVUS_WITH_ZSTD "Build with zstd compression" ${MILVUS_WITH_ZSTD_DEFAULT})

define_option(MILVUS_WITH_AWS "Build with AWS SDK" ON)

if (MILVUS_ENABLE_PROFILING STREQUAL "ON")
define_option(MILVUS_WITH_LIBUNWIND "Build with libunwind" ON)
define_option(MILVUS_WITH_GPERFTOOLS "Build with gperftools" ON)
endif()

#----------------------------------------------------------------------
if(MSVC)
set_option_category("MSVC")

@@ -5,7 +5,6 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
@@ -38,7 +37,9 @@ set(MILVUS_THIRDPARTY_DEPENDENCIES
yaml-cpp
ZLIB
ZSTD
AWS)
AWS
libunwind
gperftools)

message(STATUS "Using ${MILVUS_DEPENDENCY_SOURCE} approach to find dependencies")

@@ -92,6 +93,10 @@ macro(build_dependency DEPENDENCY_NAME)
build_zstd()
elseif("${DEPENDENCY_NAME}" STREQUAL "AWS")
build_aws()
elseif("${DEPENDENCY_NAME}" STREQUAL "libunwind")
build_libunwind()
elseif("${DEPENDENCY_NAME}" STREQUAL "gperftools")
build_gperftools()
else()
message(FATAL_ERROR "Unknown thirdparty dependency to build: ${DEPENDENCY_NAME}")
endif ()
@@ -99,12 +104,8 @@ endmacro()

macro(resolve_dependency DEPENDENCY_NAME)
if (${DEPENDENCY_NAME}_SOURCE STREQUAL "AUTO")
#message(STATUS "Finding ${DEPENDENCY_NAME} package")
# find_package(${DEPENDENCY_NAME} QUIET)
# if (NOT ${DEPENDENCY_NAME}_FOUND)
#message(STATUS "${DEPENDENCY_NAME} package not found")
#disable find_package for now
build_dependency(${DEPENDENCY_NAME})
# endif ()
elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "BUNDLED")
build_dependency(${DEPENDENCY_NAME})
elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "SYSTEM")
@@ -120,11 +121,9 @@ string(TOUPPER ${CMAKE_BUILD_TYPE} UPPERCASE_BUILD_TYPE)
set(EP_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${UPPERCASE_BUILD_TYPE}}")
set(EP_C_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${UPPERCASE_BUILD_TYPE}}")

if(NOT MSVC)
# Set -fPIC on all external projects
set(EP_CXX_FLAGS "${EP_CXX_FLAGS} -fPIC")
set(EP_C_FLAGS "${EP_C_FLAGS} -fPIC")
endif()
# Set -fPIC on all external projects
set(EP_CXX_FLAGS "${EP_CXX_FLAGS} -fPIC")
set(EP_C_FLAGS "${EP_C_FLAGS} -fPIC")

# CC/CXX environment variables are captured on the first invocation of the
# builder (e.g make or ninja) instead of when CMake is invoked into to build
@@ -162,20 +161,13 @@ endif()

# Ensure that a default make is set
if("${MAKE}" STREQUAL "")
if(NOT MSVC)
find_program(MAKE make)
endif()
find_program(MAKE make)
endif()

set(MAKE_BUILD_ARGS "-j2")

## Using make -j in sub-make is fragile
#if(${CMAKE_GENERATOR} MATCHES "Makefiles")
# set(MAKE_BUILD_ARGS "")
#else()
# # limit the maximum number of jobs for ninja
# set(MAKE_BUILD_ARGS "-j4")
#endif()
if (NOT DEFINED MAKE_BUILD_ARGS)
set(MAKE_BUILD_ARGS "-j8")
endif()
message(STATUS "Third Party MAKE_BUILD_ARGS = ${MAKE_BUILD_ARGS}")

# ----------------------------------------------------------------------
# Find pthreads
@@ -294,7 +286,6 @@ if (DEFINED ENV{MILVUS_PROMETHEUS_URL})
set(PROMETHEUS_SOURCE_URL "$ENV{PROMETHEUS_OPENBLAS_URL}")
else ()
set(PROMETHEUS_SOURCE_URL
#"https://github.com/JinHai-CN/prometheus-cpp/archive/${PROMETHEUS_VERSION}.tar.gz"
https://github.com/jupp0r/prometheus-cpp.git)
endif()

@@ -356,6 +347,21 @@ if(DEFINED ENV{MILVUS_AWS_URL})
else()
set(AWS_SOURCE_URL "https://github.com/aws/aws-sdk-cpp/archive/${AWS_VERSION}.tar.gz")
endif()

if(DEFINED ENV{MILVUS_LIBUNWIND_URL})
set(LIBUNWIND_SOURCE_URL "$ENV{MILVUS_LIBUNWIND_URL}")
else()
set(LIBUNWIND_SOURCE_URL
"https://github.com/libunwind/libunwind/releases/download/v${LIBUNWIND_VERSION}/libunwind-${LIBUNWIND_VERSION}.tar.gz")
endif()

if(DEFINED ENV{MILVUS_GPERFTOOLS_URL})
set(GPERFTOOLS_SOURCE_URL "$ENV{MILVUS_GPERFTOOLS_URL}")
else()
set(GPERFTOOLS_SOURCE_URL
"https://github.com/gperftools/gperftools/releases/download/gperftools-${GPERFTOOLS_VERSION}/gperftools-${GPERFTOOLS_VERSION}.tar.gz")
endif()

# ----------------------------------------------------------------------
# ARROW

@@ -363,19 +369,13 @@ macro(build_arrow)
message(STATUS "Building Apache ARROW-${ARROW_VERSION} from source")
set(ARROW_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/arrow_ep-prefix/src/arrow_ep/cpp")
set(ARROW_STATIC_LIB_NAME arrow)
# set(ARROW_CUDA_STATIC_LIB_NAME arrow_cuda)

set(ARROW_STATIC_LIB
"${ARROW_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${ARROW_STATIC_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}"
)
# set(ARROW_CUDA_STATIC_LIB
# "${ARROW_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${ARROW_CUDA_STATIC_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}"
# )
set(ARROW_INCLUDE_DIR "${ARROW_PREFIX}/include")

set(ARROW_CMAKE_ARGS
${EP_COMMON_CMAKE_ARGS}
# "-DARROW_THRIFT_URL=${THRIFT_SOURCE_URL}"
#"env ARROW_THRIFT_URL=${THRIFT_SOURCE_URL}"
-DARROW_BUILD_STATIC=ON
-DARROW_BUILD_SHARED=OFF
-DARROW_PARQUET=ON
@@ -384,8 +384,6 @@ macro(build_arrow)
"-DCMAKE_LIBRARY_PATH=${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs"
-DCMAKE_BUILD_TYPE=Release)

# set($ENV{ARROW_THRIFT_URL} ${THRIFT_SOURCE_URL})

externalproject_add(arrow_ep
GIT_REPOSITORY
${ARROW_SOURCE_URL}
@@ -393,14 +391,8 @@ macro(build_arrow)
${ARROW_VERSION}
GIT_SHALLOW
TRUE
# SOURCE_DIR
# ${ARROW_PREFIX}
# BINARY_DIR
# ${ARROW_PREFIX}
SOURCE_SUBDIR
cpp
# COMMAND
# "export \"ARROW_THRIFT_URL=${THRIFT_SOURCE_URL}\""
${EP_LOG_OPTIONS}
CMAKE_ARGS
${ARROW_CMAKE_ARGS}
@@ -409,21 +401,16 @@ macro(build_arrow)
${MAKE_BUILD_ARGS}
INSTALL_COMMAND
${MAKE} install
# BUILD_IN_SOURCE
# 1
BUILD_BYPRODUCTS
"${ARROW_STATIC_LIB}"
# "${ARROW_CUDA_STATIC_LIB}"
)

# ExternalProject_Add_StepDependencies(arrow_ep build thrift_ep)

file(MAKE_DIRECTORY "${ARROW_PREFIX}/include")
add_library(arrow STATIC IMPORTED)
set_target_properties(arrow
PROPERTIES IMPORTED_LOCATION "${ARROW_STATIC_LIB}"
INTERFACE_INCLUDE_DIRECTORIES "${ARROW_INCLUDE_DIR}")
# INTERFACE_LINK_LIBRARIES thrift)

add_dependencies(arrow arrow_ep)

set(JEMALLOC_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/arrow_ep-prefix/src/arrow_ep-build/jemalloc_ep-prefix/src/jemalloc_ep")
@@ -447,9 +434,6 @@ endif()
# Add Boost dependencies (code adapted from Apache Kudu (incubating))

set(Boost_USE_MULTITHREADED ON)
if(MSVC AND MILVUS_USE_STATIC_CRT)
set(Boost_USE_STATIC_RUNTIME ON)
endif()
set(Boost_ADDITIONAL_VERSIONS
"1.70.0"
"1.70"
@@ -539,59 +523,8 @@ if(MILVUS_BOOST_VENDORED)
add_dependencies(boost_filesystem_static boost_ep)
add_dependencies(boost_serialization_static boost_ep)

else()
if(MSVC)
# disable autolinking in boost
add_definitions(-DBOOST_ALL_NO_LIB)
endif()

# if(DEFINED ENV{BOOST_ROOT} OR DEFINED BOOST_ROOT)
# # In older versions of CMake (such as 3.2), the system paths for Boost will
# # be looked in first even if we set $BOOST_ROOT or pass -DBOOST_ROOT
# set(Boost_NO_SYSTEM_PATHS ON)
# endif()

if(MILVUS_BOOST_USE_SHARED)
# Find shared Boost libraries.
set(Boost_USE_STATIC_LIBS OFF)
set(BUILD_SHARED_LIBS_KEEP ${BUILD_SHARED_LIBS})
set(BUILD_SHARED_LIBS ON)

if(MSVC)
# force all boost libraries to dynamic link
add_definitions(-DBOOST_ALL_DYN_LINK)
endif()

if(MILVUS_BOOST_HEADER_ONLY)
find_package(Boost REQUIRED)
else()
find_package(Boost COMPONENTS serialization system filesystem REQUIRED)
set(BOOST_SYSTEM_LIBRARY Boost::system)
set(BOOST_FILESYSTEM_LIBRARY Boost::filesystem)
set(BOOST_SERIALIZATION_LIBRARY Boost::serialization)
set(MILVUS_BOOST_LIBS ${BOOST_SYSTEM_LIBRARY} ${BOOST_FILESYSTEM_LIBRARY})
endif()
set(BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS_KEEP})
unset(BUILD_SHARED_LIBS_KEEP)
else()
# Find static boost headers and libs
# TODO Differentiate here between release and debug builds
set(Boost_USE_STATIC_LIBS ON)
if(MILVUS_BOOST_HEADER_ONLY)
find_package(Boost REQUIRED)
else()
find_package(Boost COMPONENTS serialization system filesystem REQUIRED)
set(BOOST_SYSTEM_LIBRARY Boost::system)
set(BOOST_FILESYSTEM_LIBRARY Boost::filesystem)
set(BOOST_SERIALIZATION_LIBRARY Boost::serialization)
set(MILVUS_BOOST_LIBS ${BOOST_SYSTEM_LIBRARY} ${BOOST_FILESYSTEM_LIBRARY})
endif()
endif()
endif()

#message(STATUS "Boost include dir: " ${Boost_INCLUDE_DIR})
#message(STATUS "Boost libraries: " ${Boost_LIBRARIES})

include_directories(SYSTEM ${Boost_INCLUDE_DIR})
link_directories(SYSTEM ${BOOST_LIB_DIR})

@@ -784,13 +717,6 @@ macro(build_openblas)
add_dependencies(openblas openblas_ep)
endmacro()

#if(MILVUS_WITH_OPENBLAS)
# resolve_dependency(OpenBLAS)
#
# get_target_property(OPENBLAS_INCLUDE_DIR openblas INTERFACE_INCLUDE_DIRECTORIES)
# include_directories(SYSTEM "${OPENBLAS_INCLUDE_DIR}")
#endif()

# ----------------------------------------------------------------------
# LAPACK

@@ -828,16 +754,25 @@ macro(build_lapack)
add_dependencies(lapack lapack_ep)
endmacro()

#if(MILVUS_WITH_LAPACK)
# resolve_dependency(LAPACK)
#
# get_target_property(LAPACK_INCLUDE_DIR lapack INTERFACE_INCLUDE_DIRECTORIES)
# include_directories(SYSTEM "${LAPACK_INCLUDE_DIR}")
#endif()

# ----------------------------------------------------------------------
# FAISS

if(NOT DEFINED BUILD_FAISS_WITH_MKL)
set(BUILD_FAISS_WITH_MKL OFF)
endif()

if(EXISTS "/proc/cpuinfo")
FILE(READ /proc/cpuinfo PROC_CPUINFO)

SET(VENDOR_ID_RX "vendor_id[ \t]*:[ \t]*([a-zA-Z]+)\n")
STRING(REGEX MATCH "${VENDOR_ID_RX}" VENDOR_ID "${PROC_CPUINFO}")
STRING(REGEX REPLACE "${VENDOR_ID_RX}" "\\1" VENDOR_ID "${VENDOR_ID}")

if(NOT ${VENDOR_ID} STREQUAL "GenuineIntel")
set(BUILD_FAISS_WITH_MKL OFF)
endif()
endif()
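
The block above keys the MKL default on the vendor_id field of /proc/cpuinfo; the same check can be reproduced by hand in the shell:

    # Prints the CPU vendor string; "GenuineIntel" keeps BUILD_FAISS_WITH_MKL eligible.
    grep -m1 'vendor_id' /proc/cpuinfo
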
macro(build_faiss)
|
||||
message(STATUS "Building FAISS-${FAISS_VERSION} from source")
|
||||
set(FAISS_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/faiss_ep-prefix/src/faiss_ep")
|
||||
@ -845,37 +780,37 @@ macro(build_faiss)
|
||||
set(FAISS_STATIC_LIB
|
||||
"${FAISS_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}faiss${CMAKE_STATIC_LIBRARY_SUFFIX}")
|
||||
|
||||
# add_custom_target(faiss_dependencies)
|
||||
# add_dependencies(faiss_dependencies openblas_ep)
|
||||
# add_dependencies(faiss_dependencies openblas)
|
||||
# get_target_property(FAISS_OPENBLAS_LIB_DIR openblas IMPORTED_LOCATION)
|
||||
# get_filename_component(FAISS_OPENBLAS_LIB "${FAISS_OPENBLAS_LIB_DIR}" DIRECTORY)
|
||||
|
||||
set(FAISS_CONFIGURE_ARGS
|
||||
"--prefix=${FAISS_PREFIX}"
|
||||
"CFLAGS=${EP_C_FLAGS}"
|
||||
"CXXFLAGS=${EP_CXX_FLAGS}"
|
||||
"LDFLAGS=-L${OPENBLAS_PREFIX}/lib -L${LAPACK_PREFIX}/lib -lopenblas -llapack"
|
||||
--without-python)
|
||||
|
||||
# if(OPENBLAS_STATIC_LIB)
|
||||
# set(OPENBLAS_LIBRARY ${OPENBLAS_STATIC_LIB})
|
||||
# else()
|
||||
# set(OPENBLAS_LIBRARY ${OPENBLAS_SHARED_LIB})
|
||||
# endif()
|
||||
# set(FAISS_DEPENDENCIES ${FAISS_DEPENDENCIES} ${OPENBLAS_LIBRARY})
|
||||
set(FAISS_CFLAGS ${EP_C_FLAGS})
|
||||
set(FAISS_CXXFLAGS ${EP_CXX_FLAGS})
|
||||
|
||||
if(${BUILD_FAISS_WITH_MKL} STREQUAL "ON")
|
||||
message(STATUS "Build Faiss with MKL")
|
||||
if(NOT DEFINED MKL_LIB_PATH)
|
||||
set(MKL_LIB_PATH "/opt/intel/compilers_and_libraries_${MKL_VERSION}/linux/mkl/lib/intel64")
|
||||
message(STATUS "MKL_LIB_PATH = ${MKL_LIB_PATH}")
|
||||
endif()
|
||||
|
||||
set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS}
|
||||
"CPPFLAGS=-DFINTEGER=long -DMKL_ILP64 -m64 -I${MKL_LIB_PATH}/../../include"
|
||||
"LDFLAGS=-L${MKL_LIB_PATH}"
|
||||
"LIBS=-Wl,--start-group ${MKL_LIB_PATH}/libmkl_intel_ilp64.a ${MKL_LIB_PATH}/libmkl_gnu_thread.a ${MKL_LIB_PATH}/libmkl_core.a -Wl,--end-group -lgomp -lpthread -lm -ldl")
|
||||
|
||||
else()
|
||||
message(STATUS "Build Faiss with OpenBlas/LAPACK")
|
||||
set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS}
|
||||
"LDFLAGS=-L${OPENBLAS_PREFIX}/lib -L${LAPACK_PREFIX}/lib")
|
||||
endif()
|
||||
|
||||
if(${MILVUS_WITH_FAISS_GPU_VERSION} STREQUAL "ON")
|
||||
set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS}
|
||||
"--with-cuda=${CUDA_TOOLKIT_ROOT_DIR}"
|
||||
# "with_cuda_arch=\"-gencode=arch=compute_35,code=compute_35 \\
|
||||
# -gencode=arch=compute_52,code=compute_52 \\
|
||||
# -gencode=arch=compute_60,code=compute_60 \\
|
||||
# -gencode=arch=compute_61,code=compute_61\""
|
||||
"--with-cuda-arch=\"-gencode=arch=compute_35,code=compute_35\""
|
||||
"--with-cuda-arch=\"-gencode=arch=compute_52,code=compute_52\""
|
||||
"--with-cuda-arch=\"-gencode=arch=compute_60,code=compute_60\""
|
||||
"--with-cuda-arch=\"-gencode=arch=compute_61,code=compute_61\""
|
||||
"--with-cuda-arch=-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_61,code=sm_61 -gencode=arch=compute_75,code=sm_75"
|
||||
)
|
||||
else()
|
||||
set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS} --without-cuda)
|
||||
@ -888,66 +823,67 @@ macro(build_faiss)
        CONFIGURE_COMMAND
            "./configure"
            ${FAISS_CONFIGURE_ARGS}
#        BINARY_DIR
#            ${FAISS_PREFIX}
#        INSTALL_DIR
#            ${FAISS_PREFIX}
#        BUILD_COMMAND
#            ${MAKE} ${MAKE_BUILD_ARGS}
        BUILD_COMMAND
            ${MAKE} ${MAKE_BUILD_ARGS} all
        COMMAND
            cd gpu && ${MAKE} ${MAKE_BUILD_ARGS}
            ${MAKE} ${MAKE_BUILD_ARGS} VERBOSE=1
        BUILD_IN_SOURCE
            1
#        INSTALL_DIR
#            ${FAISS_PREFIX}
        INSTALL_COMMAND
            ${MAKE} install
        COMMAND
            ln -s faiss_ep ../faiss
        BUILD_BYPRODUCTS
            ${FAISS_STATIC_LIB})
#        DEPENDS
#            ${faiss_dependencies})

    ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep lapack_ep)

    if(${BUILD_FAISS_WITH_MKL} STREQUAL "OFF")
        ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep lapack_ep)
    endif()

    file(MAKE_DIRECTORY "${FAISS_INCLUDE_DIR}")
    add_library(faiss STATIC IMPORTED)
    set_target_properties(
            faiss
            PROPERTIES IMPORTED_LOCATION "${FAISS_STATIC_LIB}"
            INTERFACE_INCLUDE_DIRECTORIES "${FAISS_INCLUDE_DIR}"
            INTERFACE_LINK_LIBRARIES "openblas;lapack" )
    add_library(faiss SHARED IMPORTED)

    if(${BUILD_FAISS_WITH_MKL} STREQUAL "ON")
        set(MKL_LIBS ${MKL_LIB_PATH}/libmkl_intel_ilp64.a
                ${MKL_LIB_PATH}/libmkl_gnu_thread.a
                ${MKL_LIB_PATH}/libmkl_core.a)

        set_target_properties(
                faiss
                PROPERTIES IMPORTED_LOCATION "${FAISS_STATIC_LIB}"
                INTERFACE_INCLUDE_DIRECTORIES "${FAISS_INCLUDE_DIR}"
                INTERFACE_LINK_LIBRARIES "${MKL_LIBS}" )
    else()
        set_target_properties(
                faiss
                PROPERTIES IMPORTED_LOCATION "${FAISS_STATIC_LIB}"
                INTERFACE_INCLUDE_DIRECTORIES "${FAISS_INCLUDE_DIR}"
                INTERFACE_LINK_LIBRARIES "openblas;lapack" )
    endif()
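    # Note: declaring faiss as an IMPORTED library lets the rest of the build
    # link against the ExternalProject output like a regular CMake target, and
    # INTERFACE_LINK_LIBRARIES carries the chosen BLAS backend along to every
    # consumer of the target.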
    add_dependencies(faiss faiss_ep)
    #add_dependencies(faiss openblas_ep)
    #add_dependencies(faiss lapack_ep)
    #target_link_libraries(faiss ${OPENBLAS_PREFIX}/lib)
    #target_link_libraries(faiss ${LAPACK_PREFIX}/lib)

    if(${BUILD_FAISS_WITH_MKL} STREQUAL "OFF")
        add_dependencies(faiss openblas_ep)
        add_dependencies(faiss lapack_ep)
    endif()

endmacro()

if(MILVUS_WITH_FAISS)

    resolve_dependency(OpenBLAS)
    get_target_property(OPENBLAS_INCLUDE_DIR openblas INTERFACE_INCLUDE_DIRECTORIES)
    include_directories(SYSTEM "${OPENBLAS_INCLUDE_DIR}")
    link_directories(SYSTEM ${OPENBLAS_PREFIX}/lib)
    if(${BUILD_FAISS_WITH_MKL} STREQUAL "OFF")
        resolve_dependency(OpenBLAS)
        get_target_property(OPENBLAS_INCLUDE_DIR openblas INTERFACE_INCLUDE_DIRECTORIES)
        include_directories(SYSTEM "${OPENBLAS_INCLUDE_DIR}")
        link_directories(SYSTEM ${OPENBLAS_PREFIX}/lib)

        resolve_dependency(LAPACK)
        get_target_property(LAPACK_INCLUDE_DIR lapack INTERFACE_INCLUDE_DIRECTORIES)
        include_directories(SYSTEM "${LAPACK_INCLUDE_DIR}")
        link_directories(SYSTEM "${LAPACK_PREFIX}/lib")
        resolve_dependency(LAPACK)
        get_target_property(LAPACK_INCLUDE_DIR lapack INTERFACE_INCLUDE_DIRECTORIES)
        include_directories(SYSTEM "${LAPACK_INCLUDE_DIR}")
        link_directories(SYSTEM "${LAPACK_PREFIX}/lib")
    endif()

    resolve_dependency(FAISS)
    get_target_property(FAISS_INCLUDE_DIR faiss INTERFACE_INCLUDE_DIRECTORIES)
    include_directories(SYSTEM "${FAISS_INCLUDE_DIR}")
    include_directories(SYSTEM "${CMAKE_CURRENT_BINARY_DIR}/faiss_ep-prefix/src/")
    link_directories(SYSTEM ${FAISS_PREFIX}/)
    link_directories(SYSTEM ${FAISS_PREFIX}/lib/)
    link_directories(SYSTEM ${FAISS_PREFIX}/gpu/)
endif()
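For reference, once the faiss target above resolves, consuming code only needs the bundled headers. A minimal sketch of what the dependency provides (the dimension and values are hypothetical, and the exact idx_t typedef varies between Faiss releases):

#include <faiss/IndexFlatL2.h>
#include <vector>

int main() {
    const int d = 64;                         // vector dimension (hypothetical)
    faiss::IndexFlatL2 index(d);              // exhaustive L2 index, no training step
    std::vector<float> vec(d, 0.5f);
    index.add(1, vec.data());                 // insert one vector
    float distance;
    faiss::Index::idx_t label;
    index.search(1, vec.data(), 1, &distance, &label);  // query its nearest neighbor
    return 0;
}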
# ----------------------------------------------------------------------
@ -984,8 +920,6 @@ macro(build_gtest)
    set(GMOCK_STATIC_LIB
        "${GTEST_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}gmock${CMAKE_STATIC_LIBRARY_SUFFIX}"
        )

    ExternalProject_Add(googletest_ep
        URL
        ${GTEST_SOURCE_URL}
@ -1025,13 +959,11 @@ macro(build_gtest)
endmacro()

if (MILVUS_BUILD_TESTS)
    #message(STATUS "Resolving gtest dependency")
    resolve_dependency(GTest)

    if(NOT GTEST_VENDORED)
    endif()

    # TODO: Don't use global includes but rather target_include_directories
    get_target_property(GTEST_INCLUDE_DIR gtest INTERFACE_INCLUDE_DIRECTORIES)
    link_directories(SYSTEM "${GTEST_PREFIX}/lib")
    include_directories(SYSTEM ${GTEST_INCLUDE_DIR})
@ -1069,32 +1001,8 @@ macro(build_lz4)
    set(LZ4_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/lz4_ep-prefix/src/lz4_ep")
    set(LZ4_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/lz4_ep-prefix/")

    if(MSVC)
        if(MILVUS_USE_STATIC_CRT)
            if(${UPPERCASE_BUILD_TYPE} STREQUAL "DEBUG")
                set(LZ4_RUNTIME_LIBRARY_LINKAGE "/p:RuntimeLibrary=MultiThreadedDebug")
            else()
                set(LZ4_RUNTIME_LIBRARY_LINKAGE "/p:RuntimeLibrary=MultiThreaded")
            endif()
        endif()
        set(LZ4_STATIC_LIB
            "${LZ4_BUILD_DIR}/visual/VS2010/bin/x64_${CMAKE_BUILD_TYPE}/liblz4_static.lib")
        set(LZ4_BUILD_COMMAND
            BUILD_COMMAND
            msbuild.exe
            /m
            /p:Configuration=${CMAKE_BUILD_TYPE}
            /p:Platform=x64
            /p:PlatformToolset=v140
            ${LZ4_RUNTIME_LIBRARY_LINKAGE}
            /t:Build
            ${LZ4_BUILD_DIR}/visual/VS2010/lz4.sln)
    else()
        set(LZ4_STATIC_LIB "${LZ4_BUILD_DIR}/lib/liblz4.a")
        #set(LZ4_BUILD_COMMAND BUILD_COMMAND ${CMAKE_SOURCE_DIR}/build-support/build-lz4-lib.sh
        #                      "AR=${CMAKE_AR}")
        set(LZ4_BUILD_COMMAND BUILD_COMMAND ${MAKE} ${MAKE_BUILD_ARGS} CFLAGS=${EP_C_FLAGS})
    endif()
    set(LZ4_STATIC_LIB "${LZ4_BUILD_DIR}/lib/liblz4.a")
    set(LZ4_BUILD_COMMAND BUILD_COMMAND ${MAKE} ${MAKE_BUILD_ARGS} CFLAGS=${EP_C_FLAGS})

    # We need to copy the header in lib to directory outside of the build
    externalproject_add(lz4_ep
@ -1129,7 +1037,6 @@ endmacro()

if(MILVUS_WITH_LZ4)
    resolve_dependency(Lz4)

    # TODO: Don't use global includes but rather target_include_directories
    get_target_property(LZ4_INCLUDE_DIR lz4 INTERFACE_INCLUDE_DIRECTORIES)
    link_directories(SYSTEM ${LZ4_BUILD_DIR}/lib/)
    include_directories(SYSTEM ${LZ4_INCLUDE_DIR})
@ -1155,16 +1062,8 @@ macro(build_mysqlpp)
    externalproject_add(mysqlpp_ep
        URL
        ${MYSQLPP_SOURCE_URL}
#        GIT_REPOSITORY
#        ${MYSQLPP_SOURCE_URL}
#        GIT_TAG
#        ${MYSQLPP_VERSION}
#        GIT_SHALLOW
#        TRUE
        ${EP_LOG_OPTIONS}
        CONFIGURE_COMMAND
#        "./bootstrap"
#        COMMAND
        "./configure"
        ${MYSQLPP_CONFIGURE_ARGS}
        BUILD_COMMAND
@ -1225,10 +1124,6 @@ macro(build_prometheus)
        ${PROMETHEUS_VERSION}
        GIT_SHALLOW
        TRUE
#        GIT_CONFIG
#        recurse-submodules=true
#        URL
#        ${PROMETHEUS_SOURCE_URL}
        ${EP_LOG_OPTIONS}
        CMAKE_ARGS
        ${PROMETHEUS_CMAKE_ARGS}
@ -1272,21 +1167,15 @@ if(MILVUS_WITH_PROMETHEUS)

    resolve_dependency(Prometheus)

    # TODO: Don't use global includes but rather target_include_directories
    #get_target_property(PROMETHEUS-core_INCLUDE_DIRS prometheus-core INTERFACE_INCLUDE_DIRECTORIES)

    #get_target_property(PROMETHEUS_PUSH_INCLUDE_DIRS prometheus_push INTERFACE_INCLUDE_DIRECTORIES)
    link_directories(SYSTEM ${PROMETHEUS_PREFIX}/push/)
    include_directories(SYSTEM ${PROMETHEUS_PREFIX}/push/include)

    #get_target_property(PROMETHEUS_PULL_INCLUDE_DIRS prometheus_pull INTERFACE_INCLUDE_DIRECTORIES)
    link_directories(SYSTEM ${PROMETHEUS_PREFIX}/pull/)
    include_directories(SYSTEM ${PROMETHEUS_PREFIX}/pull/include)

    link_directories(SYSTEM ${PROMETHEUS_PREFIX}/core/)
    include_directories(SYSTEM ${PROMETHEUS_PREFIX}/core/include)

    #link_directories(${PROMETHEUS_PREFIX}/civetweb_ep-prefix/src/civetweb_ep)
endif()

# ----------------------------------------------------------------------
@ -1334,8 +1223,6 @@ if(MILVUS_WITH_ROCKSDB)

    resolve_dependency(RocksDB)

    # TODO: Don't use global includes but rather target_include_directories
    # get_target_property(ROCKSDB_INCLUDE_DIRS rocksdb INTERFACE_INCLUDE_DIRECTORIES)
    link_directories(SYSTEM ${ROCKSDB_PREFIX}/lib/lib/)
    include_directories(SYSTEM ${ROCKSDB_INCLUDE_DIRS})
endif()
@ -1384,34 +1271,9 @@ macro(build_snappy)
endmacro()

if(MILVUS_WITH_SNAPPY)
#    if(Snappy_SOURCE STREQUAL "AUTO")
#        # Normally *Config.cmake files reside in /usr/lib/cmake but Snappy
#        # erroneously places them in ${CMAKE_ROOT}/Modules/
#        # This is fixed in 1.1.7 but fedora (30) still installs into the wrong
#        # location.
#        # https://bugzilla.redhat.com/show_bug.cgi?id=1679727
#        # https://src.fedoraproject.org/rpms/snappy/pull-request/1
#        find_package(Snappy QUIET HINTS "${CMAKE_ROOT}/Modules/")
#        if(NOT Snappy_FOUND)
#            find_package(SnappyAlt)
#        endif()
#        if(NOT Snappy_FOUND AND NOT SnappyAlt_FOUND)
#            build_snappy()
#        endif()
#    elseif(Snappy_SOURCE STREQUAL "BUNDLED")
#        build_snappy()
#    elseif(Snappy_SOURCE STREQUAL "SYSTEM")
#        # SnappyConfig.cmake is not installed on Ubuntu/Debian
#        # TODO: Make a bug report upstream
#        find_package(Snappy HINTS "${CMAKE_ROOT}/Modules/")
#        if(NOT Snappy_FOUND)
#            find_package(SnappyAlt REQUIRED)
#        endif()
#    endif()

    resolve_dependency(Snappy)

    # TODO: Don't use global includes but rather target_include_directories
    get_target_property(SNAPPY_INCLUDE_DIRS snappy INTERFACE_INCLUDE_DIRECTORIES)
    link_directories(SYSTEM ${SNAPPY_PREFIX}/lib/)
    include_directories(SYSTEM ${SNAPPY_INCLUDE_DIRS})
@ -1483,75 +1345,11 @@ macro(build_sqlite_orm)

    endif ()

    #set(SQLITE_ORM_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/sqlite_orm_ep-prefix/src/sqlite_orm_ep")
    #set(SQLITE_ORM_INCLUDE_DIR "${SQLITE_ORM_PREFIX}/include/sqlite_orm")

#    set(SQLITE_ORM_STATIC_LIB
#        "${SQLITE_ORM_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}sqlite_orm${CMAKE_STATIC_LIBRARY_SUFFIX}")
#
#    set(SQLITE_ORM_CMAKE_CXX_FLAGS "${EP_CXX_FLAGS} -std=c++14")
#    set(SQLITE_ORM_CMAKE_CXX_FLAGS_DEBUG "${EP_CXX_FLAGS} -std=c++14")
#
#    set(SQLITE_ORM_CMAKE_ARGS
#        ${EP_COMMON_CMAKE_ARGS}
#        "-DCMAKE_INSTALL_PREFIX=${SQLITE_ORM_PREFIX}"
#        #"LDFLAGS=-L${SQLITE_PREFIX}"
#        #"-DCMAKE_PREFIX_PATH=${SQLITE_PREFIX}/include"
#        "-DCMAKE_INCLUDE_PATH=${SQLITE_PREFIX}/include"
#        "-DCMAKE_CXX_FLAGS=${SQLITE_ORM_CMAKE_CXX_FLAGS}"
#        "-DCMAKE_CXX_FLAGS_DEBUG=${SQLITE_ORM_CMAKE_CXX_FLAGS}"
#        -DSqliteOrm_BuildTests=off
#        -DBUILD_TESTING=off)
#    message(STATUS "SQLITE_INCLUDE: ${SQLITE_ORM_CMAKE_ARGS}")
#
#    message(STATUS "SQLITE_ORM_CMAKE_CXX_FLAGS: ${SQLITE_ORM_CMAKE_CXX_FLAGS}")

#    externalproject_add(sqlite_orm_ep
#        URL
#        ${SQLITE_ORM_SOURCE_URL}
#        PREFIX ${CMAKE_CURRENT_BINARY_DIR}/sqlite_orm_ep-prefix
#        CONFIGURE_COMMAND
#        ""
#        BUILD_COMMAND
#        ""
#        INSTALL_COMMAND
#        ""
        #${EP_LOG_OPTIONS}
#        CMAKE_ARGS
#        ${SQLITE_ORM_CMAKE_ARGS}
#        BUILD_COMMAND
#        ${MAKE}
#        ${MAKE_BUILD_ARGS}
#        #"LDFLAGS=-L${SQLITE_PREFIX}"
#        BUILD_IN_SOURCE
#        1
#        BUILD_BYPRODUCTS
#        "${SQLITE_ORM_STATIC_LIB}"
#        )
#    ExternalProject_Add_StepDependencies(sqlite_orm_ep build sqlite_ep)

    #set(SQLITE_ORM_SQLITE_HEADER ${SQLITE_INCLUDE_DIR}/sqlite3.h)
#    file(MAKE_DIRECTORY "${SQLITE_ORM_INCLUDE_DIR}")
#    add_library(sqlite_orm STATIC IMPORTED)
##    message(STATUS "SQLITE_INCLUDE_DIR: ${SQLITE_INCLUDE_DIR}")
#    set_target_properties(
#        sqlite_orm
#        PROPERTIES
#        IMPORTED_LOCATION "${SQLITE_ORM_STATIC_LIB}"
#        INTERFACE_INCLUDE_DIRECTORIES "${SQLITE_ORM_INCLUDE_DIR};${SQLITE_INCLUDE_DIR}")
#    target_include_directories(sqlite_orm INTERFACE ${SQLITE_PREFIX} ${SQLITE_INCLUDE_DIR})
#    target_link_libraries(sqlite_orm INTERFACE sqlite)
#
#    add_dependencies(sqlite_orm sqlite_orm_ep)
endmacro()

if(MILVUS_WITH_SQLITE_ORM)
    resolve_dependency(SQLite_ORM)
#    ExternalProject_Get_Property(sqlite_orm_ep source_dir)
#    set(SQLITE_ORM_INCLUDE_DIR ${source_dir}/sqlite_orm_ep)
    include_directories(SYSTEM "${SQLITE_ORM_INCLUDE_DIR}")
    #message(STATUS "SQLITE_ORM_INCLUDE_DIR: ${SQLITE_ORM_INCLUDE_DIR}")
endif()
# ----------------------------------------------------------------------
@ -1591,18 +1389,7 @@ macro(build_thrift)
    endif()

    set(THRIFT_STATIC_LIB_NAME "${CMAKE_STATIC_LIBRARY_PREFIX}thrift")
    if(MSVC)
        if(MILVUS_USE_STATIC_CRT)
            set(THRIFT_STATIC_LIB_NAME "${THRIFT_STATIC_LIB_NAME}")
            set(THRIFT_CMAKE_ARGS ${THRIFT_CMAKE_ARGS} "-DWITH_MT=ON")
        else()
            set(THRIFT_STATIC_LIB_NAME "${THRIFT_STATIC_LIB_NAME}")
            set(THRIFT_CMAKE_ARGS ${THRIFT_CMAKE_ARGS} "-DWITH_MT=OFF")
        endif()
    endif()
    if(${UPPERCASE_BUILD_TYPE} STREQUAL "DEBUG")
        set(THRIFT_STATIC_LIB_NAME "${THRIFT_STATIC_LIB_NAME}")
    endif()

    set(THRIFT_STATIC_LIB
        "${THRIFT_PREFIX}/lib/${THRIFT_STATIC_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}")

@ -1613,60 +1400,6 @@ macro(build_thrift)
    endif()
    set(THRIFT_DEPENDENCIES ${THRIFT_DEPENDENCIES} ${ZLIB_LIBRARY})

    if(MSVC)
        set(WINFLEXBISON_VERSION 2.4.9)
        set(WINFLEXBISON_PREFIX
            "${CMAKE_CURRENT_BINARY_DIR}/winflexbison_ep/src/winflexbison_ep-install")
        externalproject_add(
            winflexbison_ep
            URL
            https://github.com/lexxmark/winflexbison/releases/download/v.${WINFLEXBISON_VERSION}/win_flex_bison-${WINFLEXBISON_VERSION}.zip
            URL_HASH
            MD5=a2e979ea9928fbf8567e995e9c0df765
            SOURCE_DIR
            ${WINFLEXBISON_PREFIX}
            CONFIGURE_COMMAND
            ""
            BUILD_COMMAND
            ""
            INSTALL_COMMAND
            ""
            ${EP_LOG_OPTIONS})
        set(THRIFT_DEPENDENCIES ${THRIFT_DEPENDENCIES} winflexbison_ep)

        set(THRIFT_CMAKE_ARGS
            "-DFLEX_EXECUTABLE=${WINFLEXBISON_PREFIX}/win_flex.exe"
            "-DBISON_EXECUTABLE=${WINFLEXBISON_PREFIX}/win_bison.exe"
            "-DZLIB_INCLUDE_DIR=${ZLIB_INCLUDE_DIR}"
            "-DWITH_SHARED_LIB=OFF"
            "-DWITH_PLUGIN=OFF"
            ${THRIFT_CMAKE_ARGS})
    elseif(APPLE)
        # Some other process always resets BISON_EXECUTABLE to the system default,
        # thus we use our own variable here.
        if(NOT DEFINED THRIFT_BISON_EXECUTABLE)
            find_package(BISON 2.5.1)

            # In the case where we cannot find a system-wide installation, look for
            # homebrew and ask for its bison installation.
            if(NOT BISON_FOUND)
                find_program(BREW_BIN brew)
                if(BREW_BIN)
                    execute_process(COMMAND ${BREW_BIN} --prefix bison
                        OUTPUT_VARIABLE BISON_PREFIX
                        OUTPUT_STRIP_TRAILING_WHITESPACE)
                    set(BISON_EXECUTABLE "${BISON_PREFIX}/bin/bison")
                    find_package(BISON 2.5.1)
                    set(THRIFT_BISON_EXECUTABLE "${BISON_EXECUTABLE}")
                endif()
            else()
                set(THRIFT_BISON_EXECUTABLE "${BISON_EXECUTABLE}")
            endif()
        endif()
        set(THRIFT_CMAKE_ARGS "-DBISON_EXECUTABLE=${THRIFT_BISON_EXECUTABLE}"
            ${THRIFT_CMAKE_ARGS})
    endif()

    externalproject_add(thrift_ep
        URL
        ${THRIFT_SOURCE_URL}
@ -1695,8 +1428,7 @@ endmacro()

if(MILVUS_WITH_THRIFT)
    resolve_dependency(Thrift)
    # TODO: Don't use global includes but rather target_include_directories
    # MESSAGE(STATUS ${THRIFT_PREFIX}/lib/)

    link_directories(SYSTEM ${THRIFT_PREFIX}/lib/)
    link_directories(SYSTEM ${CMAKE_CURRENT_BINARY_DIR}/thrift_ep-prefix/src/thrift_ep-build/lib)
    include_directories(SYSTEM ${THRIFT_INCLUDE_DIR})
@ -1742,8 +1474,7 @@ endmacro()

if(MILVUS_WITH_YAMLCPP)
    resolve_dependency(yaml-cpp)

    # TODO: Don't use global includes but rather target_include_directories
    get_target_property(YAMLCPP_INCLUDE_DIR yaml-cpp INTERFACE_INCLUDE_DIRECTORIES)
    link_directories(SYSTEM ${YAMLCPP_PREFIX}/lib/)
    include_directories(SYSTEM ${YAMLCPP_INCLUDE_DIR})
@ -1755,15 +1486,7 @@ endif()
macro(build_zlib)
    message(STATUS "Building ZLIB-${ZLIB_VERSION} from source")
    set(ZLIB_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/zlib_ep-prefix/src/zlib_ep")
    if(MSVC)
        if(${UPPERCASE_BUILD_TYPE} STREQUAL "DEBUG")
            set(ZLIB_STATIC_LIB_NAME zlibstaticd.lib)
        else()
            set(ZLIB_STATIC_LIB_NAME zlibstatic.lib)
        endif()
    else()
        set(ZLIB_STATIC_LIB_NAME libz.a)
    endif()
    set(ZLIB_STATIC_LIB_NAME libz.a)
    set(ZLIB_STATIC_LIB "${ZLIB_PREFIX}/lib/${ZLIB_STATIC_LIB_NAME}")
    set(ZLIB_CMAKE_ARGS ${EP_COMMON_CMAKE_ARGS} "-DCMAKE_INSTALL_PREFIX=${ZLIB_PREFIX}"
        -DBUILD_SHARED_LIBS=OFF)
@ -1792,8 +1515,7 @@ endmacro()

if(MILVUS_WITH_ZLIB)
    resolve_dependency(ZLIB)

    # TODO: Don't use global includes but rather target_include_directories
    get_target_property(ZLIB_INCLUDE_DIR zlib INTERFACE_INCLUDE_DIRECTORIES)
    include_directories(SYSTEM ${ZLIB_INCLUDE_DIR})
endif()
@ -1815,22 +1537,15 @@ macro(build_zstd)
        -DZSTD_BUILD_STATIC=on
        -DZSTD_MULTITHREAD_SUPPORT=off)

    if(MSVC)
        set(ZSTD_STATIC_LIB "${ZSTD_PREFIX}/lib/zstd_static.lib")
        if(MILVUS_USE_STATIC_CRT)
            set(ZSTD_CMAKE_ARGS ${ZSTD_CMAKE_ARGS} "-DZSTD_USE_STATIC_RUNTIME=on")
        endif()
    else()
        set(ZSTD_STATIC_LIB "${ZSTD_PREFIX}/lib/libzstd.a")
        # Only pass our C flags on Unix, as on MSVC they lead to an
        # "incompatible command-line options" error
        set(ZSTD_CMAKE_ARGS
            ${ZSTD_CMAKE_ARGS}
            -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
            -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
            -DCMAKE_C_FLAGS=${EP_C_FLAGS}
            -DCMAKE_CXX_FLAGS=${EP_CXX_FLAGS})
    endif()

    set(ZSTD_STATIC_LIB "${ZSTD_PREFIX}/lib/libzstd.a")

    set(ZSTD_CMAKE_ARGS
        ${ZSTD_CMAKE_ARGS}
        -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
        -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
        -DCMAKE_C_FLAGS=${EP_C_FLAGS}
        -DCMAKE_CXX_FLAGS=${EP_CXX_FLAGS})

    if(CMAKE_VERSION VERSION_LESS 3.7)
        message(FATAL_ERROR "Building zstd using ExternalProject requires at least CMake 3.7")
@ -1864,8 +1579,7 @@ endmacro()

if(MILVUS_WITH_ZSTD)
    resolve_dependency(ZSTD)

    # TODO: Don't use global includes but rather target_include_directories
    get_target_property(ZSTD_INCLUDE_DIR zstd INTERFACE_INCLUDE_DIRECTORIES)
    link_directories(SYSTEM ${ZSTD_PREFIX}/lib)
    include_directories(SYSTEM ${ZSTD_INCLUDE_DIR})
@ -1881,7 +1595,7 @@ macro(build_aws)
        ${EP_COMMON_TOOLCHAIN}
        "-DCMAKE_INSTALL_PREFIX=${AWS_PREFIX}"
        -DCMAKE_BUILD_TYPE=Release
        -DCMAKE_INSTALL_LIBDIR=lib #${CMAKE_INSTALL_LIBDIR}
        -DCMAKE_INSTALL_LIBDIR=lib
        -DBUILD_ONLY=s3
        -DBUILD_SHARED_LIBS=off
        -DENABLE_TESTING=off
@ -1892,8 +1606,7 @@ macro(build_aws)
        "${AWS_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}aws-cpp-sdk-core${CMAKE_STATIC_LIBRARY_SUFFIX}")
    set(AWS_CPP_SDK_S3_STATIC_LIB
        "${AWS_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}aws-cpp-sdk-s3${CMAKE_STATIC_LIBRARY_SUFFIX}")
    # Only pass our C flags on Unix, as on MSVC they lead to an
    # "incompatible command-line options" error

    set(AWS_CMAKE_ARGS
        ${AWS_CMAKE_ARGS}
        -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
@ -1901,10 +1614,6 @@ macro(build_aws)
        -DCMAKE_C_FLAGS=${EP_C_FLAGS}
        -DCMAKE_CXX_FLAGS=${EP_CXX_FLAGS})

    if(CMAKE_VERSION VERSION_LESS 3.7)
        message(FATAL_ERROR "Building AWS using ExternalProject requires at least CMake 3.7")
    endif()

    externalproject_add(aws_ep
        ${EP_LOG_OPTIONS}
        CMAKE_ARGS
@ -1919,8 +1628,6 @@ macro(build_aws)
        BUILD_BYPRODUCTS
        "${AWS_CPP_SDK_S3_STATIC_LIB}"
        "${AWS_CPP_SDK_CORE_STATIC_LIB}")

    file(MAKE_DIRECTORY "${AWS_PREFIX}/include")

    add_library(aws-cpp-sdk-s3 STATIC IMPORTED)
@ -1943,8 +1650,7 @@ endmacro()

if(MILVUS_WITH_AWS)
    resolve_dependency(AWS)

    # TODO: Don't use global includes but rather target_include_directories
    link_directories(SYSTEM ${AWS_PREFIX}/lib)

    get_target_property(AWS_CPP_SDK_S3_INCLUDE_DIR aws-cpp-sdk-s3 INTERFACE_INCLUDE_DIRECTORIES)
@ -1954,3 +1660,96 @@ if(MILVUS_WITH_AWS)
    include_directories(SYSTEM ${AWS_CPP_SDK_CORE_INCLUDE_DIR})

endif()
# ----------------------------------------------------------------------
# libunwind

macro(build_libunwind)
    message(STATUS "Building libunwind-${LIBUNWIND_VERSION} from source")
    set(LIBUNWIND_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/libunwind_ep-prefix/src/libunwind_ep/install")
    set(LIBUNWIND_INCLUDE_DIR "${LIBUNWIND_PREFIX}/include")
    set(LIBUNWIND_SHARED_LIB "${LIBUNWIND_PREFIX}/lib/libunwind${CMAKE_SHARED_LIBRARY_SUFFIX}")
    set(LIBUNWIND_CONFIGURE_ARGS "--prefix=${LIBUNWIND_PREFIX}")

    externalproject_add(libunwind_ep
        URL
        ${LIBUNWIND_SOURCE_URL}
        ${EP_LOG_OPTIONS}
        CONFIGURE_COMMAND
        "./configure"
        ${LIBUNWIND_CONFIGURE_ARGS}
        BUILD_COMMAND
        ${MAKE} ${MAKE_BUILD_ARGS}
        BUILD_IN_SOURCE
        1
        INSTALL_COMMAND
        ${MAKE} install
        BUILD_BYPRODUCTS
        ${LIBUNWIND_SHARED_LIB})

    file(MAKE_DIRECTORY "${LIBUNWIND_INCLUDE_DIR}")

    add_library(libunwind SHARED IMPORTED)
    set_target_properties(libunwind
        PROPERTIES IMPORTED_LOCATION "${LIBUNWIND_SHARED_LIB}"
        INTERFACE_INCLUDE_DIRECTORIES "${LIBUNWIND_INCLUDE_DIR}")

    add_dependencies(libunwind libunwind_ep)
endmacro()

if(MILVUS_WITH_LIBUNWIND)
    resolve_dependency(libunwind)

    # TODO: Don't use global includes but rather target_include_directories
    get_target_property(LIBUNWIND_INCLUDE_DIR libunwind INTERFACE_INCLUDE_DIRECTORIES)
    include_directories(SYSTEM ${LIBUNWIND_INCLUDE_DIR})
endif()

# ----------------------------------------------------------------------
# gperftools

macro(build_gperftools)
    message(STATUS "Building gperftools-${GPERFTOOLS_VERSION} from source")
    set(GPERFTOOLS_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/gperftools_ep-prefix/src/gperftools_ep")
    set(GPERFTOOLS_INCLUDE_DIR "${GPERFTOOLS_PREFIX}/include")
    set(GPERFTOOLS_STATIC_LIB "${GPERFTOOLS_PREFIX}/lib/libprofiler${CMAKE_STATIC_LIBRARY_SUFFIX}")
    set(GPERFTOOLS_CONFIGURE_ARGS "--prefix=${GPERFTOOLS_PREFIX}")

    externalproject_add(gperftools_ep
        URL
        ${GPERFTOOLS_SOURCE_URL}
        ${EP_LOG_OPTIONS}
        CONFIGURE_COMMAND
        "./configure"
        ${GPERFTOOLS_CONFIGURE_ARGS}
        BUILD_COMMAND
        ${MAKE} ${MAKE_BUILD_ARGS}
        BUILD_IN_SOURCE
        1
        INSTALL_COMMAND
        ${MAKE} install
        BUILD_BYPRODUCTS
        ${GPERFTOOLS_STATIC_LIB})

    ExternalProject_Add_StepDependencies(gperftools_ep build libunwind_ep)

    file(MAKE_DIRECTORY "${GPERFTOOLS_INCLUDE_DIR}")

    add_library(gperftools STATIC IMPORTED)
    set_target_properties(gperftools
        PROPERTIES IMPORTED_LOCATION "${GPERFTOOLS_STATIC_LIB}"
        INTERFACE_INCLUDE_DIRECTORIES "${GPERFTOOLS_INCLUDE_DIR}"
        INTERFACE_LINK_LIBRARIES libunwind)

    add_dependencies(gperftools gperftools_ep)
    add_dependencies(gperftools libunwind_ep)
endmacro()

if(MILVUS_WITH_GPERFTOOLS)
    resolve_dependency(gperftools)

    # TODO: Don't use global includes but rather target_include_directories
    get_target_property(GPERFTOOLS_INCLUDE_DIR gperftools INTERFACE_INCLUDE_DIRECTORIES)
    include_directories(SYSTEM ${GPERFTOOLS_INCLUDE_DIR})
    link_directories(SYSTEM ${GPERFTOOLS_PREFIX}/lib)
endif()
@ -6,7 +6,7 @@
    TO_STANDARD_OUTPUT = false
    SUBSECOND_PRECISION = 3
    PERFORMANCE_TRACKING = false
    MAX_LOG_FILE_SIZE = 2097152 ## Throw log files away after 2MB
    MAX_LOG_FILE_SIZE = 209715200 ## Throw log files away after 200MB
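    ## for reference: 209715200 = 200 * 1024 * 1024, i.e. 200MB expressed in bytes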
* DEBUG:
    FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%H:%m}-debug.log"
    ENABLED = true
@ -6,6 +6,9 @@ server_config:

db_config:
  db_path: @MILVUS_DB_PATH@            # milvus data storage path
  db_slave_path:                       # secondary data storage path, split by semicolon

  parallel_reduce: false               # use multi-threads to reduce topk result

  # URI format: dialect://username:password@host:port/database
  # All parts except dialect are optional, but you MUST include the delimiters
@ -13,10 +16,10 @@ db_config:
  db_backend_url: sqlite://:@:/

  index_building_threshold: 1024       # index building trigger threshold, default: 1024, unit: MB
  archive_disk_threshold: 512          # trigger archive action if storage size exceeds this value, unit: GB
  archive_days_threshold: 30           # files older than x days will be archived, unit: day
  maximum_memory: 4                    # maximum memory allowed, default: 4, unit: GB, should be at least 1 GB.
                                       # the sum of maximum_memory and cpu_cache_capacity should be less than total memory
  archive_disk_threshold: 0            # trigger archive action if storage size exceeds this value, 0 means no limit, unit: GB
  archive_days_threshold: 0            # files older than x days will be archived, 0 means no limit, unit: day
  insert_buffer_size: 4                # maximum insert buffer size allowed, default: 4, unit: GB, should be at least 1 GB.
                                       # the sum of insert_buffer_size and cpu_cache_capacity should be less than total memory, unit: GB
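                                       # example: on a 32 GB host the defaults above (4 + 16) leave roughly 12 GB for the rest of the process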

metric_config:
  is_startup: off                      # if monitoring start: on, off
@ -33,6 +36,11 @@ license_config:        # license configure

cache_config:          # cache configure
  cpu_cache_capacity: 16               # how much memory is used as cache, unit: GB, range: 0 ~ less than total memory
  cache_free_percent: 0.85             # old data will be erased from cache when cache is full, this value specifies how much memory should be kept, range: greater than zero ~ 1.0
  insert_cache_immediately: false      # inserted data will be loaded into cache immediately for hot query

engine_config:
  nprobe: 10
  nlist: 16384
  use_blas_threshold: 20
  metric_type: L2                      # compare vectors by euclidean distance(L2) or inner product(IP), optional: L2 or IP
@ -13,6 +13,27 @@ DIR_LCOV_OUTPUT="lcov_out"

DIR_GCNO="cmake_build"
DIR_UNITTEST="milvus/bin"

MYSQL_USER_NAME=root
MYSQL_PASSWORD=Fantast1c
MYSQL_HOST='192.168.1.194'
MYSQL_PORT='3306'

MYSQL_DB_NAME=milvus_`date +%s%N`

function mysql_exc()
{
    cmd=$1
    mysql -h${MYSQL_HOST} -u${MYSQL_USER_NAME} -p${MYSQL_PASSWORD} -e "${cmd}"
    if [ $? -ne 0 ]; then
        echo "mysql $cmd run failed"
    fi
}
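# usage note: mysql_exc runs a single SQL statement and only reports failure
# via echo; it does not abort the script, so the statements below still run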

mysql_exc "CREATE DATABASE IF NOT EXISTS ${MYSQL_DB_NAME};"
mysql_exc "GRANT ALL PRIVILEGES ON ${MYSQL_DB_NAME}.* TO '${MYSQL_USER_NAME}'@'%';"
mysql_exc "FLUSH PRIVILEGES;"
mysql_exc "USE ${MYSQL_DB_NAME};"

MYSQL_USER_NAME=root
MYSQL_PASSWORD=Fantast1c
10
cpp/scripts/requirements.sh
Executable file
@ -0,0 +1,10 @@
#!/usr/bin/env bash

wget -P /tmp https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB
apt-key add /tmp/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB

sh -c 'echo deb https://apt.repos.intel.com/mkl all main > /etc/apt/sources.list.d/intel-mkl.list'
apt -y update && apt-get -y install intel-mkl-gnu-2019.4-243 intel-mkl-core-2019.4-243

#sh -c 'echo export LD_LIBRARY_PATH=/opt/intel/compilers_and_libraries_2019.4.243/linux/mkl/lib/intel64:\$LD_LIBRARY_PATH > /etc/profile.d/mkl.sh'
#source /etc/profile
@ -49,7 +49,6 @@ set(engine_files
    ${db_files}
    ${db_scheduler_files}
    ${wrapper_files}
#    metrics/Metrics.cpp
    ${metrics_files}
    ${knowhere_files}
    )
@ -93,10 +92,26 @@ set(third_party_libs
    cublas
    mysqlpp
    ${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
    cudart
    )

if (MEGASEARCH_WITH_ARROW STREQUAL "ON")
    set(third_party_libs ${third_party_libs} arrow)
endif()
endif()
if(${BUILD_FAISS_WITH_MKL} STREQUAL "ON")
    set(third_party_libs ${third_party_libs}
        ${MKL_LIBS})
else()
    set(third_party_libs ${third_party_libs}
        lapack
        openblas)
endif()

if (MILVUS_ENABLE_PROFILING STREQUAL "ON")
    set(third_party_libs ${third_party_libs}
        gperftools
        libunwind)
endif()

if (GPU_VERSION STREQUAL "ON")
    link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
@ -147,6 +162,8 @@ if (ENABLE_LICENSE STREQUAL "ON")
endif ()

set(metrics_lib
    easyloggingpp
    yaml-cpp
    prometheus-cpp-push
    prometheus-cpp-pull
    prometheus-cpp-core
@ -213,6 +230,6 @@ install(FILES
    ${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}
    ${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}.3
    ${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}.3.2.4
    DESTINATION lib) #need to copy libmysqlpp.so
    DESTINATION lib)

#add_subdirectory(sdk)
add_subdirectory(sdk)
55
cpp/src/cache/Cache.cpp
vendored
@ -13,9 +13,12 @@ namespace zilliz {
namespace milvus {
namespace cache {

constexpr double DEFAULT_THRESHHOLD_PERCENT = 0.85;

Cache::Cache(int64_t capacity, uint64_t cache_max_count)
    : usage_(0),
      capacity_(capacity),
      freemem_percent_(DEFAULT_THRESHHOLD_PERCENT),
      lru_(cache_max_count) {
//    AGENT_LOG_DEBUG << "Construct Cache with capacity " << std::to_string(mem_capacity)
}
@ -64,15 +67,14 @@ void Cache::insert(const std::string& key, const DataObjPtr& data_ptr) {
        usage_ += data_ptr->size();
    }

//        AGENT_LOG_DEBUG << "Insert into LRU(" << (capacity_ > 0 ? std::to_string(usage_ * 100 / capacity_) : "Nan")
//                        << "%, +" << data_ptr->size() << ", " << usage_ << ", " << lru_.size() << "):"
//                        << " " << key;
        SERVER_LOG_DEBUG << "Insert " << key << " size:" << data_ptr->size()
                         << " bytes into cache, usage: " << usage_ << " bytes";
    }

    if (usage_ > capacity_) {
//        AGENT_LOG_TRACE << "Current usage " << usage_
//                        << " exceeds cache capacity " << capacity_
//                        << ", start free memory";
        SERVER_LOG_DEBUG << "Current usage " << usage_
                         << " exceeds cache capacity " << capacity_
                         << ", start free memory";
        free_memory();
    }
}
@ -86,12 +88,9 @@ void Cache::erase(const std::string& key) {
    const CacheObjPtr& obj_ptr = lru_.get(key);
    const DataObjPtr& data_ptr = obj_ptr->data_;
    usage_ -= data_ptr->size();
//    AGENT_LOG_DEBUG << "Erase from LRU(" << (capacity_ > 0 ? std::to_string(usage_*100/capacity_) : "Nan")
//                    << "%, -" << data_ptr->size() << ", " << usage_ << ", " << lru_.size() << "): "
//                    << (data_ptr->flags().get_flag(DataObjAttr::kPinned) ? "Pinned " : "")
//                    << (data_ptr->flags().get_flag(DataObjAttr::kValid) ? "Valid " : "")
//                    << "(ref:" << obj_ptr->ref_ << ") "
//                    << key;

    SERVER_LOG_DEBUG << "Erase " << key << " size: " << data_ptr->size();

    lru_.erase(key);
}

@ -99,7 +98,7 @@ void Cache::clear() {
    std::lock_guard<std::mutex> lock(mutex_);
    lru_.clear();
    usage_ = 0;
//    AGENT_LOG_DEBUG << "Clear LRU !";
    SERVER_LOG_DEBUG << "Clear cache !";
}

#if 0 /* caiyd 20190221, need more testing before enable */
@ -162,8 +161,11 @@ void Cache::restore_from_file(const std::string& key, const CacheObjPtr& obj_ptr
void Cache::free_memory() {
    if (usage_ <= capacity_) return;

    int64_t threshhold = capacity_ * THRESHHOLD_PERCENT;
    int64_t threshhold = capacity_ * freemem_percent_;
    int64_t delta_size = usage_ - threshhold;
    if(delta_size <= 0) {
        delta_size = 1; // ensure at least one item erased
    }

    std::set<std::string> key_array;
    int64_t released_size = 0;
@ -183,7 +185,7 @@ void Cache::free_memory() {
        }
    }

//    AGENT_LOG_DEBUG << "to be released memory size: " << released_size;
    SERVER_LOG_DEBUG << "to be released memory size: " << released_size;

    for (auto& key : key_array) {
        erase(key);
@ -193,28 +195,15 @@ void Cache::free_memory() {
}

void Cache::print() {
    int64_t still_pinned_count = 0;
    int64_t total_pinned_size = 0;
    int64_t total_valid_empty_size = 0;
    size_t cache_count = 0;
    {
        std::lock_guard<std::mutex> lock(mutex_);

        for (auto it = lru_.begin(); it != lru_.end(); ++it) {
            auto& obj_ptr = it->second;
            const auto& data_ptr = obj_ptr->data_;
            if (data_ptr != nullptr) {
                total_pinned_size += data_ptr->size();
                ++still_pinned_count;
            }
            // data_ptr is null for empty entries, so there is no size to add;
            // total_valid_empty_size stays 0 by construction
        }
        cache_count = lru_.size();
    }

    SERVER_LOG_DEBUG << "[Still Pinned count]: " << still_pinned_count;
    SERVER_LOG_DEBUG << "[Pinned Memory total size(byte)]: " << total_pinned_size;
    SERVER_LOG_DEBUG << "[valid_empty total size(byte)]: " << total_valid_empty_size;
    SERVER_LOG_DEBUG << "[free memory size(byte)]: " << capacity_ - total_pinned_size - total_valid_empty_size;
    SERVER_LOG_DEBUG << "[Cache item count]: " << cache_count;
    SERVER_LOG_DEBUG << "[Cache usage]: " << usage_ << " bytes";
    SERVER_LOG_DEBUG << "[Cache capacity]: " << capacity_ << " bytes";
}

} // cache
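To make the eviction arithmetic in Cache::free_memory() concrete, a standalone sketch with hypothetical numbers (16 GB capacity and the default 0.85 free-memory percent):

#include <cstdint>
#include <iostream>

int main() {
    const int64_t capacity = 16LL * 1024 * 1024 * 1024;  // 16 GB cache (hypothetical)
    const double freemem_percent = 0.85;                 // keep usage at or below 85% after eviction
    int64_t usage = capacity + 1;                        // just over capacity triggers free_memory()
    int64_t threshold = static_cast<int64_t>(capacity * freemem_percent);
    int64_t delta_size = usage - threshold;              // bytes that must be released
    if (delta_size <= 0) delta_size = 1;                 // same guard as the real code
    std::cout << "must release at least " << delta_size << " bytes\n";
    return 0;
}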
5
cpp/src/cache/Cache.h
vendored
@ -18,7 +18,6 @@ namespace milvus {
namespace cache {

const std::string SWAP_DIR = ".CACHE";
const float THRESHHOLD_PERCENT = 0.75;

class Cache {
private:
@ -45,6 +44,9 @@ public:
    int64_t capacity() const { return capacity_; } //unit: BYTE
    void set_capacity(int64_t capacity); //unit: BYTE

    double freemem_percent() const { return freemem_percent_; }
    void set_freemem_percent(double percent) { freemem_percent_ = percent; }

    size_t size() const;
    bool exists(const std::string& key);
    DataObjPtr get(const std::string& key);
@ -57,6 +59,7 @@ public:
private:
    int64_t usage_;
    int64_t capacity_;
    double freemem_percent_;

    LRU<std::string, CacheObjPtr> lru_;
    mutable std::mutex mutex_;
12
cpp/src/cache/CacheMgr.cpp
vendored
@ -4,6 +4,7 @@
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////

#include "utils/Log.h"
#include "CacheMgr.h"
#include "metrics/Metrics.h"

@ -20,6 +21,7 @@ CacheMgr::~CacheMgr() {

uint64_t CacheMgr::ItemCount() const {
    if(cache_ == nullptr) {
        SERVER_LOG_ERROR << "Cache doesn't exist";
        return 0;
    }

@ -28,6 +30,7 @@ uint64_t CacheMgr::ItemCount() const {

bool CacheMgr::ItemExists(const std::string& key) {
    if(cache_ == nullptr) {
        SERVER_LOG_ERROR << "Cache doesn't exist";
        return false;
    }

@ -36,6 +39,7 @@ bool CacheMgr::ItemExists(const std::string& key) {

DataObjPtr CacheMgr::GetItem(const std::string& key) {
    if(cache_ == nullptr) {
        SERVER_LOG_ERROR << "Cache doesn't exist";
        return nullptr;
    }
    server::Metrics::GetInstance().CacheAccessTotalIncrement();
@ -53,6 +57,7 @@ engine::Index_ptr CacheMgr::GetIndex(const std::string& key) {

void CacheMgr::InsertItem(const std::string& key, const DataObjPtr& data) {
    if(cache_ == nullptr) {
        SERVER_LOG_ERROR << "Cache doesn't exist";
        return;
    }

@ -62,6 +67,7 @@ void CacheMgr::InsertItem(const std::string& key, const DataObjPtr& data) {

void CacheMgr::InsertItem(const std::string& key, const engine::Index_ptr& index) {
    if(cache_ == nullptr) {
        SERVER_LOG_ERROR << "Cache doesn't exist";
        return;
    }

@ -72,6 +78,7 @@ void CacheMgr::InsertItem(const std::string& key, const engine::Index_ptr& index

void CacheMgr::EraseItem(const std::string& key) {
    if(cache_ == nullptr) {
        SERVER_LOG_ERROR << "Cache doesn't exist";
        return;
    }

@ -81,6 +88,7 @@ void CacheMgr::EraseItem(const std::string& key) {

void CacheMgr::PrintInfo() {
    if(cache_ == nullptr) {
        SERVER_LOG_ERROR << "Cache doesn't exist";
        return;
    }

@ -89,6 +97,7 @@ void CacheMgr::PrintInfo() {

void CacheMgr::ClearCache() {
    if(cache_ == nullptr) {
        SERVER_LOG_ERROR << "Cache doesn't exist";
        return;
    }

@ -97,6 +106,7 @@ void CacheMgr::ClearCache() {

int64_t CacheMgr::CacheUsage() const {
    if(cache_ == nullptr) {
        SERVER_LOG_ERROR << "Cache doesn't exist";
        return 0;
    }

@ -105,6 +115,7 @@ int64_t CacheMgr::CacheUsage() const {

int64_t CacheMgr::CacheCapacity() const {
    if(cache_ == nullptr) {
        SERVER_LOG_ERROR << "Cache doesn't exist";
        return 0;
    }

@ -113,6 +124,7 @@ int64_t CacheMgr::CacheCapacity() const {

void CacheMgr::SetCapacity(int64_t capacity) {
    if(cache_ == nullptr) {
        SERVER_LOG_ERROR << "Cache doesn't exist";
        return;
    }
    cache_->set_capacity(capacity);
15
cpp/src/cache/CpuCacheMgr.cpp
vendored
@ -6,16 +6,29 @@

#include "CpuCacheMgr.h"
#include "server/ServerConfig.h"
#include "utils/Log.h"

namespace zilliz {
namespace milvus {
namespace cache {

namespace {
    constexpr int64_t unit = 1024 * 1024 * 1024;
}

CpuCacheMgr::CpuCacheMgr() {
    server::ConfigNode& config = server::ServerConfig::GetInstance().GetConfig(server::CONFIG_CACHE);
    int64_t cap = config.GetInt64Value(server::CONFIG_CPU_CACHE_CAPACITY, 16);
    cap *= 1024*1024*1024;
    cap *= unit;
    cache_ = std::make_shared<Cache>(cap, 1UL<<32);

    double free_percent = config.GetDoubleValue(server::CACHE_FREE_PERCENT, 0.85);
    if(free_percent > 0.0 && free_percent <= 1.0) {
        cache_->set_freemem_percent(free_percent);
    } else {
        SERVER_LOG_ERROR << "Invalid cache_free_percent: " << free_percent <<
            ", defaulting to " << cache_->freemem_percent();
    }
}

}
10
cpp/src/cache/DataObj.h
vendored
@ -20,6 +20,11 @@ public:
        : index_(index)
    {}

    DataObj(const engine::Index_ptr& index, int64_t size)
        : index_(index),
          size_(size)
    {}

    engine::Index_ptr data() { return index_; }
    const engine::Index_ptr& data() const { return index_; }

@ -28,11 +33,16 @@ public:
        return 0;
    }

    if(size_ > 0) {
        return size_;
    }
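    // fallback estimate: a dense float vector set occupies
    // count * dimension * sizeof(float) bytes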
    return index_->Count() * index_->Dimension() * sizeof(float);
}

private:
    engine::Index_ptr index_ = nullptr;
    int64_t size_ = 0;
};

using DataObjPtr = std::shared_ptr<DataObj>;
6
cpp/src/cache/GpuCacheMgr.cpp
vendored
@ -11,10 +11,14 @@ namespace zilliz {
namespace milvus {
namespace cache {

namespace {
    constexpr int64_t unit = 1024 * 1024 * 1024;
}

GpuCacheMgr::GpuCacheMgr() {
    server::ConfigNode& config = server::ServerConfig::GetInstance().GetConfig(server::CONFIG_CACHE);
    int64_t cap = config.GetInt64Value(server::CONFIG_GPU_CACHE_CAPACITY, 1);
    cap *= 1024*1024*1024;
    cap *= unit;
    cache_ = std::make_shared<Cache>(cap, 1UL<<32);
}
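// note: both CpuCacheMgr and GpuCacheMgr read a GB-denominated config value
// and scale it to bytes with the shared `unit` constant (1024 * 1024 * 1024)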

@ -94,7 +94,7 @@ double
ConfigNode::GetDoubleValue(const std::string &param_key, double default_val) const {
    std::string val = GetValue(param_key);
    if (!val.empty()) {
        return std::strtold(val.c_str(), nullptr);
        return std::strtod(val.c_str(), nullptr);
    } else {
        return default_val;
    }
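    // std::strtod matches the function's double return type; the previous
    // std::strtold produced a long double that was silently narrowed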

@ -9,14 +9,14 @@ namespace zilliz {
namespace milvus {
namespace engine {

const size_t K = 1024UL;
const size_t M = K * K;
const size_t G = K * M;
const size_t T = K * G;
constexpr size_t K = 1024UL;
constexpr size_t M = K * K;
constexpr size_t G = K * M;
constexpr size_t T = K * G;

const size_t MAX_TABLE_FILE_MEM = 128 * M;
constexpr size_t MAX_TABLE_FILE_MEM = 128 * M;

const int VECTOR_TYPE_SIZE = sizeof(float);
constexpr int VECTOR_TYPE_SIZE = sizeof(float);

} // namespace engine
} // namespace milvus
@ -12,11 +12,10 @@ namespace zilliz {
namespace milvus {
namespace engine {

DB::~DB() {}
DB::~DB() = default;

void DB::Open(const Options& options, DB** dbptr) {
    *dbptr = DBFactory::Build(options);
    return;
}

} // namespace engine
@ -44,13 +44,15 @@ public:

    virtual Status Size(uint64_t& result) = 0;

    virtual Status BuildIndex(const std::string& table_id) = 0;

    virtual Status DropAll() = 0;

    DB() = default;
    DB(const DB&) = delete;
    DB& operator=(const DB&) = delete;

    virtual ~DB();
    virtual ~DB() = 0;
}; // DB

} // namespace engine
@ -89,8 +89,11 @@ DBImpl::DBImpl(const Options& options)
|
||||
meta_ptr_ = DBMetaImplFactory::Build(options.meta, options.mode);
|
||||
mem_mgr_ = MemManagerFactory::Build(meta_ptr_, options_);
|
||||
if (options.mode != Options::MODE::READ_ONLY) {
|
||||
ENGINE_LOG_TRACE << "StartTimerTasks";
|
||||
StartTimerTasks();
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
Status DBImpl::CreateTable(meta::TableSchema& table_schema) {
|
||||
@ -99,6 +102,7 @@ Status DBImpl::CreateTable(meta::TableSchema& table_schema) {
|
||||
|
||||
Status DBImpl::DeleteTable(const std::string& table_id, const meta::DatesT& dates) {
|
||||
//dates partly delete files of the table but currently we don't support
|
||||
ENGINE_LOG_DEBUG << "Prepare to delete table " << table_id;
|
||||
|
||||
mem_mgr_->EraseMemVector(table_id); //not allow insert
|
||||
meta_ptr_->DeleteTable(table_id); //soft delete table
|
||||
@ -129,6 +133,7 @@ Status DBImpl::GetTableRowCount(const std::string& table_id, uint64_t& row_count
|
||||
|
||||
Status DBImpl::InsertVectors(const std::string& table_id_,
|
||||
uint64_t n, const float* vectors, IDNumbers& vector_ids_) {
|
||||
ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache";
|
||||
|
||||
auto start_time = METRICS_NOW_TIME;
|
||||
Status status = mem_mgr_->InsertVectors(table_id_, n, vectors, vector_ids_);
|
||||
@ -137,6 +142,8 @@ Status DBImpl::InsertVectors(const std::string& table_id_,
|
||||
// std::chrono::microseconds time_span = std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time);
|
||||
// double average_time = double(time_span.count()) / n;
|
||||
|
||||
ENGINE_LOG_DEBUG << "Insert vectors to cache finished";
|
||||
|
||||
CollectInsertMetrics(total_time, n, status.ok());
|
||||
return status;
|
||||
|
||||
@ -157,6 +164,8 @@ Status DBImpl::Query(const std::string &table_id, uint64_t k, uint64_t nq,
|
||||
|
||||
Status DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq,
|
||||
const float* vectors, const meta::DatesT& dates, QueryResults& results) {
|
||||
ENGINE_LOG_DEBUG << "Query by vectors";
|
||||
|
||||
//get all table files from table
|
||||
meta::DatePartionedTableFilesSchema files;
|
||||
auto status = meta_ptr_->FilesToSearch(table_id, dates, files);
|
||||
@ -169,12 +178,17 @@ Status DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq,
|
||||
}
|
||||
}
|
||||
|
||||
return QueryAsync(table_id, file_id_array, k, nq, vectors, dates, results);
|
||||
cache::CpuCacheMgr::GetInstance()->PrintInfo(); //print cache info before query
|
||||
status = QueryAsync(table_id, file_id_array, k, nq, vectors, dates, results);
|
||||
cache::CpuCacheMgr::GetInstance()->PrintInfo(); //print cache info after query
|
||||
return status;
|
||||
}
|
||||
|
||||
Status DBImpl::Query(const std::string& table_id, const std::vector<std::string>& file_ids,
|
||||
uint64_t k, uint64_t nq, const float* vectors,
|
||||
const meta::DatesT& dates, QueryResults& results) {
|
||||
ENGINE_LOG_DEBUG << "Query by file ids";
|
||||
|
||||
//get specified files
|
||||
std::vector<size_t> ids;
|
||||
for (auto &id : file_ids) {
|
||||
@ -194,15 +208,19 @@ Status DBImpl::Query(const std::string& table_id, const std::vector<std::string>
|
||||
return Status::Error("Invalid file id");
|
||||
}
|
||||
|
||||
return QueryAsync(table_id, files_array, k, nq, vectors, dates, results);
|
||||
cache::CpuCacheMgr::GetInstance()->PrintInfo(); //print cache info before query
|
||||
status = QueryAsync(table_id, files_array, k, nq, vectors, dates, results);
|
||||
cache::CpuCacheMgr::GetInstance()->PrintInfo(); //print cache info after query
|
||||
return status;
|
||||
}
|
||||
|
||||
Status DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files,
|
||||
uint64_t k, uint64_t nq, const float* vectors,
|
||||
const meta::DatesT& dates, QueryResults& results) {
|
||||
server::TimeRecorder rc("");
|
||||
|
||||
//step 1: get files to search
|
||||
ENGINE_LOG_DEBUG << "Search DateT Size=" << files.size();
|
||||
ENGINE_LOG_DEBUG << "Engine query begin, index file count:" << files.size() << " date range count:" << dates.size();
|
||||
SearchContextPtr context = std::make_shared<SearchContext>(k, nq, vectors);
|
||||
for (auto &file : files) {
|
||||
TableFileSchemaPtr file_ptr = std::make_shared<meta::TableFileSchema>(file);
|
||||
@ -215,8 +233,31 @@ Status DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSch
|
||||
|
||||
context->WaitResult();
|
||||
|
||||
//step 3: construct results
|
||||
//step 3: print time cost information
|
||||
double load_cost = context->LoadCost();
|
||||
double search_cost = context->SearchCost();
|
||||
double reduce_cost = context->ReduceCost();
|
||||
std::string load_info = server::TimeRecorder::GetTimeSpanStr(load_cost);
|
||||
std::string search_info = server::TimeRecorder::GetTimeSpanStr(search_cost);
|
||||
std::string reduce_info = server::TimeRecorder::GetTimeSpanStr(reduce_cost);
|
||||
if(search_cost > 0.0 || reduce_cost > 0.0) {
|
||||
double total_cost = load_cost + search_cost + reduce_cost;
|
||||
double load_percent = load_cost/total_cost;
|
||||
double search_percent = search_cost/total_cost;
|
||||
double reduce_percent = reduce_cost/total_cost;
|
||||
|
||||
ENGINE_LOG_DEBUG << "Engine load index totally cost:" << load_info << " percent: " << load_percent*100 << "%";
|
||||
ENGINE_LOG_DEBUG << "Engine search index totally cost:" << search_info << " percent: " << search_percent*100 << "%";
|
||||
ENGINE_LOG_DEBUG << "Engine reduce topk totally cost:" << reduce_info << " percent: " << reduce_percent*100 << "%";
|
||||
} else {
|
||||
ENGINE_LOG_DEBUG << "Engine load cost:" << load_info
|
||||
<< " search cost: " << search_info
|
||||
<< " reduce cost: " << reduce_info;
|
||||
}
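    // load_percent + search_percent + reduce_percent sum to 1.0 by
    // construction, so the three log lines account for the full query time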

    //step 4: construct results
    results = context->GetResult();
    rc.ElapseFromBegin("Engine query totally cost");

    return Status::OK();
}
@ -236,6 +277,8 @@ void DBImpl::BackgroundTimerTask() {
            for(auto& iter : index_thread_results_) {
                iter.wait();
            }

            ENGINE_LOG_DEBUG << "DB background thread exit";
            break;
        }

@ -254,6 +297,8 @@ void DBImpl::StartMetricTask() {
        return;
    }

    ENGINE_LOG_TRACE << "Start metric task";

    server::Metrics::GetInstance().KeepingAliveCounterIncrement(METRIC_ACTION_INTERVAL);
    int64_t cache_usage = cache::CpuCacheMgr::GetInstance()->CacheUsage();
    int64_t cache_total = cache::CpuCacheMgr::GetInstance()->CacheCapacity();
@ -266,17 +311,14 @@ void DBImpl::StartMetricTask() {
    server::Metrics::GetInstance().GPUPercentGaugeSet();
    server::Metrics::GetInstance().GPUMemoryUsageGaugeSet();
    server::Metrics::GetInstance().OctetsSet();

    ENGINE_LOG_TRACE << "Metric task finished";
}

void DBImpl::StartCompactionTask() {
//    static int count = 0;
//    count++;
//    std::cout << "StartCompactionTask: " << count << std::endl;
//    std::cout << "c: " << count++ << std::endl;
    static uint64_t compact_clock_tick = 0;
    compact_clock_tick++;
    if(compact_clock_tick%COMPACT_ACTION_INTERVAL != 0) {
//        std::cout << "c r: " << count++ << std::endl;
        return;
    }

@ -287,6 +329,10 @@ void DBImpl::StartCompactionTask() {
        compact_table_ids_.insert(id);
    }

    if(!temp_table_ids.empty()) {
        SERVER_LOG_DEBUG << "Insert cache serialized";
    }

    //compaction has been finished?
    if(!compact_thread_results_.empty()) {
        std::chrono::milliseconds span(10);
@ -305,13 +351,15 @@ void DBImpl::StartCompactionTask() {

Status DBImpl::MergeFiles(const std::string& table_id, const meta::DateT& date,
        const meta::TableFilesSchema& files) {
    ENGINE_LOG_DEBUG << "Merge files for table " << table_id;

    meta::TableFileSchema table_file;
    table_file.table_id_ = table_id;
    table_file.date_ = date;
    Status status = meta_ptr_->CreateTableFile(table_file);

    if (!status.ok()) {
        ENGINE_LOG_INFO << status.ToString() << std::endl;
        ENGINE_LOG_ERROR << "Failed to create table: " << status.ToString();
        return status;
    }

@ -350,10 +398,11 @@ Status DBImpl::MergeFiles(const std::string& table_id, const meta::DateT& date,
    updated.push_back(table_file);
    status = meta_ptr_->UpdateTableFiles(updated);
    ENGINE_LOG_DEBUG << "New merged file " << table_file.file_id_ <<
        " of size=" << index->PhysicalSize()/(1024*1024) << " M";
        " of size " << index->PhysicalSize() << " bytes";

    //currently disable this line to avoid memory pressure
    //index->Cache();
    if(options_.insert_cache_immediately_) {
        index->Cache();
    }

    return status;
}
@ -362,6 +411,7 @@ Status DBImpl::BackgroundMergeFiles(const std::string& table_id) {
    meta::DatePartionedTableFilesSchema raw_files;
    auto status = meta_ptr_->FilesToMerge(table_id, raw_files);
    if (!status.ok()) {
        ENGINE_LOG_ERROR << "Failed to get merge files for table: " << table_id;
        return status;
    }

@ -383,16 +433,14 @@ Status DBImpl::BackgroundMergeFiles(const std::string& table_id) {
}

void DBImpl::BackgroundCompaction(std::set<std::string> table_ids) {
//    static int b_count = 0;
//    b_count++;
//    std::cout << "BackgroundCompaction: " << b_count << std::endl;
    ENGINE_LOG_TRACE << " Background compaction thread start";

    Status status;
    for (auto& table_id : table_ids) {
        status = BackgroundMergeFiles(table_id);
        if (!status.ok()) {
            ENGINE_LOG_ERROR << "Merge files for table " << table_id << " failed: " << status.ToString();
            return;
            continue; //let other tables get a chance to merge
        }
    }

@ -401,15 +449,16 @@ void DBImpl::BackgroundCompaction(std::set<std::string> table_ids) {
    int ttl = 1;
    if (options_.mode == Options::MODE::CLUSTER) {
        ttl = meta::D_SEC;
//        ENGINE_LOG_DEBUG << "Server mode is cluster. Clean up files with ttl = " << std::to_string(ttl) << "seconds.";
    }
    meta_ptr_->CleanUpFilesWithTTL(ttl);

    ENGINE_LOG_TRACE << " Background compaction thread exit";
}

void DBImpl::StartBuildIndexTask() {
void DBImpl::StartBuildIndexTask(bool force) {
    static uint64_t index_clock_tick = 0;
    index_clock_tick++;
    if(index_clock_tick%INDEX_ACTION_INTERVAL != 0) {
    if(!force && (index_clock_tick%INDEX_ACTION_INTERVAL != 0)) {
        return;
    }

@ -428,22 +477,42 @@ void DBImpl::StartBuildIndexTask() {
    }
}

Status DBImpl::BuildIndex(const std::string& table_id) {
    bool has = false;
    meta_ptr_->HasNonIndexFiles(table_id, has);
    int times = 1;

    while (has) {
        ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times;
        meta_ptr_->UpdateTableFilesToIndex(table_id);
        /* StartBuildIndexTask(true); */
        std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10*1000, times*100)));
        meta_ptr_->HasNonIndexFiles(table_id, has);
        times++;
    }
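    // linear backoff capped at 10 seconds: sleeps 100 ms, 200 ms, ... until
    // std::min(10*1000, times*100) saturates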
|
||||
return Status::OK();
|
||||
/* return BuildIndexByTable(table_id); */
|
||||
}
|
||||
|
||||
Status DBImpl::BuildIndex(const meta::TableFileSchema& file) {
|
||||
ExecutionEnginePtr to_index = EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_);
|
||||
if(to_index == nullptr) {
|
||||
ENGINE_LOG_ERROR << "Invalid engine type";
|
||||
return Status::Error("Invalid engine type");
|
||||
}
|
||||
|
||||
try {
|
||||
//step 1: load index
|
||||
to_index->Load();
|
||||
to_index->Load(options_.insert_cache_immediately_);
|
||||
|
||||
//step 2: create table file
|
||||
meta::TableFileSchema table_file;
|
||||
table_file.table_id_ = file.table_id_;
|
||||
table_file.date_ = file.date_;
|
||||
table_file.file_type_ = meta::TableFileSchema::INDEX; //for multi-db-path, distribute index file averagely to each path
|
||||
Status status = meta_ptr_->CreateTableFile(table_file);
|
||||
if (!status.ok()) {
|
||||
ENGINE_LOG_ERROR << "Failed to create table: " << status.ToString();
|
||||
return status;
|
||||
}
|
||||
|
||||
@ -476,25 +545,49 @@ Status DBImpl::BuildIndex(const meta::TableFileSchema& file) {
|
||||
meta_ptr_->UpdateTableFiles(update_files);
|
||||
|
||||
ENGINE_LOG_DEBUG << "New index file " << table_file.file_id_ << " of size "
|
||||
<< index->PhysicalSize()/(1024*1024) << " M"
|
||||
<< index->PhysicalSize() << " bytes"
|
||||
<< " from file " << to_remove.file_id_;
|
||||
|
||||
//current disable this line to avoid memory
|
||||
//index->Cache();
|
||||
if(options_.insert_cache_immediately_) {
|
||||
index->Cache();
|
||||
}
|
||||
|
||||
} catch (std::exception& ex) {
|
||||
return Status::Error("Build index encounter exception", ex.what());
|
||||
std::string msg = "Build index encounter exception" + std::string(ex.what());
|
||||
ENGINE_LOG_ERROR << msg;
|
||||
return Status::Error(msg);
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
}

Status DBImpl::BuildIndexByTable(const std::string& table_id) {
    std::unique_lock<std::mutex> lock(build_index_mutex_);
    meta::TableFilesSchema to_index_files;
    meta_ptr_->FilesToIndex(to_index_files);

    Status status;

    for (auto& file : to_index_files) {
        status = BuildIndex(file);
        if (!status.ok()) {
            ENGINE_LOG_ERROR << "Building index for " << file.id_ << " failed: " << status.ToString();
            return status;
        }
        ENGINE_LOG_DEBUG << "Sync building index for " << file.id_ << " passed";
    }

    return status;
}

void DBImpl::BackgroundBuildIndex() {
    ENGINE_LOG_TRACE << " Background build index thread start";

    std::unique_lock<std::mutex> lock(build_index_mutex_);
    meta::TableFilesSchema to_index_files;
    meta_ptr_->FilesToIndex(to_index_files);
    Status status;
    for (auto& file : to_index_files) {
        /* ENGINE_LOG_DEBUG << "Building index for " << file.location; */
        status = BuildIndex(file);
        if (!status.ok()) {
            ENGINE_LOG_ERROR << "Building index for " << file.id_ << " failed: " << status.ToString();
@@ -505,7 +598,8 @@ void DBImpl::BackgroundBuildIndex() {
            break;
        }
    }
    /* ENGINE_LOG_DEBUG << "All Building index Done"; */

    ENGINE_LOG_TRACE << " Background build index thread exit";
}

Status DBImpl::DropAll() {

@@ -82,6 +82,8 @@ class DBImpl : public DB {

    Status Size(uint64_t &result) override;

    Status BuildIndex(const std::string& table_id) override;

    ~DBImpl() override;

 private:
@@ -107,9 +109,11 @@ class DBImpl : public DB {
    Status BackgroundMergeFiles(const std::string &table_id);
    void BackgroundCompaction(std::set<std::string> table_ids);

    void StartBuildIndexTask();
    void StartBuildIndexTask(bool force=false);
    void BackgroundBuildIndex();

    Status
    BuildIndexByTable(const std::string& table_id);
    Status
    BuildIndex(const meta::TableFileSchema &);

@@ -130,6 +134,8 @@ class DBImpl : public DB {
    server::ThreadPool index_thread_pool_;
    std::list<std::future<void>> index_thread_results_;

    std::mutex build_index_mutex_;

}; // DBImpl


@@ -83,26 +83,6 @@ using ConnectorT = decltype(StoragePrototype(""));
static std::unique_ptr<ConnectorT> ConnectorPtr;
using ConditionT = decltype(c(&TableFileSchema::id_) == 1UL);

std::string DBMetaImpl::GetTablePath(const std::string &table_id) {
    return options_.path + "/tables/" + table_id;
}

std::string DBMetaImpl::GetTableDatePartitionPath(const std::string &table_id, DateT &date) {
    std::stringstream ss;
    ss << GetTablePath(table_id) << "/" << date;
    return ss.str();
}

void DBMetaImpl::GetTableFilePath(TableFileSchema &group_file) {
    if (group_file.date_ == EmptyDate) {
        group_file.date_ = Meta::GetDate();
    }
    std::stringstream ss;
    ss << GetTableDatePartitionPath(group_file.table_id_, group_file.date_)
       << "/" << group_file.file_id_;
    group_file.location_ = ss.str();
}

Status DBMetaImpl::NextTableId(std::string &table_id) {
    std::stringstream ss;
    SimpleIDGenerator g;
@@ -196,7 +176,8 @@ Status DBMetaImpl::CreateTable(TableSchema &table_schema) {
            if(TableSchema::TO_DELETE == std::get<0>(table[0])) {
                return Status::Error("Table already exists and it is in delete state, please wait a second");
            } else {
                return Status::OK();//table already exists, no error
                // Change from no error to already exist.
                return Status::AlreadyExist("Table already exists");
            }
        }
    }
@@ -212,15 +193,7 @@ Status DBMetaImpl::CreateTable(TableSchema &table_schema) {
            return Status::DBTransactionError("Add Table Error");
        }

        auto table_path = GetTablePath(table_schema.table_id_);
        table_schema.location_ = table_path;
        if (!boost::filesystem::is_directory(table_path)) {
            auto ret = boost::filesystem::create_directories(table_path);
            if (!ret) {
                ENGINE_LOG_ERROR << "Create directory " << table_path << " Error";
                return Status::Error("Failed to create table path");
            }
        }
        return utils::CreateTablePath(options_, table_schema.table_id_);

    } catch (std::exception &e) {
        return HandleException("Encounter exception when create table", e);
@@ -306,9 +279,6 @@ Status DBMetaImpl::DescribeTable(TableSchema &table_schema) {
            return Status::NotFound("Table " + table_schema.table_id_ + " not found");
        }

        auto table_path = GetTablePath(table_schema.table_id_);
        table_schema.location_ = table_path;

    } catch (std::exception &e) {
        return HandleException("Encounter exception when describe table", e);
    }
@@ -316,6 +286,30 @@
    return Status::OK();
}

Status DBMetaImpl::HasNonIndexFiles(const std::string& table_id, bool& has) {
    has = false;
    try {
        auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_),
                                             where((c(&TableFileSchema::file_type_) == (int) TableFileSchema::RAW
                                                    or
                                                    c(&TableFileSchema::file_type_) == (int) TableFileSchema::NEW
                                                    or
                                                    c(&TableFileSchema::file_type_) == (int) TableFileSchema::TO_INDEX)
                                                   and c(&TableFileSchema::table_id_) == table_id
                                             ));

        if (selected.size() >= 1) {
            has = true;
        } else {
            has = false;
        }

    } catch (std::exception &e) {
        return HandleException("Encounter exception when check non index files", e);
    }
    return Status::OK();
}

Status DBMetaImpl::HasTable(const std::string &table_id, bool &has_or_not) {
    has_or_not = false;

@@ -388,21 +382,11 @@ Status DBMetaImpl::CreateTableFile(TableFileSchema &file_schema) {
        file_schema.created_on_ = utils::GetMicroSecTimeStamp();
        file_schema.updated_time_ = file_schema.created_on_;
        file_schema.engine_type_ = table_schema.engine_type_;
        ENGINE_LOG_DEBUG << "CreateTableFile EngineType: " << table_schema.engine_type_;
        GetTableFilePath(file_schema);

        auto id = ConnectorPtr->insert(file_schema);
        file_schema.id_ = id;

        auto partition_path = GetTableDatePartitionPath(file_schema.table_id_, file_schema.date_);

        if (!boost::filesystem::is_directory(partition_path)) {
            auto ret = boost::filesystem::create_directory(partition_path);
            if (!ret) {
                ENGINE_LOG_ERROR << "Create directory " << partition_path << " Error";
                return Status::DBTransactionError("Failed to create partition directory");
            }
        }
        return utils::CreateTableFilePath(options_, file_schema);

    } catch (std::exception& ex) {
        return HandleException("Encounter exception when create table file", ex);
@@ -439,7 +423,7 @@ Status DBMetaImpl::FilesToIndex(TableFilesSchema &files) {
            table_file.date_ = std::get<5>(file);
            table_file.engine_type_ = std::get<6>(file);

            GetTableFilePath(table_file);
            utils::GetTableFilePath(options_, table_file);
            auto groupItr = groups.find(table_file.table_id_);
            if (groupItr == groups.end()) {
                TableSchema table_schema;
@@ -502,7 +486,7 @@ Status DBMetaImpl::FilesToSearch(const std::string &table_id,
            table_file.date_ = std::get<5>(file);
            table_file.engine_type_ = std::get<6>(file);
            table_file.dimension_ = table_schema.dimension_;
            GetTableFilePath(table_file);
            utils::GetTableFilePath(options_, table_file);
            auto dateItr = files.find(table_file.date_);
            if (dateItr == files.end()) {
                files[table_file.date_] = TableFilesSchema();
@@ -544,7 +528,7 @@ Status DBMetaImpl::FilesToSearch(const std::string &table_id,
            table_file.date_ = std::get<5>(file);
            table_file.engine_type_ = std::get<6>(file);
            table_file.dimension_ = table_schema.dimension_;
            GetTableFilePath(table_file);
            utils::GetTableFilePath(options_, table_file);
            auto dateItr = files.find(table_file.date_);
            if (dateItr == files.end()) {
                files[table_file.date_] = TableFilesSchema();
@@ -594,7 +578,7 @@ Status DBMetaImpl::FilesToMerge(const std::string &table_id,
            table_file.size_ = std::get<4>(file);
            table_file.date_ = std::get<5>(file);
            table_file.dimension_ = table_schema.dimension_;
            GetTableFilePath(table_file);
            utils::GetTableFilePath(options_, table_file);
            auto dateItr = files.find(table_file.date_);
            if (dateItr == files.end()) {
                files[table_file.date_] = TableFilesSchema();
@@ -640,7 +624,7 @@ Status DBMetaImpl::GetTableFiles(const std::string& table_id,
            file_schema.date_ = std::get<4>(file);
            file_schema.engine_type_ = std::get<5>(file);
            file_schema.dimension_ = table_schema.dimension_;
            GetTableFilePath(file_schema);
            utils::GetTableFilePath(options_, file_schema);

            table_files.emplace_back(file_schema);
        }
@@ -692,16 +676,22 @@ Status DBMetaImpl::Archive() {
Status DBMetaImpl::Size(uint64_t &result) {
    result = 0;
    try {
        auto selected = ConnectorPtr->select(columns(sum(&TableFileSchema::size_)),
                                             where(
                                                 c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE
                                             ));
        auto files = ConnectorPtr->select(columns(&TableFileSchema::size_,
                                                  &TableFileSchema::file_type_,
                                                  &TableFileSchema::engine_type_),
                                          where(
                                              c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE
                                          ));

        for (auto &sub_query : selected) {
            if (!std::get<0>(sub_query)) {
                continue;
        for (auto &file : files) {
            auto file_size = std::get<0>(file);
            auto file_type = std::get<1>(file);
            auto engine_type = std::get<2>(file);
            if(file_type == (int)TableFileSchema::INDEX && engine_type == (int)EngineType::FAISS_IVFSQ8) {
                result += (uint64_t)file_size/4; //hardcoded estimate for SQ8: codes are ~1/4 the raw float size
            } else {
                result += (uint64_t)file_size;
            }
            result += (uint64_t) (*std::get<0>(sub_query));
        }
    } catch (std::exception &e) {
        return HandleException("Encounter exception when calculate db size", e);
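The SQ8 branch above estimates index memory as a quarter of the stored file size, since IVFSQ8 keeps one 8-bit code per 32-bit float component. A standalone sketch of the arithmetic (the byte counts here are made up for illustration):

#include <cstdint>
#include <iostream>

// Standalone sketch of the estimate used in DBMetaImpl::Size above.
int main() {
    uint64_t raw_bytes = 400ULL * 1024 * 1024;  // hypothetical raw float data size
    uint64_t sq8_estimate = raw_bytes / 4;      // the "/4" hardcoded for SQ8 above
    std::cout << "raw: " << raw_bytes << " B, sq8 estimate: " << sq8_estimate << " B\n";
    return 0;
}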
@@ -792,6 +782,23 @@ Status DBMetaImpl::UpdateTableFile(TableFileSchema &file_schema) {
    return Status::OK();
}

Status DBMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) {
    try {
        ConnectorPtr->update_all(
            set(
                c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_INDEX
            ),
            where(
                c(&TableFileSchema::table_id_) == table_id and
                c(&TableFileSchema::file_type_) == (int) TableFileSchema::RAW
            ));
    } catch (std::exception &e) {
        return HandleException("Encounter exception when update table files to to_index", e);
    }

    return Status::OK();
}

Status DBMetaImpl::UpdateTableFiles(TableFilesSchema &files) {
    try {
        MetricCollector metric;
@@ -856,10 +863,9 @@ Status DBMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
            table_file.table_id_ = std::get<1>(file);
            table_file.file_id_ = std::get<2>(file);
            table_file.date_ = std::get<3>(file);
            GetTableFilePath(table_file);

            ENGINE_LOG_DEBUG << "Removing deleted id =" << table_file.id_ << " location = " << table_file.location_ << std::endl;
            boost::filesystem::remove(table_file.location_);
            utils::DeleteTableFilePath(options_, table_file);
            ENGINE_LOG_DEBUG << "Removing file id:" << table_file.id_ << " location:" << table_file.location_;
            ConnectorPtr->remove<TableFileSchema>(table_file.id_);

        }
@@ -883,10 +889,7 @@ Status DBMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {

        auto commited = ConnectorPtr->transaction([&]() mutable {
            for (auto &table : tables) {
                auto table_path = GetTablePath(std::get<1>(table));

                ENGINE_LOG_DEBUG << "Remove table folder: " << table_path;
                boost::filesystem::remove_all(table_path);
                utils::DeleteTablePath(options_, std::get<1>(table));
                ConnectorPtr->remove<TableSchema>(std::get<0>(table));
            }

@@ -8,67 +8,89 @@
#include "Meta.h"
#include "Options.h"


namespace zilliz {
namespace milvus {
namespace engine {
namespace meta {

auto StoragePrototype(const std::string& path);
auto StoragePrototype(const std::string &path);

class DBMetaImpl : public Meta {
public:
    DBMetaImpl(const DBMetaOptions& options_);
 public:
    explicit DBMetaImpl(const DBMetaOptions &options_);

    virtual Status CreateTable(TableSchema& table_schema) override;
    virtual Status DescribeTable(TableSchema& group_info_) override;
    virtual Status HasTable(const std::string& table_id, bool& has_or_not) override;
    virtual Status AllTables(std::vector<TableSchema>& table_schema_array) override;
    Status
    CreateTable(TableSchema &table_schema) override;

    virtual Status DeleteTable(const std::string& table_id) override;
    virtual Status DeleteTableFiles(const std::string& table_id) override;
    Status
    DescribeTable(TableSchema &group_info_) override;

    virtual Status CreateTableFile(TableFileSchema& file_schema) override;
    virtual Status DropPartitionsByDates(const std::string& table_id,
                                         const DatesT& dates) override;
    Status
    HasTable(const std::string &table_id, bool &has_or_not) override;

    virtual Status GetTableFiles(const std::string& table_id,
                                 const std::vector<size_t>& ids,
                                 TableFilesSchema& table_files) override;
    Status
    AllTables(std::vector<TableSchema> &table_schema_array) override;

    virtual Status UpdateTableFile(TableFileSchema& file_schema) override;
    Status
    DeleteTable(const std::string &table_id) override;

    virtual Status UpdateTableFiles(TableFilesSchema& files) override;
    Status
    DeleteTableFiles(const std::string &table_id) override;

    virtual Status FilesToSearch(const std::string& table_id,
                                 const DatesT& partition,
                                 DatePartionedTableFilesSchema& files) override;
    Status
    CreateTableFile(TableFileSchema &file_schema) override;

    virtual Status FilesToMerge(const std::string& table_id,
                                DatePartionedTableFilesSchema& files) override;
    Status
    DropPartitionsByDates(const std::string &table_id, const DatesT &dates) override;

    virtual Status FilesToIndex(TableFilesSchema&) override;
    Status
    GetTableFiles(const std::string &table_id, const std::vector<size_t> &ids, TableFilesSchema &table_files) override;

    virtual Status Archive() override;
    Status
    HasNonIndexFiles(const std::string &table_id, bool &has) override;

    virtual Status Size(uint64_t& result) override;
    Status
    UpdateTableFilesToIndex(const std::string &table_id) override;

    virtual Status CleanUp() override;
    Status
    UpdateTableFile(TableFileSchema &file_schema) override;

    virtual Status CleanUpFilesWithTTL(uint16_t seconds) override;
    Status
    UpdateTableFiles(TableFilesSchema &files) override;

    virtual Status DropAll() override;
    Status
    FilesToSearch(const std::string &table_id, const DatesT &partition, DatePartionedTableFilesSchema &files) override;

    virtual Status Count(const std::string& table_id, uint64_t& result) override;
    Status
    FilesToMerge(const std::string &table_id, DatePartionedTableFilesSchema &files) override;

    virtual ~DBMetaImpl();
    Status
    FilesToIndex(TableFilesSchema &) override;

private:
    Status NextFileId(std::string& file_id);
    Status NextTableId(std::string& table_id);
    Status
    Archive() override;

    Status
    Size(uint64_t &result) override;

    Status
    CleanUp() override;

    Status
    CleanUpFilesWithTTL(uint16_t seconds) override;

    Status
    DropAll() override;

    Status Count(const std::string &table_id, uint64_t &result) override;

    ~DBMetaImpl() override;

 private:
    Status NextFileId(std::string &file_id);
    Status NextTableId(std::string &table_id);
    Status DiscardFiles(long to_discard_size);
    std::string GetTablePath(const std::string& table_id);
    std::string GetTableDatePartitionPath(const std::string& table_id, DateT& date);
    void GetTableFilePath(TableFileSchema& group_file);
    Status Initialize();

    const DBMetaOptions options_;

@@ -23,13 +23,19 @@ EngineFactory::Build(uint16_t dimension,
    switch (type) {
        case EngineType::FAISS_IDMAP: {
            execution_engine_ptr =
                ExecutionEnginePtr(new FaissExecutionEngine(dimension, location, "IDMap", "IDMap,Flat"));
                ExecutionEnginePtr(new FaissExecutionEngine(dimension, location, BUILD_INDEX_TYPE_IDMAP, "IDMap,Flat"));
            break;
        }

        case EngineType::FAISS_IVFFLAT_GPU: {
            execution_engine_ptr =
                ExecutionEnginePtr(new FaissExecutionEngine(dimension, location, "IVF", "IDMap,Flat"));
                ExecutionEnginePtr(new FaissExecutionEngine(dimension, location, BUILD_INDEX_TYPE_IVF, "IDMap,Flat"));
            break;
        }

        case EngineType::FAISS_IVFSQ8: {
            execution_engine_ptr =
                ExecutionEnginePtr(new FaissExecutionEngine(dimension, location, BUILD_INDEX_TYPE_IVFSQ8, "IDMap,Flat"));
            break;
        }
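The factory maps each EngineType to a FaissExecutionEngine configured with a named build-index type. A minimal standalone imitation of the switch pattern, with simplified stand-in types rather than the real classes:

#include <iostream>
#include <memory>
#include <string>

// Stand-in types for illustration only; the real factory returns an
// ExecutionEnginePtr wrapping a FaissExecutionEngine.
enum class EngineType { INVALID = 0, FAISS_IDMAP = 1, FAISS_IVFFLAT, FAISS_IVFSQ8 };

struct Engine { std::string build_index_type; };

std::shared_ptr<Engine> Build(EngineType type) {
    switch (type) {
        case EngineType::FAISS_IDMAP:   return std::make_shared<Engine>(Engine{"IDMap"});
        case EngineType::FAISS_IVFFLAT: return std::make_shared<Engine>(Engine{"IVF"});
        case EngineType::FAISS_IVFSQ8:  return std::make_shared<Engine>(Engine{"IVFSQ8"});
        default:                        return nullptr;  // unknown type -> null engine, as above
    }
}

int main() {
    auto engine = Build(EngineType::FAISS_IVFSQ8);
    std::cout << (engine ? engine->build_index_type : "invalid") << "\n";
    return 0;
}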

@@ -17,9 +17,9 @@ namespace engine {
enum class EngineType {
    INVALID = 0,
    FAISS_IDMAP = 1,
    FAISS_IVFFLAT_GPU,
    FAISS_IVFFLAT_CPU,
    SPTAG_KDT_RNT_CPU,
    FAISS_IVFFLAT,
    FAISS_IVFSQ8,
    MAX_VALUE = FAISS_IVFSQ8,
};

class ExecutionEngine {
@@ -39,7 +39,7 @@ public:

    virtual Status Serialize() = 0;

    virtual Status Load() = 0;
    virtual Status Load(bool to_cache = true) = 0;

    virtual Status Merge(const std::string& location) = 0;

@@ -8,6 +8,7 @@
#include <src/server/ServerConfig.h>
#include <src/metrics/Metrics.h>
#include "Log.h"
#include "utils/CommonUtil.h"

#include "src/cache/CpuCacheMgr.h"
#include "ExecutionEngineImpl.h"
@@ -51,16 +52,12 @@ VecIndexPtr ExecutionEngineImpl::CreatetVecIndex(EngineType type) {
        index = GetVecIndexFactory(IndexType::FAISS_IDMAP);
        break;
    }
    case EngineType::FAISS_IVFFLAT_GPU: {
    case EngineType::FAISS_IVFFLAT: {
        index = GetVecIndexFactory(IndexType::FAISS_IVFFLAT_MIX);
        break;
    }
    case EngineType::FAISS_IVFFLAT_CPU: {
        index = GetVecIndexFactory(IndexType::FAISS_IVFFLAT_CPU);
        break;
    }
    case EngineType::SPTAG_KDT_RNT_CPU: {
        index = GetVecIndexFactory(IndexType::SPTAG_KDT_RNT_CPU);
    case EngineType::FAISS_IVFSQ8: {
        index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_MIX);
        break;
    }
    default: {
@@ -92,7 +89,7 @@ size_t ExecutionEngineImpl::Dimension() const {
}

size_t ExecutionEngineImpl::PhysicalSize() const {
    return (size_t) (Count() * Dimension()) * sizeof(float);
    return server::CommonUtil::GetFileSize(location_);
}

Status ExecutionEngineImpl::Serialize() {
@@ -103,14 +100,13 @@ Status ExecutionEngineImpl::Serialize() {
    return Status::OK();
}

Status ExecutionEngineImpl::Load() {
Status ExecutionEngineImpl::Load(bool to_cache) {
    index_ = zilliz::milvus::cache::CpuCacheMgr::GetInstance()->GetIndex(location_);
    bool to_cache = false;
    bool already_in_cache = (index_ != nullptr);
    auto start_time = METRICS_NOW_TIME;
    if (!index_) {
        try {
            index_ = read_index(location_);
            to_cache = true;
            ENGINE_LOG_DEBUG << "Disk io from: " << location_;
        } catch (knowhere::KnowhereException &e) {
            ENGINE_LOG_ERROR << e.what();
@@ -120,16 +116,16 @@ Status ExecutionEngineImpl::Load() {
        }
    }

    if (to_cache) {
    if (!already_in_cache && to_cache) {
        Cache();
        auto end_time = METRICS_NOW_TIME;
        auto total_time = METRICS_MICROSECONDS(start_time, end_time);

        server::Metrics::GetInstance().FaissDiskLoadDurationSecondsHistogramObserve(total_time);
        double total_size = Size();
        double physical_size = PhysicalSize();

        server::Metrics::GetInstance().FaissDiskLoadSizeBytesHistogramObserve(total_size);
        server::Metrics::GetInstance().FaissDiskLoadIOSpeedGaugeSet(total_size / double(total_time));
        server::Metrics::GetInstance().FaissDiskLoadSizeBytesHistogramObserve(physical_size);
        server::Metrics::GetInstance().FaissDiskLoadIOSpeedGaugeSet(physical_size / double(total_time));
    }
    return Status::OK();
}
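Load(bool to_cache) above follows a read-through pattern: serve from the CPU cache when possible, otherwise read from disk, and insert into the cache only when the item was newly read and caching was requested, so the disk-load metrics fire only on real I/O. A standalone sketch with stand-in types (not the real CpuCacheMgr API):

#include <iostream>
#include <map>
#include <memory>
#include <string>

// Simplified stand-ins: a string doubles as the "index", a map as the cache.
using IndexPtr = std::shared_ptr<std::string>;
static std::map<std::string, IndexPtr> g_cache;  // stand-in for CpuCacheMgr

IndexPtr ReadFromDisk(const std::string& location) {
    return std::make_shared<std::string>("index@" + location);  // fake disk read
}

IndexPtr Load(const std::string& location, bool to_cache) {
    IndexPtr index = g_cache.count(location) ? g_cache[location] : nullptr;
    bool already_in_cache = (index != nullptr);
    if (!index) {
        index = ReadFromDisk(location);  // the "Disk io from: ..." branch
    }
    if (!already_in_cache && to_cache) {
        g_cache[location] = index;       // corresponds to Cache() plus the metrics
    }
    return index;
}

int main() {
    Load("/tmp/f1", true);   // first call: disk read, then cached
    Load("/tmp/f1", true);   // second call: served from cache, no re-insert
    std::cout << "cached entries: " << g_cache.size() << "\n";
    return 0;
}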
@@ -215,9 +211,8 @@ Status ExecutionEngineImpl::Init() {
    gpu_num = server_config.GetInt32Value("gpu_index", 0);

    switch (build_type) {
        case EngineType::FAISS_IVFFLAT_GPU: {
        }
        case EngineType::FAISS_IVFFLAT_CPU: {
        case EngineType::FAISS_IVFSQ8:
        case EngineType::FAISS_IVFFLAT: {
            ConfigNode engine_config = config.GetConfig(CONFIG_ENGINE);
            nprobe_ = engine_config.GetInt32Value(CONFIG_NPROBE, 1);
            break;

@@ -40,7 +40,7 @@ class ExecutionEngineImpl : public ExecutionEngine {

    Status Serialize() override;

    Status Load() override;
    Status Load(bool to_cache) override;

    Status Merge(const std::string &location) override;

@@ -6,6 +6,7 @@
#if 0
#include "FaissExecutionEngine.h"
#include "Log.h"
#include "utils/CommonUtil.h"

#include <faiss/AutoTune.h>
#include <faiss/MetaIndexes.h>
@@ -22,21 +23,52 @@ namespace zilliz {
namespace milvus {
namespace engine {

namespace {
std::string GetMetricType() {
    server::ServerConfig &config = server::ServerConfig::GetInstance();
    server::ConfigNode engine_config = config.GetConfig(server::CONFIG_ENGINE);
    return engine_config.GetValue(server::CONFIG_METRICTYPE, "L2");
}
}

std::string IndexStatsHelper::ToString(const std::string &prefix) const {
    return "";
}

void IndexStatsHelper::Reset() const {
    faiss::indexIVF_stats.reset();
}

std::string FaissIndexIVFStatsHelper::ToString(const std::string &prefix) const {
    std::stringstream ss;
    ss << prefix;
    ss << identifier_ << ":";
    ss << " NQ=" << faiss::indexIVF_stats.nq;
    ss << " NL=" << faiss::indexIVF_stats.nlist;
    ss << " ND=" << faiss::indexIVF_stats.ndis;
    ss << " NH=" << faiss::indexIVF_stats.nheap_updates;
    ss << " Q=" << faiss::indexIVF_stats.quantization_time;
    ss << " S=" << faiss::indexIVF_stats.search_time;
    return ss.str();
}

FaissExecutionEngine::FaissExecutionEngine(uint16_t dimension,
                                           const std::string& location,
                                           const std::string& build_index_type,
                                           const std::string& raw_index_type)
    : pIndex_(faiss::index_factory(dimension, raw_index_type.c_str())),
      location_(location),
                                           const std::string &location,
                                           const std::string &build_index_type,
                                           const std::string &raw_index_type)
    : location_(location),
      build_index_type_(build_index_type),
      raw_index_type_(raw_index_type) {

    std::string metric_type = GetMetricType();
    faiss::MetricType faiss_metric_type = (metric_type == "L2") ? faiss::METRIC_L2 : faiss::METRIC_INNER_PRODUCT;
    pIndex_.reset(faiss::index_factory(dimension, raw_index_type.c_str(), faiss_metric_type));
}
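The constructor now threads the configured metric type into faiss::index_factory instead of always getting the L2 default. A hedged sketch of the same construction, assuming only the faiss headers this file already includes (index_factory accepts an optional MetricType argument):

#include <faiss/AutoTune.h>  // declares faiss::index_factory in this faiss version
#include <faiss/Index.h>
#include <memory>
#include <string>

// Sketch: in the real code metric_type comes from ServerConfig; here it is a
// plain parameter so the function is self-contained.
std::shared_ptr<faiss::Index> MakeRawIndex(int dimension,
                                           const std::string& raw_index_type,
                                           const std::string& metric_type) {
    faiss::MetricType metric =
        (metric_type == "L2") ? faiss::METRIC_L2 : faiss::METRIC_INNER_PRODUCT;
    return std::shared_ptr<faiss::Index>(
        faiss::index_factory(dimension, raw_index_type.c_str(), metric));
}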

FaissExecutionEngine::FaissExecutionEngine(std::shared_ptr<faiss::Index> index,
                                           const std::string& location,
                                           const std::string& build_index_type,
                                           const std::string& raw_index_type)
                                           const std::string &location,
                                           const std::string &build_index_type,
                                           const std::string &raw_index_type)
    : pIndex_(index),
      location_(location),
      build_index_type_(build_index_type),
@@ -49,11 +81,11 @@ Status FaissExecutionEngine::AddWithIds(long n, const float *xdata, const long *
}

size_t FaissExecutionEngine::Count() const {
    return (size_t)(pIndex_->ntotal);
    return (size_t) (pIndex_->ntotal);
}

size_t FaissExecutionEngine::Size() const {
    return (size_t)(Count() * pIndex_->d)*sizeof(float);
    return (size_t) (Count() * pIndex_->d) * sizeof(float);
}

size_t FaissExecutionEngine::Dimension() const {
@@ -61,7 +93,7 @@ size_t FaissExecutionEngine::Dimension() const {
}

size_t FaissExecutionEngine::PhysicalSize() const {
    return (size_t)(Count() * pIndex_->d)*sizeof(float);
    return server::CommonUtil::GetFileSize(location_);
}

Status FaissExecutionEngine::Serialize() {
@@ -69,18 +101,17 @@ Status FaissExecutionEngine::Serialize() {
    return Status::OK();
}

Status FaissExecutionEngine::Load() {
    auto index = zilliz::milvus::cache::CpuCacheMgr::GetInstance()->GetIndex(location_);
    bool to_cache = false;
Status FaissExecutionEngine::Load(bool to_cache) {
    auto index = zilliz::milvus::cache::CpuCacheMgr::GetInstance()->GetIndex(location_);
    bool already_in_cache = (index != nullptr);
    auto start_time = METRICS_NOW_TIME;
    if (!index) {
        index = read_index(location_);
        to_cache = true;
        ENGINE_LOG_DEBUG << "Disk io from: " << location_;
    }

    pIndex_ = index->data();
    if (to_cache) {
    if (!already_in_cache && to_cache) {
        Cache();
        auto end_time = METRICS_NOW_TIME;
        auto total_time = METRICS_MICROSECONDS(start_time, end_time);
@@ -88,44 +119,44 @@ Status FaissExecutionEngine::Load() {
        server::Metrics::GetInstance().FaissDiskLoadDurationSecondsHistogramObserve(total_time);
        double total_size = (pIndex_->d) * (pIndex_->ntotal) * 4;


        server::Metrics::GetInstance().FaissDiskLoadSizeBytesHistogramObserve(total_size);
        // server::Metrics::GetInstance().FaissDiskLoadIOSpeedHistogramObserve(total_size/double(total_time));
        server::Metrics::GetInstance().FaissDiskLoadIOSpeedGaugeSet(total_size/double(total_time));
        server::Metrics::GetInstance().FaissDiskLoadIOSpeedGaugeSet(total_size / double(total_time));
    }
    return Status::OK();
}

Status FaissExecutionEngine::Merge(const std::string& location) {
Status FaissExecutionEngine::Merge(const std::string &location) {
    if (location == location_) {
        return Status::Error("Cannot Merge Self");
    }
    ENGINE_LOG_DEBUG << "Merge index file: " << location << " to: " << location_;
    ENGINE_LOG_DEBUG << "Merge raw file: " << location << " to: " << location_;

    auto to_merge = zilliz::milvus::cache::CpuCacheMgr::GetInstance()->GetIndex(location);
    if (!to_merge) {
        to_merge = read_index(location);
    }
    auto file_index = dynamic_cast<faiss::IndexIDMap*>(to_merge->data().get());
    pIndex_->add_with_ids(file_index->ntotal, dynamic_cast<faiss::IndexFlat*>(file_index->index)->xb.data(),
                          file_index->id_map.data());
    auto file_index = dynamic_cast<faiss::IndexIDMap *>(to_merge->data().get());
    pIndex_->add_with_ids(file_index->ntotal, dynamic_cast<faiss::IndexFlat *>(file_index->index)->xb.data(),
                          file_index->id_map.data());
    return Status::OK();
}

ExecutionEnginePtr
FaissExecutionEngine::BuildIndex(const std::string& location) {
FaissExecutionEngine::BuildIndex(const std::string &location) {
    ENGINE_LOG_DEBUG << "Build index file: " << location << " from: " << location_;

    auto opd = std::make_shared<Operand>();
    opd->d = pIndex_->d;
    opd->index_type = build_index_type_;
    opd->metric_type = GetMetricType();
    IndexBuilderPtr pBuilder = GetIndexBuilder(opd);

    auto from_index = dynamic_cast<faiss::IndexIDMap*>(pIndex_.get());
    auto from_index = dynamic_cast<faiss::IndexIDMap *>(pIndex_.get());

    auto index = pBuilder->build_all(from_index->ntotal,
                                     dynamic_cast<faiss::IndexFlat*>(from_index->index)->xb.data(),
                                     from_index->id_map.data());
                                     dynamic_cast<faiss::IndexFlat *>(from_index->index)->xb.data(),
                                     from_index->id_map.data());

    ExecutionEnginePtr new_ee(new FaissExecutionEngine(index->data(), location, build_index_type_, raw_index_type_));
    return new_ee;
@@ -139,38 +170,44 @@ Status FaissExecutionEngine::Search(long n,
    auto start_time = METRICS_NOW_TIME;

    std::shared_ptr<faiss::IndexIVF> ivf_index = std::dynamic_pointer_cast<faiss::IndexIVF>(pIndex_);
    if(ivf_index) {
        ENGINE_LOG_DEBUG << "Index type: IVFFLAT nProbe: " << nprobe_;
    if (ivf_index) {
        std::string stats_prefix = "K=" + std::to_string(k) + ":";
        ENGINE_LOG_DEBUG << "Searching index type: " << build_index_type_ << " nProbe: " << nprobe_;
        ivf_index->nprobe = nprobe_;
        ivf_stats_helper_.Reset();
        ivf_index->search(n, data, k, distances, labels);
        ENGINE_LOG_INFO << ivf_stats_helper_.ToString(stats_prefix);
    } else {
        ENGINE_LOG_DEBUG << "Searching raw file";
        pIndex_->search(n, data, k, distances, labels);
    }

    auto end_time = METRICS_NOW_TIME;
    auto total_time = METRICS_MICROSECONDS(start_time,end_time);
    server::Metrics::GetInstance().QueryIndexTypePerSecondSet(build_index_type_, double(n)/double(total_time));
    auto total_time = METRICS_MICROSECONDS(start_time, end_time);
    server::Metrics::GetInstance().QueryIndexTypePerSecondSet(build_index_type_, double(n) / double(total_time));
    return Status::OK();
}

Status FaissExecutionEngine::Cache() {
    zilliz::milvus::cache::CpuCacheMgr::GetInstance(
    )->InsertItem(location_, std::make_shared<Index>(pIndex_));
    auto index = std::make_shared<Index>(pIndex_);
    cache::DataObjPtr data_obj = std::make_shared<cache::DataObj>(index, PhysicalSize());
    zilliz::milvus::cache::CpuCacheMgr::GetInstance()->InsertItem(location_, data_obj);

    return Status::OK();
}

Status FaissExecutionEngine::Init() {

    if(build_index_type_ == "IVF") {
    if (build_index_type_ == BUILD_INDEX_TYPE_IVF ||
        build_index_type_ == BUILD_INDEX_TYPE_IVFSQ8) {

        using namespace zilliz::milvus::server;
        ServerConfig &config = ServerConfig::GetInstance();
        ConfigNode engine_config = config.GetConfig(CONFIG_ENGINE);
        nprobe_ = engine_config.GetInt32Value(CONFIG_NPROBE, 1000);
        nlist_ = engine_config.GetInt32Value(CONFIG_NLIST, 16384);

    } else if(build_index_type_ == "IDMap") {
        ;
    } else if (build_index_type_ == BUILD_INDEX_TYPE_IDMAP) { ;
    } else {
        return Status::Error("Wrong index type: ", build_index_type_);
    }

@@ -12,23 +12,44 @@
#include <memory>
#include <string>


namespace zilliz {
namespace milvus {
namespace engine {

const static std::string BUILD_INDEX_TYPE_IDMAP = "IDMap";
const static std::string BUILD_INDEX_TYPE_IVF = "IVF";
const static std::string BUILD_INDEX_TYPE_IVFSQ8 = "IVFSQ8";

class IndexStatsHelper {

 public:
    using Ptr = std::shared_ptr<IndexStatsHelper>;
    virtual std::string ToString(const std::string &prefix = "") const;
    virtual void Reset() const;
    virtual ~IndexStatsHelper() {}
};

class FaissIndexIVFStatsHelper : public IndexStatsHelper {
 public:
    std::string ToString(const std::string &prefix = "") const override;

 private:
    const std::string identifier_ = BUILD_INDEX_TYPE_IVF;
};

class FaissExecutionEngine : public ExecutionEngine {
public:
 public:

    FaissExecutionEngine(uint16_t dimension,
                         const std::string& location,
                         const std::string& build_index_type,
                         const std::string& raw_index_type);
                         const std::string &location,
                         const std::string &build_index_type,
                         const std::string &raw_index_type);

    FaissExecutionEngine(std::shared_ptr<faiss::Index> index,
                         const std::string& location,
                         const std::string& build_index_type,
                         const std::string& raw_index_type);
                         const std::string &location,
                         const std::string &build_index_type,
                         const std::string &raw_index_type);

    Status AddWithIds(long n, const float *xdata, const long *xids) override;

@@ -42,9 +63,9 @@ public:

    Status Serialize() override;

    Status Load() override;
    Status Load(bool to_cache) override;

    Status Merge(const std::string& location) override;
    Status Merge(const std::string &location) override;

    Status Search(long n,
                  const float *data,
@@ -52,13 +73,14 @@ public:
                  float *distances,
                  long *labels) const override;

    ExecutionEnginePtr BuildIndex(const std::string&) override;
    ExecutionEnginePtr BuildIndex(const std::string &) override;

    Status Cache() override;

    Status Init() override;

protected:
 protected:
    FaissIndexIVFStatsHelper ivf_stats_helper_;
    std::shared_ptr<faiss::Index> pIndex_;
    std::string location_;

@@ -66,6 +88,7 @@ protected:
    std::string raw_index_type_;

    size_t nprobe_ = 0;
    size_t nlist_ = 0;
};


@@ -13,7 +13,9 @@ namespace zilliz {
namespace milvus {
namespace engine {

IDGenerator::~IDGenerator() {}
IDGenerator::~IDGenerator() = default;

constexpr size_t SimpleIDGenerator::MAX_IDS_PER_MICRO;

IDNumber SimpleIDGenerator::GetNextIDNumber() {
    auto now = std::chrono::system_clock::now();
@@ -10,28 +10,39 @@
#include <cstddef>
#include <vector>


namespace zilliz {
namespace milvus {
namespace engine {

class IDGenerator {
public:
    virtual IDNumber GetNextIDNumber() = 0;
    virtual void GetNextIDNumbers(size_t n, IDNumbers& ids) = 0;
 public:
    virtual
    IDNumber GetNextIDNumber() = 0;

    virtual ~IDGenerator();
    virtual void
    GetNextIDNumbers(size_t n, IDNumbers &ids) = 0;

    virtual
    ~IDGenerator() = 0;
}; // IDGenerator


class SimpleIDGenerator : public IDGenerator {
public:
    virtual IDNumber GetNextIDNumber() override;
    virtual void GetNextIDNumbers(size_t n, IDNumbers& ids) override;
 public:
    ~SimpleIDGenerator() override = default;

private:
    void NextIDNumbers(size_t n, IDNumbers& ids);
    const size_t MAX_IDS_PER_MICRO = 1000;
    IDNumber
    GetNextIDNumber() override;

    void
    GetNextIDNumbers(size_t n, IDNumbers &ids) override;

 private:
    void
    NextIDNumbers(size_t n, IDNumbers &ids);

    static constexpr size_t MAX_IDS_PER_MICRO = 1000;

}; // SimpleIDGenerator


@@ -83,11 +83,12 @@ Status MemVectors::Serialize(std::string &table_id) {

    auto status = meta_->UpdateTableFile(schema_);

    LOG(DEBUG) << "New " << ((schema_.file_type_ == meta::TableFileSchema::RAW) ? "raw" : "to_index")
               << " file " << schema_.file_id_ << " of size " << (double) (active_engine_->Size()) / (double) meta::M
               << " M";
    ENGINE_LOG_DEBUG << "New " << ((schema_.file_type_ == meta::TableFileSchema::RAW) ? "raw" : "to_index")
                     << " file " << schema_.file_id_ << " of size " << active_engine_->Size() << " bytes";

    active_engine_->Cache();
    if(options_.insert_cache_immediately_) {
        active_engine_->Cache();
    }

    return status;
}
@@ -125,9 +126,6 @@ Status MemManager::InsertVectors(const std::string &table_id_,
                                 const float *vectors_,
                                 IDNumbers &vector_ids_) {

    LOG(DEBUG) << "MemManager::InsertVectors: mutable mem = " << GetCurrentMutableMem() <<
               ", immutable mem = " << GetCurrentImmutableMem() << ", total mem = " << GetCurrentMem();

    std::unique_lock<std::mutex> lock(mutex_);

    return InsertVectorsNoLock(table_id_, n_, vectors_, vector_ids_);
@@ -18,6 +18,7 @@
#include <mutex>



namespace zilliz {
namespace milvus {
namespace engine {
@@ -95,10 +95,12 @@ Status MemTableFile::Serialize() {

    auto status = meta_->UpdateTableFile(table_file_schema_);

    LOG(DEBUG) << "New " << ((table_file_schema_.file_type_ == meta::TableFileSchema::RAW) ? "raw" : "to_index")
               << " file " << table_file_schema_.file_id_ << " of size " << (double) size / (double) M << " M";
    ENGINE_LOG_DEBUG << "New " << ((table_file_schema_.file_type_ == meta::TableFileSchema::RAW) ? "raw" : "to_index")
                     << " file " << table_file_schema_.file_id_ << " of size " << size << " bytes";

    execution_engine_->Cache();
    if(options_.insert_cache_immediately_) {
        execution_engine_->Cache();
    }

    return status;
}
@@ -13,6 +13,8 @@ namespace milvus {
namespace engine {
namespace meta {

Meta::~Meta() = default;

DateT Meta::GetDate(const std::time_t& t, int day_delta) {
    struct tm ltm;
    localtime_r(&t, &ltm);
@@ -20,52 +20,86 @@ namespace meta {


class Meta {
public:
 public:
    using Ptr = std::shared_ptr<Meta>;

    virtual Status CreateTable(TableSchema& table_schema) = 0;
    virtual Status DescribeTable(TableSchema& table_schema) = 0;
    virtual Status HasTable(const std::string& table_id, bool& has_or_not) = 0;
    virtual Status AllTables(std::vector<TableSchema>& table_schema_array) = 0;
    virtual
    ~Meta() = 0;

    virtual Status DeleteTable(const std::string& table_id) = 0;
    virtual Status DeleteTableFiles(const std::string& table_id) = 0;
    virtual Status
    CreateTable(TableSchema &table_schema) = 0;

    virtual Status CreateTableFile(TableFileSchema& file_schema) = 0;
    virtual Status DropPartitionsByDates(const std::string& table_id,
                                         const DatesT& dates) = 0;
    virtual Status
    DescribeTable(TableSchema &table_schema) = 0;

    virtual Status GetTableFiles(const std::string& table_id,
                                 const std::vector<size_t>& ids,
                                 TableFilesSchema& table_files) = 0;
    virtual Status
    HasTable(const std::string &table_id, bool &has_or_not) = 0;

    virtual Status UpdateTableFile(TableFileSchema& file_schema) = 0;
    virtual Status
    AllTables(std::vector<TableSchema> &table_schema_array) = 0;

    virtual Status UpdateTableFiles(TableFilesSchema& files) = 0;
    virtual Status
    DeleteTable(const std::string &table_id) = 0;

    virtual Status FilesToSearch(const std::string &table_id,
                                 const DatesT &partition,
                                 DatePartionedTableFilesSchema& files) = 0;
    virtual Status
    DeleteTableFiles(const std::string &table_id) = 0;

    virtual Status FilesToMerge(const std::string& table_id,
                                DatePartionedTableFilesSchema& files) = 0;
    virtual Status
    CreateTableFile(TableFileSchema &file_schema) = 0;

    virtual Status Size(uint64_t& result) = 0;
    virtual Status
    DropPartitionsByDates(const std::string &table_id, const DatesT &dates) = 0;

    virtual Status Archive() = 0;
    virtual Status
    GetTableFiles(const std::string &table_id, const std::vector<size_t> &ids, TableFilesSchema &table_files) = 0;

    virtual Status FilesToIndex(TableFilesSchema&) = 0;
    virtual Status
    UpdateTableFilesToIndex(const std::string &table_id) = 0;

    virtual Status CleanUp() = 0;
    virtual Status CleanUpFilesWithTTL(uint16_t) = 0;
    virtual Status
    UpdateTableFile(TableFileSchema &file_schema) = 0;

    virtual Status DropAll() = 0;
    virtual Status
    UpdateTableFiles(TableFilesSchema &files) = 0;

    virtual Status Count(const std::string& table_id, uint64_t& result) = 0;
    virtual Status
    FilesToSearch(const std::string &table_id, const DatesT &partition, DatePartionedTableFilesSchema &files) = 0;

    static DateT GetDate(const std::time_t& t, int day_delta = 0);
    static DateT GetDate();
    static DateT GetDateWithDelta(int day_delta);
    virtual Status
    FilesToMerge(const std::string &table_id, DatePartionedTableFilesSchema &files) = 0;

    virtual Status
    Size(uint64_t &result) = 0;

    virtual Status
    Archive() = 0;

    virtual Status
    FilesToIndex(TableFilesSchema &) = 0;

    virtual Status
    HasNonIndexFiles(const std::string &table_id, bool &has) = 0;

    virtual Status
    CleanUp() = 0;

    virtual Status
    CleanUpFilesWithTTL(uint16_t) = 0;

    virtual Status
    DropAll() = 0;

    virtual Status
    Count(const std::string &table_id, uint64_t &result) = 0;

    static DateT
    GetDate(const std::time_t &t, int day_delta = 0);

    static DateT
    GetDate();

    static DateT
    GetDateWithDelta(int day_delta);

}; // MetaData


@@ -31,7 +31,6 @@ struct TableSchema {
    int state_ = (int)NORMAL;
    size_t files_cnt_ = 0;
    uint16_t dimension_ = 0;
    std::string location_;
    long created_on_ = 0;
    int engine_type_ = (int)EngineType::FAISS_IDMAP;
    bool store_raw_data_ = false;

File diff suppressed because it is too large
@@ -12,78 +12,80 @@
#include "mysql++/mysql++.h"
#include <mutex>


namespace zilliz {
namespace milvus {
namespace engine {
namespace meta {

// auto StoragePrototype(const std::string& path);
using namespace mysqlpp;
using namespace mysqlpp;

class MySQLMetaImpl : public Meta {
public:
    MySQLMetaImpl(const DBMetaOptions& options_, const int& mode);
class MySQLMetaImpl : public Meta {
 public:
    MySQLMetaImpl(const DBMetaOptions &options_, const int &mode);

    virtual Status CreateTable(TableSchema& table_schema) override;
    virtual Status DescribeTable(TableSchema& group_info_) override;
    virtual Status HasTable(const std::string& table_id, bool& has_or_not) override;
    virtual Status AllTables(std::vector<TableSchema>& table_schema_array) override;
    Status CreateTable(TableSchema &table_schema) override;
    Status DescribeTable(TableSchema &group_info_) override;
    Status HasTable(const std::string &table_id, bool &has_or_not) override;
    Status AllTables(std::vector<TableSchema> &table_schema_array) override;

    virtual Status DeleteTable(const std::string& table_id) override;
    virtual Status DeleteTableFiles(const std::string& table_id) override;
    Status DeleteTable(const std::string &table_id) override;
    Status DeleteTableFiles(const std::string &table_id) override;

    virtual Status CreateTableFile(TableFileSchema& file_schema) override;
    virtual Status DropPartitionsByDates(const std::string& table_id,
                                         const DatesT& dates) override;
    Status CreateTableFile(TableFileSchema &file_schema) override;
    Status DropPartitionsByDates(const std::string &table_id,
                                 const DatesT &dates) override;

    virtual Status GetTableFiles(const std::string& table_id,
                                 const std::vector<size_t>& ids,
                                 TableFilesSchema& table_files) override;
    Status GetTableFiles(const std::string &table_id,
                         const std::vector<size_t> &ids,
                         TableFilesSchema &table_files) override;

    virtual Status UpdateTableFile(TableFileSchema& file_schema) override;
    Status HasNonIndexFiles(const std::string &table_id, bool &has) override;

    virtual Status UpdateTableFiles(TableFilesSchema& files) override;
    Status UpdateTableFile(TableFileSchema &file_schema) override;

    virtual Status FilesToSearch(const std::string& table_id,
                                 const DatesT& partition,
                                 DatePartionedTableFilesSchema& files) override;
    Status UpdateTableFilesToIndex(const std::string &table_id) override;

    virtual Status FilesToMerge(const std::string& table_id,
                                DatePartionedTableFilesSchema& files) override;
    Status UpdateTableFiles(TableFilesSchema &files) override;

    virtual Status FilesToIndex(TableFilesSchema&) override;
    Status FilesToSearch(const std::string &table_id,
                         const DatesT &partition,
                         DatePartionedTableFilesSchema &files) override;

    virtual Status Archive() override;
    Status FilesToMerge(const std::string &table_id,
                        DatePartionedTableFilesSchema &files) override;

    virtual Status Size(uint64_t& result) override;
    Status FilesToIndex(TableFilesSchema &) override;

    virtual Status CleanUp() override;
    Status Archive() override;

    virtual Status CleanUpFilesWithTTL(uint16_t seconds) override;
    Status Size(uint64_t &result) override;

    virtual Status DropAll() override;
    Status CleanUp() override;

    virtual Status Count(const std::string& table_id, uint64_t& result) override;
    Status CleanUpFilesWithTTL(uint16_t seconds) override;

    virtual ~MySQLMetaImpl();
    Status DropAll() override;

private:
    Status NextFileId(std::string& file_id);
    Status NextTableId(std::string& table_id);
    Status DiscardFiles(long long to_discard_size);
    std::string GetTablePath(const std::string& table_id);
    std::string GetTableDatePartitionPath(const std::string& table_id, DateT& date);
    void GetTableFilePath(TableFileSchema& group_file);
    Status Initialize();
    Status Count(const std::string &table_id, uint64_t &result) override;

    const DBMetaOptions options_;
    const int mode_;
    virtual ~MySQLMetaImpl();

    std::shared_ptr<MySQLConnectionPool> mysql_connection_pool_;
    bool safe_grab = false;
 private:
    Status NextFileId(std::string &file_id);
    Status NextTableId(std::string &table_id);
    Status DiscardFiles(long long to_discard_size);
    Status Initialize();

    const DBMetaOptions options_;
    const int mode_;

    std::shared_ptr<MySQLConnectionPool> mysql_connection_pool_;
    bool safe_grab = false;

    // std::mutex connectionMutex_;
}; // DBMetaImpl
}; // DBMetaImpl

} // namespace meta
} // namespace engine
@@ -25,13 +25,10 @@ Status NewMemManager::InsertVectors(const std::string &table_id_,
                                    const float *vectors_,
                                    IDNumbers &vector_ids_) {

    while (GetCurrentMem() > options_.maximum_memory) {
    while (GetCurrentMem() > options_.insert_buffer_size) {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }

    LOG(DEBUG) << "NewMemManager::InsertVectors: mutable mem = " << GetCurrentMutableMem() <<
               ", immutable mem = " << GetCurrentImmutableMem() << ", total mem = " << GetCurrentMem();

    std::unique_lock<std::mutex> lock(mutex_);

    return InsertVectorsNoLock(table_id_, n_, vectors_, vector_ids_);
@@ -77,7 +74,6 @@ Status NewMemManager::Serialize(std::set<std::string> &table_ids) {
        table_ids.insert(mem->GetTableId());
    }
    immu_mem_list_.clear();

    return Status::OK();
}

@@ -41,6 +41,10 @@ void ArchiveConf::ParseCritirias(const std::string& criterias) {
    }

    for (auto& token : tokens) {
        if(token.empty()) {
            continue;
        }

        std::vector<std::string> kv;
        boost::algorithm::split(kv, token, boost::is_any_of(":"));
        if (kv.size() != 2) {
@@ -8,6 +8,7 @@
#include <string>
#include <memory>
#include <map>
#include <vector>

namespace zilliz {
namespace milvus {
@@ -21,7 +22,7 @@ static constexpr uint64_t ONE_GB = ONE_KB*ONE_MB;

static const std::string ARCHIVE_CONF_DISK = "disk";
static const std::string ARCHIVE_CONF_DAYS = "days";
static const std::string ARCHIVE_CONF_DEFAULT = ARCHIVE_CONF_DISK + ":512";
static const std::string ARCHIVE_CONF_DEFAULT = "";

struct ArchiveConf {
    using CriteriaT = std::map<std::string, int>;
@@ -43,6 +44,7 @@ private:

struct DBMetaOptions {
    std::string path;
    std::vector<std::string> slave_paths;
    std::string backend_uri;
    ArchiveConf archive_conf = ArchiveConf("delete");
}; // DBMetaOptions
@@ -61,7 +63,9 @@ struct Options {
    size_t index_trigger_size = ONE_GB; //unit: byte
    DBMetaOptions meta;
    int mode = MODE::SINGLE;
    float maximum_memory = 4 * ONE_GB;

    size_t insert_buffer_size = 4 * ONE_GB;
    bool insert_cache_immediately_ = false;
}; // Options
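For reference, a standalone mirror (not the real headers) of how the renamed knobs relate: insert_buffer_size replaces maximum_memory as the insert back-pressure threshold, and insert_cache_immediately_ controls whether freshly serialized files are pushed into the cache. Field names match the diff above; the values set in main are hypothetical:

#include <cstddef>
#include <string>
#include <vector>

// Stand-in definitions for illustration only.
constexpr size_t ONE_GB = 1ULL << 30;

struct DBMetaOptions {
    std::string path;                      // primary db path
    std::vector<std::string> slave_paths;  // optional extra data paths
};

struct Options {
    size_t insert_buffer_size = 4 * ONE_GB;   // was: maximum_memory
    bool insert_cache_immediately_ = false;   // cache freshly serialized files
    DBMetaOptions meta;
};

int main() {
    Options options;
    options.insert_buffer_size = 2 * ONE_GB;
    options.insert_cache_immediately_ = true;
    options.meta.path = "/var/lib/milvus";                    // hypothetical path
    options.meta.slave_paths = {"/mnt/disk1", "/mnt/disk2"};  // hypothetical paths
    return 0;
}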

@@ -4,14 +4,58 @@
 * Proprietary and confidential.
 ******************************************************************************/
#include "Utils.h"
#include "utils/CommonUtil.h"
#include "Log.h"

#include <mutex>
#include <chrono>
#include <boost/filesystem.hpp>

namespace zilliz {
namespace milvus {
namespace engine {
namespace utils {

namespace {

static const std::string TABLES_FOLDER = "/tables/";

static uint64_t index_file_counter = 0;
static std::mutex index_file_counter_mutex;

std::string ConstructParentFolder(const std::string& db_path, const meta::TableFileSchema& table_file) {
    std::string table_path = db_path + TABLES_FOLDER + table_file.table_id_;
    std::string partition_path = table_path + "/" + std::to_string(table_file.date_);
    return partition_path;
}

std::string GetTableFileParentFolder(const DBMetaOptions& options, const meta::TableFileSchema& table_file) {
    uint64_t path_count = options.slave_paths.size() + 1;
    std::string target_path = options.path;
    uint64_t index = 0;

    if(meta::TableFileSchema::INDEX == table_file.file_type_) {
        // index files are large and persisted permanently,
        // so distribute them evenly across the db paths,
        // round-robin on a global file counter
        std::lock_guard<std::mutex> lock(index_file_counter_mutex);
        index = index_file_counter % path_count;
        index_file_counter++;
    } else {
        // other file types may be merged or deleted later,
        // so round-robin by file id to keep the mapping stable
        index = table_file.id_ % path_count;
    }

    if (index > 0) {
        target_path = options.slave_paths[index - 1];
    }

    return ConstructParentFolder(target_path, table_file);
}

}
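The selection rule above can be read as: index files rotate across all configured paths via a process-wide counter, while other files are pinned to a path by their id. A standalone sketch of the modulo arithmetic (paths are hypothetical):

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Standalone sketch of GetTableFileParentFolder's path selection: index 0 is
// the primary path, indexes 1..N map to the slave paths.
std::string PickPath(const std::string& primary,
                     const std::vector<std::string>& slaves,
                     uint64_t key /* global counter value or file id */) {
    uint64_t path_count = slaves.size() + 1;
    uint64_t index = key % path_count;
    return (index == 0) ? primary : slaves[index - 1];
}

int main() {
    std::vector<std::string> slaves = {"/mnt/disk1", "/mnt/disk2"};  // hypothetical
    for (uint64_t counter = 0; counter < 5; ++counter) {
        std::cout << counter << " -> " << PickPath("/var/lib/milvus", slaves, counter) << "\n";
    }
    return 0;
}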

long GetMicroSecTimeStamp() {
    auto now = std::chrono::system_clock::now();
    auto micros = std::chrono::duration_cast<std::chrono::microseconds>(
@@ -20,6 +64,82 @@ long GetMicroSecTimeStamp() {
    return micros;
}

Status CreateTablePath(const DBMetaOptions& options, const std::string& table_id) {
    std::string db_path = options.path;
    std::string table_path = db_path + TABLES_FOLDER + table_id;
    auto status = server::CommonUtil::CreateDirectory(table_path);
    if (status != 0) {
        ENGINE_LOG_ERROR << "Create directory " << table_path << " Error";
        return Status::Error("Failed to create table path");
    }

    for(auto& path : options.slave_paths) {
        table_path = path + TABLES_FOLDER + table_id;
        status = server::CommonUtil::CreateDirectory(table_path);
        if (status != 0) {
            ENGINE_LOG_ERROR << "Create directory " << table_path << " Error";
            return Status::Error("Failed to create table path");
        }
    }

    return Status::OK();
}

Status DeleteTablePath(const DBMetaOptions& options, const std::string& table_id) {
    std::string db_path = options.path;
    std::string table_path = db_path + TABLES_FOLDER + table_id;
    boost::filesystem::remove_all(table_path);
    ENGINE_LOG_DEBUG << "Remove table folder: " << table_path;

    for(auto& path : options.slave_paths) {
        table_path = path + TABLES_FOLDER + table_id;
        boost::filesystem::remove_all(table_path);
        ENGINE_LOG_DEBUG << "Remove table folder: " << table_path;
    }

    return Status::OK();
}

Status CreateTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file) {
    std::string parent_path = GetTableFileParentFolder(options, table_file);

    auto status = server::CommonUtil::CreateDirectory(parent_path);
    if (status != 0) {
        ENGINE_LOG_ERROR << "Create directory " << parent_path << " Error";
        return Status::DBTransactionError("Failed to create partition directory");
    }

    table_file.location_ = parent_path + "/" + table_file.file_id_;

    return Status::OK();
}

Status GetTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file) {
    std::string parent_path = ConstructParentFolder(options.path, table_file);
    std::string file_path = parent_path + "/" + table_file.file_id_;
    if(boost::filesystem::exists(file_path)) {
        table_file.location_ = file_path;
        return Status::OK();
    } else {
        for(auto& path : options.slave_paths) {
            parent_path = ConstructParentFolder(path, table_file);
            file_path = parent_path + "/" + table_file.file_id_;
            if(boost::filesystem::exists(file_path)) {
                table_file.location_ = file_path;
                return Status::OK();
            }
        }
    }

    return Status::Error("Table file doesn't exist: " + table_file.file_id_);
}
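GetTableFilePath probes the primary path first and then each slave path, taking the first location where the file actually exists. A standalone sketch of the probing order (roots and the relative path are hypothetical):

#include <boost/filesystem.hpp>
#include <iostream>
#include <string>
#include <vector>

// Standalone sketch of the multi-path lookup above: return the first root
// under which the relative path exists, or report failure.
bool FindFile(const std::string& relative,
              const std::vector<std::string>& roots,
              std::string& location) {
    for (const auto& root : roots) {
        std::string candidate = root + "/" + relative;
        if (boost::filesystem::exists(candidate)) {
            location = candidate;
            return true;
        }
    }
    return false;  // mirrors Status::Error("Table file doesn't exist: ...")
}

int main() {
    std::string location;
    std::vector<std::string> roots = {"/var/lib/milvus", "/mnt/disk1"};  // hypothetical
    if (FindFile("tables/t1/20190707/42", roots, location)) {
        std::cout << "found at " << location << "\n";
    }
    return 0;
}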
|
||||
|
||||
Status DeleteTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file) {
|
||||
utils::GetTableFilePath(options, table_file);
|
||||
boost::filesystem::remove(table_file.location_);
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
} // namespace utils
|
||||
} // namespace engine
|
||||
} // namespace milvus
|
||||
|
@ -5,6 +5,10 @@
|
||||
******************************************************************************/
|
||||
#pragma once
|
||||
|
||||
#include "Options.h"
|
||||
#include "MetaTypes.h"
|
||||
|
||||
#include <string>
|
||||
|
||||
namespace zilliz {
|
||||
namespace milvus {
|
||||
@ -13,6 +17,13 @@ namespace utils {
|
||||
|
||||
long GetMicroSecTimeStamp();
|
||||
|
||||
Status CreateTablePath(const DBMetaOptions& options, const std::string& table_id);
|
||||
Status DeleteTablePath(const DBMetaOptions& options, const std::string& table_id);
|
||||
|
||||
Status CreateTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file);
|
||||
Status GetTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file);
|
||||
Status DeleteTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file);
|
||||
|
||||
} // namespace utils
|
||||
} // namespace engine
|
||||
} // namespace milvus
|
||||
|
@ -20,6 +20,7 @@ class ReuseCacheIndexStrategy {
|
||||
public:
|
||||
bool Schedule(const SearchContextPtr &context, std::list<ScheduleTaskPtr>& task_list) {
|
||||
if(context == nullptr) {
|
||||
ENGINE_LOG_ERROR << "Task Dispatch context doesn't exist";
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -32,7 +33,7 @@ public:
|
||||
|
||||
IndexLoadTaskPtr loader = std::static_pointer_cast<IndexLoadTask>(task);
|
||||
if(index_files.find(loader->file_->id_) != index_files.end()){
|
||||
ENGINE_LOG_INFO << "Append SearchContext to exist IndexLoaderContext";
|
||||
ENGINE_LOG_DEBUG << "Append SearchContext to exist IndexLoaderContext";
|
||||
index_files.erase(loader->file_->id_);
|
||||
loader->search_contexts_.push_back(context);
|
||||
}
|
||||
@ -40,7 +41,7 @@ public:
|
||||
|
||||
//index_files still contains some index files, create new loader
|
||||
for(auto& pair : index_files) {
|
||||
ENGINE_LOG_INFO << "Create new IndexLoaderContext for: " << pair.second->location_;
|
||||
ENGINE_LOG_DEBUG << "Create new IndexLoaderContext for: " << pair.second->location_;
|
||||
IndexLoadTaskPtr new_loader = std::make_shared<IndexLoadTask>();
|
||||
new_loader->search_contexts_.push_back(context);
|
||||
new_loader->file_ = pair.second;
|
||||
@ -64,6 +65,7 @@ class DeleteTableStrategy {
|
||||
public:
|
||||
bool Schedule(const DeleteContextPtr &context, std::list<ScheduleTaskPtr> &task_list) {
|
||||
if (context == nullptr) {
|
||||
ENGINE_LOG_ERROR << "Task Dispatch context doesn't exist";
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -103,6 +105,7 @@ public:
|
||||
bool TaskDispatchStrategy::Schedule(const ScheduleContextPtr &context_ptr,
|
||||
std::list<zilliz::milvus::engine::ScheduleTaskPtr> &task_list) {
|
||||
if(context_ptr == nullptr) {
|
||||
ENGINE_LOG_ERROR << "Task Dispatch context doesn't exist";
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -31,6 +31,7 @@ TaskScheduler& TaskScheduler::GetInstance() {
|
||||
bool
|
||||
TaskScheduler::Start() {
|
||||
if(!stopped_) {
|
||||
SERVER_LOG_INFO << "Task Scheduler isn't started";
        return true;
    }

@ -47,6 +48,7 @@ TaskScheduler::Start() {
bool
TaskScheduler::Stop() {
    if(stopped_) {
        SERVER_LOG_INFO << "Task Scheduler already stopped";
        return true;
    }

@ -80,7 +82,7 @@ TaskScheduler::TaskDispatchWorker() {
        ScheduleTaskPtr task_ptr = task_dispatch_queue_.Take();
        if(task_ptr == nullptr) {
            SERVER_LOG_INFO << "Stop db task dispatch thread";
            break;//exit
            return true;
        }

        //execute task
@ -98,8 +100,8 @@ TaskScheduler::TaskWorker() {
    while(true) {
        ScheduleTaskPtr task_ptr = task_queue_.Take();
        if(task_ptr == nullptr) {
            SERVER_LOG_INFO << "Stop db task thread";
            break;//exit
            SERVER_LOG_INFO << "Stop db task worker thread";
            return true;
        }

        //execute task

@ -31,7 +31,7 @@ SearchContext::AddIndexFile(TableFileSchemaPtr& index_file) {
        return false;
    }

    SERVER_LOG_INFO << "SearchContext " << identity_ << " add index file: " << index_file->id_;
    SERVER_LOG_DEBUG << "SearchContext " << identity_ << " add index file: " << index_file->id_;

    map_index_files_[index_file->id_] = index_file;
    return true;
@ -42,7 +42,7 @@ SearchContext::IndexSearchDone(size_t index_id) {
    std::unique_lock <std::mutex> lock(mtx_);
    map_index_files_.erase(index_id);
    done_cond_.notify_all();
    SERVER_LOG_INFO << "SearchContext " << identity_ << " finish index file: " << index_id;
    SERVER_LOG_DEBUG << "SearchContext " << identity_ << " finish index file: " << index_id;
}

void

@ -37,9 +37,19 @@ public:
    const ResultSet& GetResult() const { return result_; }
    ResultSet& GetResult() { return result_; }

    std::string Identity() const { return identity_; }

    void IndexSearchDone(size_t index_id);
    void WaitResult();

    void AccumLoadCost(double span) { time_cost_load_ += span; }
    void AccumSearchCost(double span) { time_cost_search_ += span; }
    void AccumReduceCost(double span) { time_cost_reduce_ += span; }

    double LoadCost() const { return time_cost_load_; }
    double SearchCost() const { return time_cost_search_; }
    double ReduceCost() const { return time_cost_reduce_; }

private:
    uint64_t topk_ = 0;
    uint64_t nq_ = 0;
@ -52,6 +62,10 @@ private:
    std::condition_variable done_cond_;

    std::string identity_; //for debug

    double time_cost_load_ = 0.0; //time cost for load all index files, unit: us
    double time_cost_search_ = 0.0; //time cost for entire search, unit: us
    double time_cost_reduce_ = 0.0; //time cost for entire reduce, unit: us
};

using SearchContextPtr = std::shared_ptr<SearchContext>;

@ -41,20 +41,21 @@ IndexLoadTask::IndexLoadTask()
}

std::shared_ptr<IScheduleTask> IndexLoadTask::Execute() {
    ENGINE_LOG_INFO << "Loading index(" << file_->id_ << ") from location: " << file_->location_;

    server::TimeRecorder rc("Load index");
    server::TimeRecorder rc("");
    //step 1: load index
    ExecutionEnginePtr index_ptr = EngineFactory::Build(file_->dimension_,
                                                        file_->location_,
                                                        (EngineType)file_->engine_type_);
    index_ptr->Load();

    rc.Record("load index file to memory");

    size_t file_size = index_ptr->PhysicalSize();
    LOG(DEBUG) << "Index file type " << file_->file_type_ << " Of Size: "
               << file_size/(1024*1024) << " M";

    std::string info = "Load file id:" + std::to_string(file_->id_) + " file type:" + std::to_string(file_->file_type_)
        + " size:" + std::to_string(file_size) + " bytes from location: " + file_->location_ + " totally cost";
    double span = rc.ElapseFromBegin(info);
    for(auto& context : search_contexts_) {
        context->AccumLoadCost(span);
    }

    CollectFileMetrics(file_->file_type_, file_size);

@ -5,14 +5,60 @@
 ******************************************************************************/
#include "SearchTask.h"
#include "metrics/Metrics.h"
#include "utils/Log.h"
#include "db/Log.h"
#include "utils/TimeRecorder.h"

#include <thread>

namespace zilliz {
namespace milvus {
namespace engine {

namespace {

static constexpr size_t PARALLEL_REDUCE_THRESHOLD = 10000;
static constexpr size_t PARALLEL_REDUCE_BATCH = 1000;

bool NeedParallelReduce(uint64_t nq, uint64_t topk) {
    server::ServerConfig &config = server::ServerConfig::GetInstance();
    server::ConfigNode& db_config = config.GetConfig(server::CONFIG_DB);
    bool need_parallel = db_config.GetBoolValue(server::CONFIG_DB_PARALLEL_REDUCE, true);
    if(!need_parallel) {
        return false;
    }

    return nq*topk >= PARALLEL_REDUCE_THRESHOLD;
}

void ParallelReduce(std::function<void(size_t, size_t)>& reduce_function, size_t max_index) {
    size_t reduce_batch = PARALLEL_REDUCE_BATCH;

    auto thread_count = std::thread::hardware_concurrency() - 1; //not all core do this work
    if(thread_count > 0) {
        reduce_batch = max_index/thread_count + 1;
    }
ENGINE_LOG_DEBUG << "use " << thread_count <<
|
||||
" thread parallelly do reduce, each thread process " << reduce_batch << " vectors";

    std::vector<std::shared_ptr<std::thread> > thread_array;
    size_t from_index = 0;
    while(from_index < max_index) {
        size_t to_index = from_index + reduce_batch;
        if(to_index > max_index) {
            to_index = max_index;
        }

        auto reduce_thread = std::make_shared<std::thread>(reduce_function, from_index, to_index);
        thread_array.push_back(reduce_thread);

        from_index = to_index;
    }

    for(auto& thread_ptr : thread_array) {
        thread_ptr->join();
    }
}
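
ParallelReduce above simply slices [0, max_index) into per-thread batches and joins the workers. The following is a minimal self-contained sketch of the same split-and-join pattern; the helper name ParallelReduceSketch and the demo workload are illustrative stand-ins, not code from this commit, and the core-count handling is tightened so a machine reporting zero or one core still gets a sane batch size.

#include <algorithm>
#include <functional>
#include <iostream>
#include <memory>
#include <thread>
#include <vector>

// Slice [0, max_index) into batches and hand each batch to a copy of
// reduce_function, mirroring the ParallelReduce shown in the hunk above.
static void ParallelReduceSketch(std::function<void(size_t, size_t)>& reduce_function,
                                 size_t max_index) {
    size_t reduce_batch = 1000; // illustrative stand-in for PARALLEL_REDUCE_BATCH
    unsigned int hw = std::thread::hardware_concurrency();
    if(hw > 1) {
        reduce_batch = max_index/(hw - 1) + 1; // leave one core free, as the code above intends
    }

    std::vector<std::shared_ptr<std::thread>> thread_array;
    size_t from_index = 0;
    while(from_index < max_index) {
        size_t to_index = std::min(from_index + reduce_batch, max_index);
        thread_array.push_back(
            std::make_shared<std::thread>(reduce_function, from_index, to_index));
        from_index = to_index;
    }

    for(auto& thread_ptr : thread_array) {
        thread_ptr->join(); // block until every batch has been reduced
    }
}

int main() {
    // Workers own disjoint index ranges, so no locking is needed as long
    // as each worker only writes results[i] for i in [from, to).
    std::vector<int> results(25000);
    std::function<void(size_t, size_t)> worker = [&](size_t from, size_t to) {
        for(size_t i = from; i < to; i++) {
            results[i] = static_cast<int>(i) * 2;
        }
    };
    ParallelReduceSketch(worker, results.size());
    std::cout << "results[24999] = " << results[24999] << std::endl;
    return 0;
}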

void CollectDurationMetrics(int index_type, double total_time) {
    switch(index_type) {
        case meta::TableFileSchema::RAW: {
@ -30,11 +76,20 @@ void CollectDurationMetrics(int index_type, double total_time) {
    }
}

std::string GetMetricType() {
    server::ServerConfig &config = server::ServerConfig::GetInstance();
    server::ConfigNode& engine_config = config.GetConfig(server::CONFIG_ENGINE);
    return engine_config.GetValue(server::CONFIG_METRICTYPE, "L2");
}

}

SearchTask::SearchTask()
    : IScheduleTask(ScheduleTaskType::kSearch) {

    std::string metric_type = GetMetricType();
    if(metric_type != "L2") {
        metric_l2 = false;
    }
}

std::shared_ptr<IScheduleTask> SearchTask::Execute() {
@ -42,10 +97,10 @@ std::shared_ptr<IScheduleTask> SearchTask::Execute() {
        return nullptr;
    }

    SERVER_LOG_INFO << "Searching in index(" << index_id_<< ") with "
    ENGINE_LOG_DEBUG << "Searching in file id:" << index_id_<< " with "
        << search_contexts_.size() << " tasks";

    server::TimeRecorder rc("DoSearch index(" + std::to_string(index_id_) + ")");
    server::TimeRecorder rc("DoSearch file id:" + std::to_string(index_id_));

    auto start_time = METRICS_NOW_TIME;

@ -62,20 +117,25 @@ std::shared_ptr<IScheduleTask> SearchTask::Execute() {
            index_engine_->Search(context->nq(), context->vectors(), inner_k, output_distence.data(),
                                  output_ids.data());

            rc.Record("do search");
            double span = rc.RecordSection("do search for context:" + context->Identity());
            context->AccumSearchCost(span);

            //step 3: cluster result
            SearchContext::ResultSet result_set;
            auto spec_k = index_engine_->Count() < context->topk() ? index_engine_->Count() : context->topk();
            SearchTask::ClusterResult(output_ids, output_distence, context->nq(), spec_k, result_set);
            rc.Record("cluster result");

            span = rc.RecordSection("cluster result for context:" + context->Identity());
            context->AccumReduceCost(span);

            //step 4: pick up topk result
            SearchTask::TopkResult(result_set, inner_k, context->GetResult());
            rc.Record("reduce topk");
            SearchTask::TopkResult(result_set, inner_k, metric_l2, context->GetResult());

            span = rc.RecordSection("reduce topk for context:" + context->Identity());
            context->AccumReduceCost(span);

        } catch (std::exception& ex) {
            SERVER_LOG_ERROR << "SearchTask encounter exception: " << ex.what();
            ENGINE_LOG_ERROR << "SearchTask encounter exception: " << ex.what();
            context->IndexSearchDone(index_id_);//mark as done avoid dead lock, even search failed
            continue;
        }
@ -88,7 +148,7 @@ std::shared_ptr<IScheduleTask> SearchTask::Execute() {
    auto total_time = METRICS_MICROSECONDS(start_time, end_time);
    CollectDurationMetrics(index_type_, total_time);

    rc.Elapse("totally cost");
    rc.ElapseFromBegin("totally cost");

    return nullptr;
}
@ -98,26 +158,35 @@ Status SearchTask::ClusterResult(const std::vector<long> &output_ids,
                                 uint64_t nq,
                                 uint64_t topk,
                                 SearchContext::ResultSet &result_set) {
    if(output_ids.size() != nq*topk || output_distence.size() != nq*topk) {
    if(output_ids.size() < nq*topk || output_distence.size() < nq*topk) {
        std::string msg = "Invalid id array size: " + std::to_string(output_ids.size()) +
            " distance array size: " + std::to_string(output_distence.size());
        SERVER_LOG_ERROR << msg;
        ENGINE_LOG_ERROR << msg;
        return Status::Error(msg);
    }

    result_set.clear();
    result_set.reserve(nq);
    for (auto i = 0; i < nq; i++) {
        SearchContext::Id2DistanceMap id_distance;
        id_distance.reserve(topk);
        for (auto k = 0; k < topk; k++) {
            uint64_t index = i * topk + k;
            if(output_ids[index] < 0) {
                continue;
    result_set.resize(nq);

    std::function<void(size_t, size_t)> reduce_worker = [&](size_t from_index, size_t to_index) {
        for (auto i = from_index; i < to_index; i++) {
            SearchContext::Id2DistanceMap id_distance;
            id_distance.reserve(topk);
            for (auto k = 0; k < topk; k++) {
                uint64_t index = i * topk + k;
                if(output_ids[index] < 0) {
                    continue;
                }
                id_distance.push_back(std::make_pair(output_ids[index], output_distence[index]));
            }
            id_distance.push_back(std::make_pair(output_ids[index], output_distence[index]));
            result_set[i] = id_distance;
        }
        result_set.emplace_back(id_distance);
    };

    if(NeedParallelReduce(nq, topk)) {
        ParallelReduce(reduce_worker, nq);
    } else {
        reduce_worker(0, nq);
    }

    return Status::OK();
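
As context for the hunk above: ClusterResult regroups the flat nq*topk id/distance arrays returned by the engine into one list per query, skipping negative ids (padding emitted when an index holds fewer than topk vectors). Below is a standalone sketch of that regrouping; the type aliases and helper name are illustrative stand-ins for SearchContext::Id2DistanceMap/ResultSet, and the real code additionally runs the loop through the reduce_worker/ParallelReduce path shown above.

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Illustrative stand-ins for the engine's result containers.
using Id2DistanceList = std::vector<std::pair<long, double>>;
using ResultSet = std::vector<Id2DistanceList>;

// Regroup flat nq*topk arrays into one (id, distance) list per query,
// dropping the negative-id padding entries.
ResultSet ClusterResultSketch(const std::vector<long>& ids,
                              const std::vector<double>& distances,
                              uint64_t nq, uint64_t topk) {
    ResultSet result_set(nq);
    for (uint64_t i = 0; i < nq; i++) {
        Id2DistanceList id_distance;
        id_distance.reserve(topk);
        for (uint64_t k = 0; k < topk; k++) {
            uint64_t index = i * topk + k;
            if (ids[index] < 0) {
                continue; // padding when fewer than topk hits exist
            }
            id_distance.emplace_back(ids[index], distances[index]);
        }
        result_set[i] = std::move(id_distance);
    }
    return result_set;
}

int main() {
    // 2 queries, topk = 2; the second query has only one valid hit.
    std::vector<long> ids = {7, 3, 9, -1};
    std::vector<double> dist = {0.1, 0.4, 0.2, 0.0};
    ResultSet rs = ClusterResultSketch(ids, dist, 2, 2);
    std::cout << rs[0].size() << " hits for query 0, "
              << rs[1].size() << " hit for query 1" << std::endl;
    return 0;
}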
@ -125,10 +194,11 @@ Status SearchTask::ClusterResult(const std::vector<long> &output_ids,

Status SearchTask::MergeResult(SearchContext::Id2DistanceMap &distance_src,
                               SearchContext::Id2DistanceMap &distance_target,
                               uint64_t topk) {
                               uint64_t topk,
                               bool ascending) {
    //Note: the score_src and score_target are already arranged by score in ascending order
    if(distance_src.empty()) {
        SERVER_LOG_WARNING << "Empty distance source array";
        ENGINE_LOG_WARNING << "Empty distance source array";
        return Status::OK();
    }

@ -161,15 +231,27 @@ Status SearchTask::MergeResult(SearchContext::Id2DistanceMap &distance_src,
            break;
        }

        //compare score, put smallest score to score_merged one by one
        //compare score,
        // if ascending = true, put smallest score to score_merged one by one
        // else, put largest score to score_merged one by one
        auto& src_pair = distance_src[src_index];
        auto& target_pair = distance_target[target_index];
        if(src_pair.second > target_pair.second) {
            distance_merged.push_back(target_pair);
            target_index++;
        if(ascending){
            if(src_pair.second > target_pair.second) {
                distance_merged.push_back(target_pair);
                target_index++;
            } else {
                distance_merged.push_back(src_pair);
                src_index++;
            }
        } else {
            distance_merged.push_back(src_pair);
            src_index++;
            if(src_pair.second < target_pair.second) {
                distance_merged.push_back(target_pair);
                target_index++;
            } else {
                distance_merged.push_back(src_pair);
                src_index++;
            }
        }

        //score_merged.size() already equal topk
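
The ascending flag added here appears to track the metric type that the metric_l2 member threads through SearchTask in this diff: for L2 distance the best hits have the smallest scores, so two already-sorted lists merge ascending, while for other metrics such as inner product the merge runs descending. A minimal sketch of the two-pointer merge under that assumption; the alias and function name below are illustrative, not part of this commit.

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

using Id2DistanceList = std::vector<std::pair<long, double>>; // illustrative alias

// Two-pointer merge of two lists already sorted by distance, keeping at
// most topk entries; `ascending` selects which end counts as "best".
Id2DistanceList MergeSketch(const Id2DistanceList& src,
                            const Id2DistanceList& target,
                            uint64_t topk, bool ascending) {
    Id2DistanceList merged;
    merged.reserve(topk);
    size_t s = 0, t = 0;
    while (merged.size() < topk && (s < src.size() || t < target.size())) {
        if (s >= src.size()) { merged.push_back(target[t++]); continue; }
        if (t >= target.size()) { merged.push_back(src[s++]); continue; }
        bool take_target = ascending ? (src[s].second > target[t].second)
                                     : (src[s].second < target[t].second);
        merged.push_back(take_target ? target[t++] : src[s++]);
    }
    return merged;
}

int main() {
    Id2DistanceList a = {{1, 0.1}, {2, 0.5}};
    Id2DistanceList b = {{3, 0.2}, {4, 0.3}};
    for (auto& p : MergeSketch(a, b, 3, true)) {
        std::cout << p.first << ":" << p.second << " "; // prints 1:0.1 3:0.2 4:0.3
    }
    std::cout << std::endl;
    return 0;
}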
@ -185,6 +267,7 @@ Status SearchTask::MergeResult(SearchContext::Id2DistanceMap &distance_src,

Status SearchTask::TopkResult(SearchContext::ResultSet &result_src,
                              uint64_t topk,
                              bool ascending,
                              SearchContext::ResultSet &result_target) {
    if (result_target.empty()) {
        result_target.swap(result_src);
@ -193,14 +276,22 @@ Status SearchTask::TopkResult(SearchContext::ResultSet &result_src,

    if (result_src.size() != result_target.size()) {
        std::string msg = "Invalid result set size";
        SERVER_LOG_ERROR << msg;
        ENGINE_LOG_ERROR << msg;
        return Status::Error(msg);
    }

    for (size_t i = 0; i < result_src.size(); i++) {
        SearchContext::Id2DistanceMap &score_src = result_src[i];
        SearchContext::Id2DistanceMap &score_target = result_target[i];
        SearchTask::MergeResult(score_src, score_target, topk);
    std::function<void(size_t, size_t)> ReduceWorker = [&](size_t from_index, size_t to_index) {
        for (size_t i = from_index; i < to_index; i++) {
            SearchContext::Id2DistanceMap &score_src = result_src[i];
            SearchContext::Id2DistanceMap &score_target = result_target[i];
            SearchTask::MergeResult(score_src, score_target, topk, ascending);
        }
    };

    if(NeedParallelReduce(result_src.size(), topk)) {
        ParallelReduce(ReduceWorker, result_src.size());
    } else {
        ReduceWorker(0, result_src.size());
    }

    return Status::OK();

@ -27,10 +27,12 @@ public:

    static Status MergeResult(SearchContext::Id2DistanceMap &distance_src,
                              SearchContext::Id2DistanceMap &distance_target,
                              uint64_t topk);
                              uint64_t topk,
                              bool ascending);

    static Status TopkResult(SearchContext::ResultSet &result_src,
                             uint64_t topk,
                             bool ascending,
                             SearchContext::ResultSet &result_target);

public:
@ -38,6 +40,7 @@ public:
    int index_type_ = 0; //for metrics
    ExecutionEnginePtr index_engine_;
    std::vector<SearchContextPtr> search_contexts_;
    bool metric_l2 = true;
};

using SearchTaskPtr = std::shared_ptr<SearchTask>;

@ -8,6 +8,7 @@

#include <iostream>
#include <time.h>
#include <chrono>
#include <unistd.h>

using namespace ::milvus;
@ -21,7 +22,8 @@ namespace {
    static constexpr int64_t NQ = 10;
    static constexpr int64_t TOP_K = 10;
    static constexpr int64_t SEARCH_TARGET = 5000; //change this value, result is different
    static constexpr int64_t ADD_VECTOR_LOOP = 5;
    static constexpr int64_t ADD_VECTOR_LOOP = 10;
    static constexpr int64_t SECONDS_EACH_HOUR = 3600;

#define BLOCK_SPLITER std::cout << "===========================================" << std::endl;

@ -34,26 +36,17 @@ namespace {
        BLOCK_SPLITER
    }

    void PrintRecordIdArray(const std::vector<int64_t>& record_ids) {
        BLOCK_SPLITER
        std::cout << "Returned id array count: " << record_ids.size() << std::endl;
#if 0
        for(auto id : record_ids) {
            std::cout << std::to_string(id) << std::endl;
        }
#endif
        BLOCK_SPLITER
    }

    void PrintSearchResult(const std::vector<TopKQueryResult>& topk_query_result_array) {
    void PrintSearchResult(const std::vector<std::pair<int64_t, RowRecord>>& search_record_array,
                           const std::vector<TopKQueryResult>& topk_query_result_array) {
        BLOCK_SPLITER
        std::cout << "Returned result count: " << topk_query_result_array.size() << std::endl;

        int32_t index = 0;
        for(auto& result : topk_query_result_array) {
            auto search_id = search_record_array[index].first;
            index++;
            std::cout << "No." << std::to_string(index) << " vector top "
                      << std::to_string(result.query_result_arrays.size())
            std::cout << "No." << std::to_string(index) << " vector " << std::to_string(search_id)
                      << " top " << std::to_string(result.query_result_arrays.size())
                      << " search result:" << std::endl;
            for(auto& item : result.query_result_arrays) {
                std::cout << "\t" << std::to_string(item.id) << "\tdistance:" << std::to_string(item.distance);
@ -67,7 +60,7 @@ namespace {
    std::string CurrentTime() {
        time_t tt;
        time( &tt );
        tt = tt + 8*3600;
        tt = tt + 8*SECONDS_EACH_HOUR;
        tm* t= gmtime( &tt );

        std::string str = std::to_string(t->tm_year + 1900) + "_" + std::to_string(t->tm_mon + 1)
@ -77,10 +70,11 @@ namespace {
        return str;
    }

    std::string CurrentTmDate() {
    std::string CurrentTmDate(int64_t offset_day = 0) {
        time_t tt;
        time( &tt );
        tt = tt + 8*3600;
        tt = tt + 8*SECONDS_EACH_HOUR;
        tt = tt + 24*SECONDS_EACH_HOUR*offset_day;
        tm* t= gmtime( &tt );

        std::string str = std::to_string(t->tm_year + 1900) + "-" + std::to_string(t->tm_mon + 1)
@ -91,13 +85,13 @@ namespace {

    std::string GetTableName() {
        static std::string s_id(CurrentTime());
        return s_id;
        return "tbl_" + s_id;
    }

    TableSchema BuildTableSchema() {
        TableSchema tb_schema;
        tb_schema.table_name = TABLE_NAME;
        tb_schema.index_type = IndexType::cpu_idmap;
        tb_schema.index_type = IndexType::gpu_ivfflat;
        tb_schema.dimension = TABLE_DIMENSION;
        tb_schema.store_raw_vector = true;

@ -126,6 +120,66 @@ namespace {
        std::cout << "Waiting " << seconds << " seconds ..." << std::endl;
        sleep(seconds);
    }

    class TimeRecorder {
    public:
        TimeRecorder(const std::string& title)
            : title_(title) {
            start_ = std::chrono::system_clock::now();
        }

        ~TimeRecorder() {
            std::chrono::system_clock::time_point end = std::chrono::system_clock::now();
            long span = (std::chrono::duration_cast<std::chrono::milliseconds> (end - start_)).count();
            std::cout << title_ << " totally cost: " << span << " ms" << std::endl;
        }

    private:
        std::string title_;
        std::chrono::system_clock::time_point start_;
    };

    void CheckResult(const std::vector<std::pair<int64_t, RowRecord>>& search_record_array,
                     const std::vector<TopKQueryResult>& topk_query_result_array) {
        BLOCK_SPLITER
        int64_t index = 0;
        for(auto& result : topk_query_result_array) {
            auto result_id = result.query_result_arrays[0].id;
            auto search_id = search_record_array[index++].first;
            if(result_id != search_id) {
                std::cout << "The top 1 result is wrong: " << result_id
                          << " vs. " << search_id << std::endl;
            } else {
                std::cout << "No." << index-1 << " Check result successfully" << std::endl;
            }
        }
        BLOCK_SPLITER
    }

    void DoSearch(std::shared_ptr<Connection> conn,
                  const std::vector<std::pair<int64_t, RowRecord>>& search_record_array,
                  const std::string& phase_name) {
        std::vector<Range> query_range_array;
        Range rg;
        rg.start_value = CurrentTmDate();
        rg.end_value = CurrentTmDate(1);
        query_range_array.emplace_back(rg);

        std::vector<RowRecord> record_array;
        for(auto& pair : search_record_array) {
            record_array.push_back(pair.second);
        }

        std::vector<TopKQueryResult> topk_query_result_array;
        {
            TimeRecorder rc(phase_name);
            Status stat = conn->SearchVector(TABLE_NAME, record_array, query_range_array, TOP_K, topk_query_result_array);
            std::cout << "SearchVector function call status: " << stat.ToString() << std::endl;
        }

        PrintSearchResult(search_record_array, topk_query_result_array);
        CheckResult(search_record_array, topk_query_result_array);
    }
}

void
@ -179,30 +233,40 @@ ClientTest::Test(const std::string& address, const std::string& port) {
        PrintTableSchema(tb_schema);
    }

    for(int i = 0; i < ADD_VECTOR_LOOP; i++){//add vectors
    //add vectors
    std::vector<std::pair<int64_t, RowRecord>> search_record_array;
    for (int i = 0; i < ADD_VECTOR_LOOP; i++) {
        TimeRecorder recorder("Add vector No." + std::to_string(i));
        std::vector<RowRecord> record_array;
        BuildVectors(i*BATCH_ROW_COUNT, (i+1)*BATCH_ROW_COUNT, record_array);
        int64_t begin_index = i * BATCH_ROW_COUNT;
        BuildVectors(begin_index, begin_index + BATCH_ROW_COUNT, record_array);
        std::vector<int64_t> record_ids;
        Status stat = conn->AddVector(TABLE_NAME, record_array, record_ids);
        std::cout << "AddVector function call status: " << stat.ToString() << std::endl;
        PrintRecordIdArray(record_ids);
        std::cout << "Returned id array count: " << record_ids.size() << std::endl;

        if(i == 0) {
            for(int64_t k = SEARCH_TARGET; k < SEARCH_TARGET + NQ; k++) {
                search_record_array.push_back(
                    std::make_pair(record_ids[k], record_array[k]));
            }
        }
    }

    {//search vectors
    {//search vectors without index
        Sleep(2);
        DoSearch(conn, search_record_array, "Search without index");
    }

    std::vector<RowRecord> record_array;
    BuildVectors(SEARCH_TARGET, SEARCH_TARGET + NQ, record_array);
    {//wait until build index finish
        TimeRecorder recorder("Build index");
        std::cout << "Wait until build all index done" << std::endl;
        Status stat = conn->BuildIndex(TABLE_NAME);
        std::cout << "BuildIndex function call status: " << stat.ToString() << std::endl;
    }

    std::vector<Range> query_range_array;
    Range rg;
    rg.start_value = CurrentTmDate();
    rg.end_value = CurrentTmDate();
    query_range_array.emplace_back(rg);
    std::vector<TopKQueryResult> topk_query_result_array;
    Status stat = conn->SearchVector(TABLE_NAME, record_array, query_range_array, TOP_K, topk_query_result_array);
    std::cout << "SearchVector function call status: " << stat.ToString() << std::endl;
    PrintSearchResult(topk_query_result_array);
    {//search vectors after build index finish
        DoSearch(conn, search_record_array, "Search after build index finish");
    }

    {//delete table

@ -18,6 +18,7 @@ enum class IndexType {
    invalid = 0,
    cpu_idmap,
    gpu_ivfflat,
    gpu_ivfsq8,
};

/**
@ -180,6 +181,17 @@ public:
    virtual Status DeleteTable(const std::string &table_name) = 0;


    /**
     * @brief Build index method
     *
     * This method is used to build index for whole table
     *
     * @param table_name, name of the table to build index for.
     *
     * @return Indicate if build index successfully.
     */
    virtual Status BuildIndex(const std::string &table_name) = 0;

    /**
     * @brief Add vector to table
     *

@ -126,6 +126,22 @@ ClientProxy::DeleteTable(const std::string &table_name) {
    return Status::OK();
}

Status
ClientProxy::BuildIndex(const std::string &table_name) {
    if(!IsConnected()) {
        return Status(StatusCode::NotConnected, "not connected to server");
    }

    try {
        ClientPtr()->interface()->BuildIndex(table_name);

    } catch ( std::exception& ex) {
        return Status(StatusCode::UnknownError, "failed to build index: " + std::string(ex.what()));
    }

    return Status::OK();
}

Status
ClientProxy::AddVector(const std::string &table_name,
                       const std::vector<RowRecord> &record_array,
@ -193,17 +209,25 @@ ClientProxy::SearchVector(const std::string &table_name,
    }

    //step 3: search vectors
    std::vector<thrift::TopKQueryResult> result_array;
    ClientPtr()->interface()->SearchVector(result_array, table_name, thrift_records, thrift_ranges, topk);
    std::vector<thrift::TopKQueryBinResult> result_array;
    ClientPtr()->interface()->SearchVector2(result_array, table_name, thrift_records, thrift_ranges, topk);

    //step 4: convert result array
    for(auto& thrift_topk_result : result_array) {
        TopKQueryResult result;

        for(auto& thrift_query_result : thrift_topk_result.query_result_arrays) {
        size_t id_count = thrift_topk_result.id_array.size()/sizeof(int64_t);
        size_t dist_count = thrift_topk_result.distance_array.size()/ sizeof(double);
        if(id_count != dist_count) {
            return Status(StatusCode::UnknownError, "illegal result");
        }

        int64_t* id_ptr = (int64_t*)thrift_topk_result.id_array.data();
        double* dist_ptr = (double*)thrift_topk_result.distance_array.data();
        for(size_t i = 0; i < id_count; i++) {
            QueryResult query_result;
            query_result.id = thrift_query_result.id;
            query_result.distance = thrift_query_result.distance;
            query_result.id = id_ptr[i];
            query_result.distance = dist_ptr[i];
            result.query_result_arrays.emplace_back(query_result);
        }

@ -27,6 +27,8 @@ public:

    virtual Status DeleteTable(const std::string &table_name) override;

    virtual Status BuildIndex(const std::string &table_name) override;

    virtual Status AddVector(const std::string &table_name,
                             const std::vector<RowRecord> &record_array,
                             std::vector<int64_t> &id_array) override;

@ -66,6 +66,11 @@ ConnectionImpl::DeleteTable(const std::string &table_name) {
    return client_proxy_->DeleteTable(table_name);
}

Status
ConnectionImpl::BuildIndex(const std::string &table_name) {
    return client_proxy_->BuildIndex(table_name);
}

Status
ConnectionImpl::AddVector(const std::string &table_name,
                          const std::vector<RowRecord> &record_array,

@ -29,6 +29,8 @@ public:

    virtual Status DeleteTable(const std::string &table_name) override;

    virtual Status BuildIndex(const std::string &table_name) override;

    virtual Status AddVector(const std::string &table_name,
                             const std::vector<RowRecord> &record_array,
                             std::vector<int64_t> &id_array) override;

@ -8,6 +8,7 @@
#include "ServerConfig.h"
#include "utils/CommonUtil.h"
#include "utils/Log.h"
#include "utils/StringHelpFunctions.h"

namespace zilliz {
namespace milvus {
@ -15,23 +16,30 @@ namespace server {

DBWrapper::DBWrapper() {
    zilliz::milvus::engine::Options opt;
    ConfigNode& config = ServerConfig::GetInstance().GetConfig(CONFIG_DB);
    opt.meta.backend_uri = config.GetValue(CONFIG_DB_URL);
    std::string db_path = config.GetValue(CONFIG_DB_PATH);
    ConfigNode& db_config = ServerConfig::GetInstance().GetConfig(CONFIG_DB);
    opt.meta.backend_uri = db_config.GetValue(CONFIG_DB_URL);
    std::string db_path = db_config.GetValue(CONFIG_DB_PATH);
    opt.meta.path = db_path + "/db";
    int64_t index_size = config.GetInt64Value(CONFIG_DB_INDEX_TRIGGER_SIZE);

    std::string db_slave_path = db_config.GetValue(CONFIG_DB_SLAVE_PATH);
    StringHelpFunctions::SplitStringByDelimeter(db_slave_path, ";", opt.meta.slave_paths);

    int64_t index_size = db_config.GetInt64Value(CONFIG_DB_INDEX_TRIGGER_SIZE);
    if(index_size > 0) {//ensure larger than zero, unit is MB
        opt.index_trigger_size = (size_t)index_size * engine::ONE_MB;
    }
    float maximum_memory = config.GetFloatValue(CONFIG_MAXMIMUM_MEMORY);
    if (maximum_memory > 1.0) {
        opt.maximum_memory = maximum_memory * engine::ONE_GB;
    int64_t insert_buffer_size = db_config.GetInt64Value(CONFIG_DB_INSERT_BUFFER_SIZE, 4);
    if (insert_buffer_size >= 1) {
        opt.insert_buffer_size = insert_buffer_size * engine::ONE_GB;
    }
    else {
        std::cout << "ERROR: maximum_memory should be at least 1 GB" << std::endl;
        std::cout << "ERROR: insert_buffer_size should be at least 1 GB" << std::endl;
        kill(0, SIGUSR1);
    }

    ConfigNode& cache_config = ServerConfig::GetInstance().GetConfig(CONFIG_CACHE);
    opt.insert_cache_immediately_ = cache_config.GetBoolValue(CONFIG_INSERT_CACHE_IMMEDIATELY, false);

    ConfigNode& serverConfig = ServerConfig::GetInstance().GetConfig(CONFIG_SERVER);
    std::string mode = serverConfig.GetValue(CONFIG_CLUSTER_MODE, "single");
    if (mode == "single") {
@ -50,8 +58,8 @@ DBWrapper::DBWrapper() {

    //set archive config
    engine::ArchiveConf::CriteriaT criterial;
    int64_t disk = config.GetInt64Value(CONFIG_DB_ARCHIVE_DISK, 0);
    int64_t days = config.GetInt64Value(CONFIG_DB_ARCHIVE_DAYS, 0);
    int64_t disk = db_config.GetInt64Value(CONFIG_DB_ARCHIVE_DISK, 0);
    int64_t days = db_config.GetInt64Value(CONFIG_DB_ARCHIVE_DAYS, 0);
    if(disk > 0) {
        criterial[engine::ARCHIVE_CONF_DISK] = disk;
    }
@ -67,6 +75,14 @@ DBWrapper::DBWrapper() {
        kill(0, SIGUSR1);
    }

    for(auto& path : opt.meta.slave_paths) {
        err = CommonUtil::CreateDirectory(path);
        if(err != SERVER_SUCCESS) {
            std::cout << "ERROR! Failed to create database slave path: " << path << std::endl;
            kill(0, SIGUSR1);
        }
    }

    std::string msg = opt.meta.path;
    try {
        zilliz::milvus::engine::DB::Open(opt, &db_);

@ -8,9 +8,11 @@
#include "ServerConfig.h"
#include "ThreadPoolServer.h"
#include "DBWrapper.h"
#include "utils/Log.h"

#include "milvus_types.h"
#include "milvus_constants.h"
#include "faiss/utils.h"

#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/protocol/TJSONProtocol.h>
@ -25,6 +27,8 @@
#include <thread>
#include <iostream>

//extern int distance_compute_blas_threshold;

namespace zilliz {
namespace milvus {
namespace server {
@ -46,11 +50,12 @@ MilvusServer::StartService() {

    ServerConfig &config = ServerConfig::GetInstance();
    ConfigNode server_config = config.GetConfig(CONFIG_SERVER);

    ConfigNode engine_config = config.GetConfig(CONFIG_ENGINE);
    std::string address = server_config.GetValue(CONFIG_SERVER_ADDRESS, "127.0.0.1");
    int32_t port = server_config.GetInt32Value(CONFIG_SERVER_PORT, 19530);
    std::string protocol = server_config.GetValue(CONFIG_SERVER_PROTOCOL, "binary");

    faiss::distance_compute_blas_threshold = engine_config.GetInt32Value(CONFIG_DCBT,20);
    // std::cout<<"distance_compute_blas_threshold = "<< faiss::distance_compute_blas_threshold << std::endl;
    try {
        DBWrapper::DB();//initialize db

@ -71,7 +76,7 @@ MilvusServer::StartService() {
        return;
    }

    stdcxx::shared_ptr<ThreadManager> threadManager(ThreadManager::newSimpleThreadManager());
    stdcxx::shared_ptr<ThreadManager> threadManager(ThreadManager::newSimpleThreadManager(16));
    stdcxx::shared_ptr<PosixThreadFactory> threadFactory(new PosixThreadFactory());
    threadManager->threadFactory(threadFactory);
    threadManager->start();

@ -39,6 +39,12 @@ RequestHandler::DeleteTable(const std::string &table_name) {
    RequestScheduler::ExecTask(task_ptr);
}

void
RequestHandler::BuildIndex(const std::string &table_name) {
    BaseTaskPtr task_ptr = BuildIndexTask::Create(table_name);
    RequestScheduler::ExecTask(task_ptr);
}

void
RequestHandler::AddVector(std::vector<int64_t> &_return,
                          const std::string &table_name,
@ -54,11 +60,22 @@ RequestHandler::SearchVector(std::vector<thrift::TopKQueryResult> &_return,
                             const std::vector<thrift::Range> &query_range_array,
                             const int64_t topk) {
//    SERVER_LOG_DEBUG << "Entering RequestHandler::SearchVector";
    BaseTaskPtr task_ptr = SearchVectorTask::Create(table_name, std::vector<std::string>(), query_record_array,
    BaseTaskPtr task_ptr = SearchVectorTask1::Create(table_name, std::vector<std::string>(), query_record_array,
                                                    query_range_array, topk, _return);
    RequestScheduler::ExecTask(task_ptr);
}

void
RequestHandler::SearchVector2(std::vector<thrift::TopKQueryBinResult> & _return,
                              const std::string& table_name,
                              const std::vector<thrift::RowRecord> & query_record_array,
                              const std::vector<thrift::Range> & query_range_array,
                              const int64_t topk) {
    BaseTaskPtr task_ptr = SearchVectorTask2::Create(table_name, std::vector<std::string>(), query_record_array,
                                                     query_range_array, topk, _return);
    RequestScheduler::ExecTask(task_ptr);
}

void
RequestHandler::SearchVectorInFiles(std::vector<::milvus::thrift::TopKQueryResult> &_return,
                                    const std::string& table_name,
@ -67,7 +84,7 @@ RequestHandler::SearchVectorInFiles(std::vector<::milvus::thrift::TopKQueryResul
                                    const std::vector<::milvus::thrift::Range> &query_range_array,
                                    const int64_t topk) {
//    SERVER_LOG_DEBUG << "Entering RequestHandler::SearchVectorInFiles. file_id_array size = " << std::to_string(file_id_array.size());
    BaseTaskPtr task_ptr = SearchVectorTask::Create(table_name, file_id_array, query_record_array,
    BaseTaskPtr task_ptr = SearchVectorTask1::Create(table_name, file_id_array, query_record_array,
                                                    query_range_array, topk, _return);
    RequestScheduler::ExecTask(task_ptr);
}

@ -54,6 +54,18 @@ public:
     */
    void DeleteTable(const std::string& table_name);

    /**
     * @brief build index by table method
     *
     * This method is used to build index by table in sync.
     *
     * @param table_name, name of the table to build index for.
     *
     *
     * @param table_name
     */
    void BuildIndex(const std::string &table_name);

    /**
     * @brief Add vector array to table
     *
@ -94,6 +106,29 @@ public:
                      const std::vector<::milvus::thrift::Range> & query_range_array,
                      const int64_t topk);

    /**
     * @brief Query vector
     *
     * This method is used to query vector in table.
     *
     * @param table_name, table_name is queried.
     * @param query_record_array, all vector are going to be queried.
     * @param query_range_array, optional ranges for conditional search. If not specified, search whole table
     * @param topk, how many similarity vectors will be searched.
     *
     * @return query binary result array.
     *
     * @param table_name
     * @param query_record_array
     * @param query_range_array
     * @param topk
     */
    void SearchVector2(std::vector<::milvus::thrift::TopKQueryBinResult> & _return,
                       const std::string& table_name,
                       const std::vector<::milvus::thrift::RowRecord> & query_record_array,
                       const std::vector<::milvus::thrift::Range> & query_range_array,
                       const int64_t topk);

    /**
     * @brief Internal use query interface
     *

@ -12,7 +12,7 @@
namespace zilliz {
namespace milvus {
namespace server {


using namespace ::milvus;

namespace {
@ -43,6 +43,7 @@ namespace {
        {SERVER_ILLEGAL_SEARCH_RESULT, thrift::ErrorCode::ILLEGAL_SEARCH_RESULT},
        {SERVER_CACHE_ERROR, thrift::ErrorCode::CACHE_FAILED},
        {DB_META_TRANSACTION_FAILED, thrift::ErrorCode::META_FAILED},
        {SERVER_BUILD_INDEX_ERROR, thrift::ErrorCode::BUILD_INDEX_ERROR},
    };

    return code_map;

@ -12,6 +12,10 @@
#include "DBWrapper.h"
#include "version.h"

#ifdef MILVUS_ENABLE_PROFILING
#include "gperftools/profiler.h"
#endif

namespace zilliz {
namespace milvus {
namespace server {
@ -30,7 +34,8 @@ namespace {
        static std::map<int, engine::EngineType> map_type = {
            {0, engine::EngineType::INVALID},
            {1, engine::EngineType::FAISS_IDMAP},
            {2, engine::EngineType::FAISS_IVFFLAT_GPU},
            {2, engine::EngineType::FAISS_IVFFLAT},
            {3, engine::EngineType::FAISS_IVFSQ8},
        };

        if(map_type.find(type) == map_type.end()) {
@ -44,7 +49,8 @@ namespace {
        static std::map<engine::EngineType, int> map_type = {
            {engine::EngineType::INVALID, 0},
            {engine::EngineType::FAISS_IDMAP, 1},
            {engine::EngineType::FAISS_IVFFLAT_GPU, 2},
            {engine::EngineType::FAISS_IVFFLAT, 2},
            {engine::EngineType::FAISS_IVFSQ8, 3},
        };

        if(map_type.find(type) == map_type.end()) {
@ -125,6 +131,18 @@ namespace {
            }
        }
    }

    std::string
    GetCurrTimeStr() {
        char tm_buf[20] = {0};
        time_t tt;
        time(&tt);
        tt = tt + 8 * 60 * 60;
        tm* t = gmtime(&tt);
        sprintf(tm_buf, "%4d%02d%02d_%02d%02d%02d", (t->tm_year+1900), (t->tm_mon+1), (t->tm_mday),
                (t->tm_hour), (t->tm_min), (t->tm_sec));
        return tm_buf;
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -144,20 +162,19 @@ ServerError CreateTableTask::OnExecute() {
    try {
        //step 1: check arguments
        ServerError res = SERVER_SUCCESS;
        res = ValidateTableName(schema_.table_name);
        res = ValidationUtil::ValidateTableName(schema_.table_name);
        if(res != SERVER_SUCCESS) {
            return res;
            return SetError(res, "Invalid table name: " + schema_.table_name);
        }

        res = ValidateTableDimension(schema_.dimension);
        res = ValidationUtil::ValidateTableDimension(schema_.dimension);
        if(res != SERVER_SUCCESS) {
            return res;
            return SetError(res, "Invalid table dimension: " + std::to_string(schema_.dimension));
        }

        res = ValidateTableIndexType(schema_.index_type);
SERVER_LOG_DEBUG << "Createtbale EngineTypee: " << schema_.index_type;
        res = ValidationUtil::ValidateTableIndexType(schema_.index_type);
        if(res != SERVER_SUCCESS) {
            return res;
            return SetError(res, "Invalid index type: " + std::to_string(schema_.index_type));
        }

        //step 2: construct table schema
@ -174,10 +191,11 @@ ServerError CreateTableTask::OnExecute() {
        }

    } catch (std::exception& ex) {
        SERVER_LOG_ERROR << "CreateTableTask encounter exception: " << ex.what();
        return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
    }

    rc.Record("done");
    rc.ElapseFromBegin("totally cost");

    return SERVER_SUCCESS;
}
@ -200,9 +218,9 @@ ServerError DescribeTableTask::OnExecute() {
    try {
        //step 1: check arguments
        ServerError res = SERVER_SUCCESS;
        res = ValidateTableName(table_name_);
        res = ValidationUtil::ValidateTableName(table_name_);
        if(res != SERVER_SUCCESS) {
            return res;
            return SetError(res, "Invalid table name: " + table_name_);
        }

        //step 2: get table info
@ -219,10 +237,53 @@ ServerError DescribeTableTask::OnExecute() {
        schema_.store_raw_vector = table_info.store_raw_data_;

    } catch (std::exception& ex) {
        SERVER_LOG_ERROR << "DescribeTableTask encounter exception: " << ex.what();
        return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
    }

    rc.Record("done");
    rc.ElapseFromBegin("totally cost");

    return SERVER_SUCCESS;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
BuildIndexTask::BuildIndexTask(const std::string& table_name)
    : BaseTask(DDL_DML_TASK_GROUP),
      table_name_(table_name) {
}

BaseTaskPtr BuildIndexTask::Create(const std::string& table_name) {
    return std::shared_ptr<BaseTask>(new BuildIndexTask(table_name));
}

ServerError BuildIndexTask::OnExecute() {
    try {
        TimeRecorder rc("BuildIndexTask");

        //step 1: check arguments
        ServerError res = SERVER_SUCCESS;
        res = ValidationUtil::ValidateTableName(table_name_);
        if(res != SERVER_SUCCESS) {
            return SetError(res, "Invalid table name: " + table_name_);
        }

        bool has_table = false;
        engine::Status stat = DBWrapper::DB()->HasTable(table_name_, has_table);
        if(!has_table) {
            return SetError(SERVER_TABLE_NOT_EXIST, "Table " + table_name_ + " not exists");
        }

        //step 2: build index
|
||||
stat = DBWrapper::DB()->BuildIndex(table_name_);
|
||||
if(!stat.ok()) {
|
||||
return SetError(SERVER_BUILD_INDEX_ERROR, "Engine failed: " + stat.ToString());
|
||||
}
|
||||
|
||||
rc.ElapseFromBegin("totally cost");
|
||||
} catch (std::exception& ex) {
|
||||
SERVER_LOG_ERROR << "BuildIndexTask encounter exception: " << ex.what();
|
||||
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
|
||||
}
|
||||
|
||||
return SERVER_SUCCESS;
|
||||
}
|
||||
@ -245,9 +306,9 @@ ServerError HasTableTask::OnExecute() {
|
||||
|
||||
//step 1: check arguments
|
||||
ServerError res = SERVER_SUCCESS;
|
||||
res = ValidateTableName(table_name_);
|
||||
res = ValidationUtil::ValidateTableName(table_name_);
|
||||
if(res != SERVER_SUCCESS) {
|
||||
return res;
|
||||
return SetError(res, "Invalid table name: " + table_name_);
|
||||
}
|
||||
//step 2: check table existence
|
||||
engine::Status stat = DBWrapper::DB()->HasTable(table_name_, has_table_);
|
||||
@ -255,8 +316,9 @@ ServerError HasTableTask::OnExecute() {
|
||||
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
|
||||
}
|
||||
|
||||
rc.Elapse("totally cost");
|
||||
rc.ElapseFromBegin("totally cost");
|
||||
} catch (std::exception& ex) {
|
||||
SERVER_LOG_ERROR << "HasTableTask encounter exception: " << ex.what();
|
||||
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
|
||||
}
|
||||
|
||||
@ -280,9 +342,9 @@ ServerError DeleteTableTask::OnExecute() {
|
||||
|
||||
//step 1: check arguments
|
||||
ServerError res = SERVER_SUCCESS;
|
||||
res = ValidateTableName(table_name_);
|
||||
res = ValidationUtil::ValidateTableName(table_name_);
|
||||
if(res != SERVER_SUCCESS) {
|
||||
return res;
|
||||
return SetError(res, "Invalid table name: " + table_name_);
|
||||
}
|
||||
|
||||
//step 2: check table existence
|
||||
@ -297,8 +359,6 @@ ServerError DeleteTableTask::OnExecute() {
|
||||
}
|
||||
}
|
||||
|
||||
rc.Record("check validation");
|
||||
|
||||
//step 3: delete table
|
||||
std::vector<DB_DATE> dates;
|
||||
stat = DBWrapper::DB()->DeleteTable(table_name_, dates);
|
||||
@ -306,9 +366,9 @@ ServerError DeleteTableTask::OnExecute() {
|
||||
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
|
||||
}
|
||||
|
||||
rc.Record("deleta table");
|
||||
rc.Elapse("totally cost");
|
||||
rc.ElapseFromBegin("totally cost");
|
||||
} catch (std::exception& ex) {
|
||||
SERVER_LOG_ERROR << "DeleteTableTask encounter exception: " << ex.what();
|
||||
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
|
||||
}
|
||||
|
||||
@ -364,9 +424,9 @@ ServerError AddVectorTask::OnExecute() {
|
||||
|
||||
//step 1: check arguments
|
||||
ServerError res = SERVER_SUCCESS;
|
||||
res = ValidateTableName(table_name_);
|
||||
res = ValidationUtil::ValidateTableName(table_name_);
|
||||
if(res != SERVER_SUCCESS) {
|
||||
return res;
|
||||
return SetError(res, "Invalid table name: " + table_name_);
|
||||
}
|
||||
|
||||
if(record_array_.empty()) {
|
||||
@ -385,7 +445,13 @@ ServerError AddVectorTask::OnExecute() {
|
||||
}
|
||||
}
|
||||
|
||||
rc.Record("check validation");
|
||||
rc.RecordSection("check validation");
|
||||
|
||||
#ifdef MILVUS_ENABLE_PROFILING
|
||||
std::string fname = "/tmp/insert_" + std::to_string(this->record_array_.size()) +
|
||||
"_" + GetCurrTimeStr() + ".profiling";
|
||||
ProfilerStart(fname.c_str());
|
||||
#endif
|
||||
|
||||
//step 3: prepare float data
|
||||
std::vector<float> vec_f;
|
||||
@ -396,12 +462,11 @@ ServerError AddVectorTask::OnExecute() {
|
||||
return SetError(error_code, error_msg);
|
||||
}
|
||||
|
||||
rc.Record("prepare vectors data");
|
||||
rc.RecordSection("prepare vectors data");
|
||||
|
||||
//step 4: insert vectors
|
||||
uint64_t vec_count = (uint64_t)record_array_.size();
|
||||
stat = DBWrapper::DB()->InsertVectors(table_name_, vec_count, vec_f.data(), record_ids_);
|
||||
rc.Record("add vectors to engine");
|
||||
if(!stat.ok()) {
|
||||
return SetError(SERVER_CACHE_ERROR, "Cache error: " + stat.ToString());
|
||||
}
|
||||
@ -412,10 +477,15 @@ ServerError AddVectorTask::OnExecute() {
|
||||
return SetError(SERVER_ILLEGAL_VECTOR_ID, msg);
|
||||
}
|
||||
|
||||
rc.Record("do insert");
|
||||
rc.Elapse("totally cost");
|
||||
#ifdef MILVUS_ENABLE_PROFILING
|
||||
ProfilerStop();
|
||||
#endif
|
||||
|
||||
rc.RecordSection("add vectors to engine");
|
||||
rc.ElapseFromBegin("totally cost");
|
||||
|
||||
} catch (std::exception& ex) {
|
||||
SERVER_LOG_ERROR << "AddVectorTask encounter exception: " << ex.what();
|
||||
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
|
||||
}
|
||||
|
||||
@ -423,44 +493,34 @@ ServerError AddVectorTask::OnExecute() {
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
SearchVectorTask::SearchVectorTask(const std::string &table_name,
|
||||
const std::vector<std::string>& file_id_array,
|
||||
const std::vector<thrift::RowRecord> &query_record_array,
|
||||
const std::vector<thrift::Range> &query_range_array,
|
||||
const int64_t top_k,
|
||||
std::vector<thrift::TopKQueryResult> &result_array)
|
||||
SearchVectorTaskBase::SearchVectorTaskBase(const std::string &table_name,
|
||||
const std::vector<std::string>& file_id_array,
|
||||
const std::vector<thrift::RowRecord> &query_record_array,
|
||||
const std::vector<thrift::Range> &query_range_array,
|
||||
const int64_t top_k)
|
||||
: BaseTask(DQL_TASK_GROUP),
|
||||
table_name_(table_name),
|
||||
file_id_array_(file_id_array),
|
||||
record_array_(query_record_array),
|
||||
range_array_(query_range_array),
|
||||
top_k_(top_k),
|
||||
result_array_(result_array) {
|
||||
top_k_(top_k) {
|
||||
|
||||
}
|
||||
|
||||
BaseTaskPtr SearchVectorTask::Create(const std::string& table_name,
|
||||
const std::vector<std::string>& file_id_array,
|
||||
const std::vector<thrift::RowRecord> & query_record_array,
|
||||
const std::vector<thrift::Range> & query_range_array,
|
||||
const int64_t top_k,
|
||||
std::vector<thrift::TopKQueryResult>& result_array) {
|
||||
return std::shared_ptr<BaseTask>(new SearchVectorTask(table_name, file_id_array,
|
||||
query_record_array, query_range_array, top_k, result_array));
|
||||
}
|
||||
|
||||
ServerError SearchVectorTask::OnExecute() {
|
||||
ServerError SearchVectorTaskBase::OnExecute() {
|
||||
try {
|
||||
TimeRecorder rc("SearchVectorTask");
|
||||
std::string title = "SearchVectorTask(n=" + std::to_string(record_array_.size())
|
||||
+ " k=" + std::to_string(top_k_) + ")";
|
||||
TimeRecorder rc(title);
|
||||
|
||||
//step 1: check arguments
|
||||
ServerError res = SERVER_SUCCESS;
|
||||
res = ValidateTableName(table_name_);
|
||||
res = ValidationUtil::ValidateTableName(table_name_);
|
||||
if(res != SERVER_SUCCESS) {
|
||||
return res;
|
||||
return SetError(res, "Invalid table name: " + table_name_);
|
||||
}
|
||||
|
||||
if(top_k_ <= 0) {
|
||||
if(top_k_ <= 0 || top_k_ > 1024) {
|
||||
return SetError(SERVER_INVALID_TOPK, "Invalid topk: " + std::to_string(top_k_));
|
||||
}
|
||||
if(record_array_.empty()) {
|
||||
@ -488,7 +548,14 @@ ServerError SearchVectorTask::OnExecute() {
|
||||
return SetError(error_code, error_msg);
|
||||
}
|
||||
|
||||
rc.Record("check validation");
|
||||
double span_check = rc.RecordSection("check validation");
|
||||
|
||||
#ifdef MILVUS_ENABLE_PROFILING
|
||||
std::string fname = "/tmp/search_nq_" + std::to_string(this->record_array_.size()) +
|
||||
"_top_" + std::to_string(this->top_k_) + "_" +
|
||||
GetCurrTimeStr() + ".profiling";
|
||||
ProfilerStart(fname.c_str());
|
||||
#endif
|
||||
|
||||
//step 3: prepare float data
|
||||
std::vector<float> vec_f;
|
||||
@ -497,7 +564,7 @@ ServerError SearchVectorTask::OnExecute() {
|
||||
return SetError(error_code, error_msg);
|
||||
}
|
||||
|
||||
rc.Record("prepare vector data");
|
||||
double span_prepare = rc.RecordSection("prepare vector data");
|
||||
|
||||
//step 4: search vectors
|
||||
engine::QueryResults results;
|
||||
@ -509,7 +576,7 @@ ServerError SearchVectorTask::OnExecute() {
|
||||
stat = DBWrapper::DB()->Query(table_name_, file_id_array_, (size_t) top_k_, record_count, vec_f.data(), dates, results);
|
||||
}
|
||||
|
||||
rc.Record("search vectors from engine");
|
||||
double span_search = rc.RecordSection("search vectors from engine");
|
||||
if(!stat.ok()) {
|
||||
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
|
||||
}
|
||||
@ -524,34 +591,125 @@ ServerError SearchVectorTask::OnExecute() {
|
||||
return SetError(SERVER_ILLEGAL_SEARCH_RESULT, msg);
}

rc.Record("do search");

//step 5: construct result array
for(uint64_t i = 0; i < record_count; i++) {
auto& result = results[i];
const auto& record = record_array_[i];
ConstructResult(results);

thrift::TopKQueryResult thrift_topk_result;
for(auto& pair : result) {
thrift::QueryResult thrift_result;
thrift_result.__set_id(pair.first);
thrift_result.__set_distance(pair.second);
#ifdef MILVUS_ENABLE_PROFILING
ProfilerStop();
#endif

thrift_topk_result.query_result_arrays.emplace_back(thrift_result);
}
double span_result = rc.RecordSection("construct result");
rc.ElapseFromBegin("totally cost");

result_array_.emplace_back(thrift_topk_result);
}
rc.Record("construct result");
rc.Elapse("totally cost");
//step 6: print time cost percent
double total_cost = span_check + span_prepare + span_search + span_result;
SERVER_LOG_DEBUG << title << ": check validation(" << (span_check/total_cost)*100.0 << "%)"
<< " prepare data(" << (span_prepare/total_cost)*100.0 << "%)"
<< " search(" << (span_search/total_cost)*100.0 << "%)"
<< " construct result(" << (span_result/total_cost)*100.0 << "%)";

} catch (std::exception& ex) {
SERVER_LOG_ERROR << "SearchVectorTask encounter exception: " << ex.what();
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
}

return SERVER_SUCCESS;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
SearchVectorTask1::SearchVectorTask1(const std::string &table_name,
const std::vector<std::string>& file_id_array,
const std::vector<thrift::RowRecord> &query_record_array,
const std::vector<thrift::Range> &query_range_array,
const int64_t top_k,
std::vector<thrift::TopKQueryResult> &result_array)
: SearchVectorTaskBase(table_name, file_id_array, query_record_array, query_range_array, top_k),
result_array_(result_array) {

}

BaseTaskPtr SearchVectorTask1::Create(const std::string& table_name,
const std::vector<std::string>& file_id_array,
const std::vector<thrift::RowRecord> & query_record_array,
const std::vector<thrift::Range> & query_range_array,
const int64_t top_k,
std::vector<thrift::TopKQueryResult>& result_array) {
return std::shared_ptr<BaseTask>(new SearchVectorTask1(table_name, file_id_array,
query_record_array, query_range_array, top_k, result_array));
}

ServerError SearchVectorTask1::ConstructResult(engine::QueryResults& results) {
for(uint64_t i = 0; i < results.size(); i++) {
auto& result = results[i];
const auto& record = record_array_[i];

thrift::TopKQueryResult thrift_topk_result;
for(auto& pair : result) {
thrift::QueryResult thrift_result;
thrift_result.__set_id(pair.first);
thrift_result.__set_distance(pair.second);

thrift_topk_result.query_result_arrays.emplace_back(thrift_result);
}

result_array_.emplace_back(thrift_topk_result);
}

return SERVER_SUCCESS;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
SearchVectorTask2::SearchVectorTask2(const std::string &table_name,
const std::vector<std::string>& file_id_array,
const std::vector<thrift::RowRecord> &query_record_array,
const std::vector<thrift::Range> &query_range_array,
const int64_t top_k,
std::vector<thrift::TopKQueryBinResult> &result_array)
: SearchVectorTaskBase(table_name, file_id_array, query_record_array, query_range_array, top_k),
result_array_(result_array) {

}

BaseTaskPtr SearchVectorTask2::Create(const std::string& table_name,
const std::vector<std::string>& file_id_array,
const std::vector<thrift::RowRecord> & query_record_array,
const std::vector<thrift::Range> & query_range_array,
const int64_t top_k,
std::vector<thrift::TopKQueryBinResult>& result_array) {
return std::shared_ptr<BaseTask>(new SearchVectorTask2(table_name, file_id_array,
query_record_array, query_range_array, top_k, result_array));
}

ServerError SearchVectorTask2::ConstructResult(engine::QueryResults& results) {
for(size_t i = 0; i < results.size(); i++) {
auto& result = results[i];

thrift::TopKQueryBinResult thrift_topk_result;
if(result.empty()) {
result_array_.emplace_back(thrift_topk_result);
continue;
}

std::string str_ids, str_distances;
str_ids.resize(sizeof(engine::IDNumber)*result.size());
str_distances.resize(sizeof(double)*result.size());

engine::IDNumber* ids_ptr = (engine::IDNumber*)str_ids.data();
double* distance_ptr = (double*)str_distances.data();
for(size_t k = 0; k < result.size(); k++) {
auto& pair = result[k];
ids_ptr[k] = pair.first;
distance_ptr[k] = pair.second;
}

thrift_topk_result.__set_id_array(str_ids);
thrift_topk_result.__set_distance_array(str_distances);
result_array_.emplace_back(thrift_topk_result);
}

return SERVER_SUCCESS;
}
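
ConstructResult above packs the top-k ids and distances into two raw byte strings, one sizeof(engine::IDNumber) and one sizeof(double) per hit. For reference, a minimal client-side decode sketch; the helper names are hypothetical and it assumes IDNumber is a 64-bit integer, as the packing implies:

    // Hypothetical decode helpers for a TopKQueryBinResult; not part of
    // this patch. Assumes id_array holds int64 ids and distance_array
    // holds doubles, matching the memcpy-style packing above.
    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    std::vector<int64_t> DecodeIdArray(const std::string& id_array) {
        std::vector<int64_t> ids(id_array.size() / sizeof(int64_t));
        std::memcpy(ids.data(), id_array.data(), ids.size() * sizeof(int64_t));
        return ids;
    }

    std::vector<double> DecodeDistanceArray(const std::string& distance_array) {
        std::vector<double> distances(distance_array.size() / sizeof(double));
        std::memcpy(distances.data(), distance_array.data(), distances.size() * sizeof(double));
        return distances;
    }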
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
GetTableRowCountTask::GetTableRowCountTask(const std::string& table_name, int64_t& row_count)
: BaseTask(DDL_DML_TASK_GROUP),
@ -570,9 +728,9 @@ ServerError GetTableRowCountTask::OnExecute() {

//step 1: check arguments
ServerError res = SERVER_SUCCESS;
res = ValidateTableName(table_name_);
res = ValidationUtil::ValidateTableName(table_name_);
if(res != SERVER_SUCCESS) {
return res;
return SetError(res, "Invalid table name: " + table_name_);
}

//step 2: get row count
@ -584,9 +742,10 @@ ServerError GetTableRowCountTask::OnExecute() {

row_count_ = (int64_t) row_count;

rc.Elapse("totally cost");
rc.ElapseFromBegin("totally cost");

} catch (std::exception& ex) {
SERVER_LOG_ERROR << "GetTableRowCountTask encounter exception: " << ex.what();
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
}

@ -75,6 +75,21 @@ protected:
ServerError OnExecute() override;

private:
std::string table_name_;
};

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class BuildIndexTask : public BaseTask {
public:
static BaseTaskPtr Create(const std::string& table_name);

protected:
BuildIndexTask(const std::string& table_name);

ServerError OnExecute() override;

private:
std::string table_name_;
};
@ -114,7 +129,28 @@ private:
};

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class SearchVectorTask : public BaseTask {
class SearchVectorTaskBase : public BaseTask {
protected:
SearchVectorTaskBase(const std::string& table_name,
const std::vector<std::string>& file_id_array,
const std::vector<::milvus::thrift::RowRecord> & query_record_array,
const std::vector<::milvus::thrift::Range> & query_range_array,
const int64_t top_k);

ServerError OnExecute() override;

virtual ServerError ConstructResult(engine::QueryResults& results) = 0;

protected:
std::string table_name_;
std::vector<std::string> file_id_array_;
int64_t top_k_;
const std::vector<::milvus::thrift::RowRecord>& record_array_;
const std::vector<::milvus::thrift::Range>& range_array_;
};

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class SearchVectorTask1 : public SearchVectorTaskBase {
public:
static BaseTaskPtr Create(const std::string& table_name,
const std::vector<std::string>& file_id_array,
@ -124,24 +160,43 @@ public:
std::vector<::milvus::thrift::TopKQueryResult>& result_array);

protected:
SearchVectorTask(const std::string& table_name,
const std::vector<std::string>& file_id_array,
const std::vector<::milvus::thrift::RowRecord> & query_record_array,
const std::vector<::milvus::thrift::Range> & query_range_array,
const int64_t top_k,
SearchVectorTask1(const std::string& table_name,
const std::vector<std::string>& file_id_array,
const std::vector<::milvus::thrift::RowRecord> & query_record_array,
const std::vector<::milvus::thrift::Range> & query_range_array,
const int64_t top_k,
std::vector<::milvus::thrift::TopKQueryResult>& result_array);

ServerError OnExecute() override;
ServerError ConstructResult(engine::QueryResults& results) override;

private:
std::string table_name_;
std::vector<std::string> file_id_array_;
int64_t top_k_;
const std::vector<::milvus::thrift::RowRecord>& record_array_;
const std::vector<::milvus::thrift::Range>& range_array_;
std::vector<::milvus::thrift::TopKQueryResult>& result_array_;
};

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class SearchVectorTask2 : public SearchVectorTaskBase {
public:
static BaseTaskPtr Create(const std::string& table_name,
const std::vector<std::string>& file_id_array,
const std::vector<::milvus::thrift::RowRecord> & query_record_array,
const std::vector<::milvus::thrift::Range> & query_range_array,
const int64_t top_k,
std::vector<::milvus::thrift::TopKQueryBinResult>& result_array);

protected:
SearchVectorTask2(const std::string& table_name,
const std::vector<std::string>& file_id_array,
const std::vector<::milvus::thrift::RowRecord> & query_record_array,
const std::vector<::milvus::thrift::Range> & query_range_array,
const int64_t top_k,
std::vector<::milvus::thrift::TopKQueryBinResult>& result_array);

ServerError ConstructResult(engine::QueryResults& results) override;

private:
std::vector<::milvus::thrift::TopKQueryBinResult>& result_array_;
};

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class GetTableRowCountTask : public BaseTask {
public:
@ -174,4 +229,4 @@ private:

}
}
}
}

@ -226,6 +226,10 @@ Server::Stop() {
ServerError
Server::LoadConfig() {
ServerConfig::GetInstance().LoadConfigFile(config_filename_);
ServerError err = ServerConfig::GetInstance().ValidateConfig();
if(err != SERVER_SUCCESS){
exit(0);
}

return SERVER_SUCCESS;
}

@ -12,11 +12,16 @@
#include <iostream>

#include "config/IConfigMgr.h"
#include "utils/CommonUtil.h"
#include "utils/ValidationUtil.h"

namespace zilliz {
namespace milvus {
namespace server {

constexpr uint64_t MB = 1024*1024;
constexpr uint64_t GB = MB*1024;

ServerConfig&
ServerConfig::GetInstance() {
static ServerConfig config;
@ -53,6 +58,65 @@ ServerConfig::LoadConfigFile(const std::string& config_filename) {
return SERVER_SUCCESS;
}

ServerError ServerConfig::ValidateConfig() const {
//server config validation
ConfigNode server_config = GetConfig(CONFIG_SERVER);
uint32_t gpu_index = (uint32_t)server_config.GetInt32Value(CONFIG_GPU_INDEX, 0);
if(ValidationUtil::ValidateGpuIndex(gpu_index) != SERVER_SUCCESS) {
std::cout << "Error: invalid gpu_index " << std::to_string(gpu_index) << std::endl;
return SERVER_INVALID_ARGUMENT;
}

//db config validation
unsigned long total_mem = 0, free_mem = 0;
CommonUtil::GetSystemMemInfo(total_mem, free_mem);

ConfigNode db_config = GetConfig(CONFIG_DB);
uint64_t insert_buffer_size = (uint64_t)db_config.GetInt32Value(CONFIG_DB_INSERT_BUFFER_SIZE, 4);
insert_buffer_size *= GB;
if(insert_buffer_size >= total_mem) {
std::cout << "Error: insert_buffer_size exceeds system memory" << std::endl;
return SERVER_INVALID_ARGUMENT;
}

uint64_t index_building_threshold = (uint64_t)db_config.GetInt32Value(CONFIG_DB_INDEX_TRIGGER_SIZE, 1024);
index_building_threshold *= MB;

size_t gpu_mem = 0;
ValidationUtil::GetGpuMemory(gpu_index, gpu_mem);
if(index_building_threshold >= gpu_mem) {
std::cout << "Error: index_building_threshold exceeds gpu memory" << std::endl;
return SERVER_INVALID_ARGUMENT;
} else if(index_building_threshold >= gpu_mem/3) {
std::cout << "Warning: index_building_threshold is greater than 1/3 of gpu memory, "
<< "some index type(such as IVFLAT) may cause cuda::bad_alloc() error" << std::endl;
}

//cache config validation
ConfigNode cache_config = GetConfig(CONFIG_CACHE);
uint64_t cache_cap = (uint64_t)cache_config.GetInt64Value(CONFIG_CPU_CACHE_CAPACITY, 16);
cache_cap *= GB;
if(cache_cap >= total_mem) {
std::cout << "Error: cpu_cache_capacity exceeds system memory" << std::endl;
return SERVER_INVALID_ARGUMENT;
} else if(cache_cap > (double)total_mem*0.9) {
std::cout << "Warning: cpu_cache_capacity value is too aggressive" << std::endl;
}

if(insert_buffer_size + cache_cap >= total_mem) {
std::cout << "Error: sum of cpu_cache_capacity and insert_buffer_size exceeds system memory" << std::endl;
return SERVER_INVALID_ARGUMENT;
}

double free_percent = cache_config.GetDoubleValue(server::CACHE_FREE_PERCENT, 0.85);
if(free_percent < std::numeric_limits<double>::epsilon() || free_percent > 1.0) {
std::cout << "Error: invalid cache_free_percent " << std::to_string(free_percent) << std::endl;
return SERVER_INVALID_ARGUMENT;
}

return SERVER_SUCCESS;
}
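
The checks in ValidateConfig all share one shape: scale the configured count to bytes with the MB/GB constants, then compare against a measured limit. A minimal standalone sketch of the insert_buffer_size guard, with a hypothetical helper name and sysinfo used directly:

    // Hypothetical standalone version of the insert_buffer_size guard:
    // scale the configured gigabyte count to bytes, then require it to be
    // smaller than physical memory.
    #include <sys/sysinfo.h>
    #include <cstdint>
    #include <iostream>

    bool ValidateInsertBufferSize(uint64_t insert_buffer_gb) {
        struct sysinfo info;
        if (sysinfo(&info) != 0) {
            return false; // cannot read system memory; fail closed
        }
        const uint64_t GB = 1024ULL * 1024ULL * 1024ULL;
        uint64_t total_mem = static_cast<uint64_t>(info.totalram) * info.mem_unit;
        if (insert_buffer_gb * GB >= total_mem) {
            std::cout << "Error: insert_buffer_size exceeds system memory" << std::endl;
            return false;
        }
        return true;
    }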
void
ServerConfig::PrintAll() const {
if(const IConfigMgr* mgr = IConfigMgr::GetInstance()) {

@ -19,20 +19,25 @@ static const std::string CONFIG_SERVER_ADDRESS = "address";
static const std::string CONFIG_SERVER_PORT = "port";
static const std::string CONFIG_SERVER_PROTOCOL = "transfer_protocol";
static const std::string CONFIG_CLUSTER_MODE = "mode";
static const std::string CONFIG_GPU_INDEX = "gpu_index";

static const std::string CONFIG_DB = "db_config";
static const std::string CONFIG_DB_URL = "db_backend_url";
static const std::string CONFIG_DB_PATH = "db_path";
static const std::string CONFIG_DB_SLAVE_PATH = "db_slave_path";
static const std::string CONFIG_DB_INDEX_TRIGGER_SIZE = "index_building_threshold";
static const std::string CONFIG_DB_ARCHIVE_DISK = "archive_disk_threshold";
static const std::string CONFIG_DB_ARCHIVE_DAYS = "archive_days_threshold";
static const std::string CONFIG_MAXMIMUM_MEMORY = "maximum_memory";
static const std::string CONFIG_DB_INSERT_BUFFER_SIZE = "insert_buffer_size";
static const std::string CONFIG_DB_PARALLEL_REDUCE = "parallel_reduce";

static const std::string CONFIG_LOG = "log_config";

static const std::string CONFIG_CACHE = "cache_config";
static const std::string CONFIG_CPU_CACHE_CAPACITY = "cpu_cache_capacity";
static const std::string CONFIG_GPU_CACHE_CAPACITY = "gpu_cache_capacity";
static const std::string CACHE_FREE_PERCENT = "cache_free_percent";
static const std::string CONFIG_INSERT_CACHE_IMMEDIATELY = "insert_cache_immediately";

static const std::string CONFIG_LICENSE = "license_config";
static const std::string CONFIG_LICENSE_PATH = "license_path";
@ -45,12 +50,16 @@ static const std::string CONFIG_METRIC_PROMETHEUS_PORT = "port";

static const std::string CONFIG_ENGINE = "engine_config";
static const std::string CONFIG_NPROBE = "nprobe";
static const std::string CONFIG_NLIST = "nlist";
static const std::string CONFIG_DCBT = "use_blas_threshold";
static const std::string CONFIG_METRICTYPE = "metric_type";

class ServerConfig {
public:
static ServerConfig &GetInstance();

ServerError LoadConfigFile(const std::string& config_filename);
ServerError ValidateConfig() const;
void PrintAll() const;

ConfigNode GetConfig(const std::string& name) const;

File diff suppressed because it is too large

@ -58,6 +58,18 @@ class MilvusServiceIf {
*/
virtual void DeleteTable(const std::string& table_name) = 0;

/**
* @brief Build index by table method
*
* This method is used to build index by table in sync mode.
*
* @param table_name, table which is going to be indexed.
*
*
* @param table_name
*/
virtual void BuildIndex(const std::string& table_name) = 0;
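
For reference, a sketch of invoking the new method through the generated client. This is standard Thrift client boilerplate, not part of the patch: the include name, host, port, and table name are placeholder assumptions, and older Thrift releases spell the smart-pointer alias boost::shared_ptr or stdcxx::shared_ptr instead of std::shared_ptr.

    #include <thrift/protocol/TBinaryProtocol.h>
    #include <thrift/transport/TSocket.h>
    #include <thrift/transport/TBufferTransports.h>
    #include <memory>
    #include "MilvusService.h" // assumed name of the generated header

    using apache::thrift::protocol::TBinaryProtocol;
    using apache::thrift::protocol::TProtocol;
    using apache::thrift::transport::TBufferedTransport;
    using apache::thrift::transport::TSocket;
    using apache::thrift::transport::TTransport;
    using namespace ::milvus::thrift;

    void BuildIndexExample() {
        std::shared_ptr<TTransport> socket(new TSocket("127.0.0.1", 19530));
        std::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
        std::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
        MilvusServiceClient client(protocol);

        transport->open();
        client.BuildIndex("example_table"); // synchronous; throws Exception on failure
        transport->close();
    }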

/**
* @brief Add vector array to table
*
@ -92,6 +104,25 @@ class MilvusServiceIf {
*/
virtual void SearchVector(std::vector<TopKQueryResult> & _return, const std::string& table_name, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk) = 0;

/**
* @brief Query vector
*
* This method is used to query vector in table.
*
* @param table_name, name of the table to be queried.
* @param query_record_array, all vectors to be queried.
* @param query_range_array, optional ranges for conditional search. If not specified, search the whole table.
* @param topk, how many similar vectors will be returned.
*
* @return query binary result array.
*
* @param table_name
* @param query_record_array
* @param query_range_array
* @param topk
*/
virtual void SearchVector2(std::vector<TopKQueryBinResult> & _return, const std::string& table_name, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk) = 0;

/**
* @brief Internal use query interface
*
@ -197,12 +228,18 @@ class MilvusServiceNull : virtual public MilvusServiceIf {
void DeleteTable(const std::string& /* table_name */) {
return;
}
void BuildIndex(const std::string& /* table_name */) {
return;
}
void AddVector(std::vector<int64_t> & /* _return */, const std::string& /* table_name */, const std::vector<RowRecord> & /* record_array */) {
return;
}
void SearchVector(std::vector<TopKQueryResult> & /* _return */, const std::string& /* table_name */, const std::vector<RowRecord> & /* query_record_array */, const std::vector<Range> & /* query_range_array */, const int64_t /* topk */) {
return;
}
void SearchVector2(std::vector<TopKQueryBinResult> & /* _return */, const std::string& /* table_name */, const std::vector<RowRecord> & /* query_record_array */, const std::vector<Range> & /* query_range_array */, const int64_t /* topk */) {
return;
}
void SearchVectorInFiles(std::vector<TopKQueryResult> & /* _return */, const std::string& /* table_name */, const std::vector<std::string> & /* file_id_array */, const std::vector<RowRecord> & /* query_record_array */, const std::vector<Range> & /* query_range_array */, const int64_t /* topk */) {
return;
}
@ -541,6 +578,110 @@ class MilvusService_DeleteTable_presult {

};

typedef struct _MilvusService_BuildIndex_args__isset {
_MilvusService_BuildIndex_args__isset() : table_name(false) {}
bool table_name :1;
} _MilvusService_BuildIndex_args__isset;

class MilvusService_BuildIndex_args {
public:

MilvusService_BuildIndex_args(const MilvusService_BuildIndex_args&);
MilvusService_BuildIndex_args& operator=(const MilvusService_BuildIndex_args&);
MilvusService_BuildIndex_args() : table_name() {
}

virtual ~MilvusService_BuildIndex_args() throw();
std::string table_name;

_MilvusService_BuildIndex_args__isset __isset;

void __set_table_name(const std::string& val);

bool operator == (const MilvusService_BuildIndex_args & rhs) const
{
if (!(table_name == rhs.table_name))
return false;
return true;
}
bool operator != (const MilvusService_BuildIndex_args &rhs) const {
return !(*this == rhs);
}

bool operator < (const MilvusService_BuildIndex_args & ) const;

uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;

};


class MilvusService_BuildIndex_pargs {
public:


virtual ~MilvusService_BuildIndex_pargs() throw();
const std::string* table_name;

uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;

};

typedef struct _MilvusService_BuildIndex_result__isset {
_MilvusService_BuildIndex_result__isset() : e(false) {}
bool e :1;
} _MilvusService_BuildIndex_result__isset;

class MilvusService_BuildIndex_result {
public:

MilvusService_BuildIndex_result(const MilvusService_BuildIndex_result&);
MilvusService_BuildIndex_result& operator=(const MilvusService_BuildIndex_result&);
MilvusService_BuildIndex_result() {
}

virtual ~MilvusService_BuildIndex_result() throw();
Exception e;

_MilvusService_BuildIndex_result__isset __isset;

void __set_e(const Exception& val);

bool operator == (const MilvusService_BuildIndex_result & rhs) const
{
if (!(e == rhs.e))
return false;
return true;
}
bool operator != (const MilvusService_BuildIndex_result &rhs) const {
return !(*this == rhs);
}

bool operator < (const MilvusService_BuildIndex_result & ) const;

uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;

};

typedef struct _MilvusService_BuildIndex_presult__isset {
_MilvusService_BuildIndex_presult__isset() : e(false) {}
bool e :1;
} _MilvusService_BuildIndex_presult__isset;

class MilvusService_BuildIndex_presult {
public:


virtual ~MilvusService_BuildIndex_presult() throw();
Exception e;

_MilvusService_BuildIndex_presult__isset __isset;

uint32_t read(::apache::thrift::protocol::TProtocol* iprot);

};

typedef struct _MilvusService_AddVector_args__isset {
_MilvusService_AddVector_args__isset() : table_name(false), record_array(false) {}
bool table_name :1;
@ -793,6 +934,139 @@ class MilvusService_SearchVector_presult {

};

typedef struct _MilvusService_SearchVector2_args__isset {
_MilvusService_SearchVector2_args__isset() : table_name(false), query_record_array(false), query_range_array(false), topk(false) {}
bool table_name :1;
bool query_record_array :1;
bool query_range_array :1;
bool topk :1;
} _MilvusService_SearchVector2_args__isset;

class MilvusService_SearchVector2_args {
public:

MilvusService_SearchVector2_args(const MilvusService_SearchVector2_args&);
MilvusService_SearchVector2_args& operator=(const MilvusService_SearchVector2_args&);
MilvusService_SearchVector2_args() : table_name(), topk(0) {
}

virtual ~MilvusService_SearchVector2_args() throw();
std::string table_name;
std::vector<RowRecord> query_record_array;
std::vector<Range> query_range_array;
int64_t topk;

_MilvusService_SearchVector2_args__isset __isset;

void __set_table_name(const std::string& val);

void __set_query_record_array(const std::vector<RowRecord> & val);

void __set_query_range_array(const std::vector<Range> & val);

void __set_topk(const int64_t val);

bool operator == (const MilvusService_SearchVector2_args & rhs) const
{
if (!(table_name == rhs.table_name))
return false;
if (!(query_record_array == rhs.query_record_array))
return false;
if (!(query_range_array == rhs.query_range_array))
return false;
if (!(topk == rhs.topk))
return false;
return true;
}
bool operator != (const MilvusService_SearchVector2_args &rhs) const {
return !(*this == rhs);
}

bool operator < (const MilvusService_SearchVector2_args & ) const;

uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;

};


class MilvusService_SearchVector2_pargs {
public:


virtual ~MilvusService_SearchVector2_pargs() throw();
const std::string* table_name;
const std::vector<RowRecord> * query_record_array;
const std::vector<Range> * query_range_array;
const int64_t* topk;

uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;

};

typedef struct _MilvusService_SearchVector2_result__isset {
_MilvusService_SearchVector2_result__isset() : success(false), e(false) {}
bool success :1;
bool e :1;
} _MilvusService_SearchVector2_result__isset;

class MilvusService_SearchVector2_result {
public:

MilvusService_SearchVector2_result(const MilvusService_SearchVector2_result&);
MilvusService_SearchVector2_result& operator=(const MilvusService_SearchVector2_result&);
MilvusService_SearchVector2_result() {
}

virtual ~MilvusService_SearchVector2_result() throw();
std::vector<TopKQueryBinResult> success;
Exception e;

_MilvusService_SearchVector2_result__isset __isset;

void __set_success(const std::vector<TopKQueryBinResult> & val);

void __set_e(const Exception& val);

bool operator == (const MilvusService_SearchVector2_result & rhs) const
{
if (!(success == rhs.success))
return false;
if (!(e == rhs.e))
return false;
return true;
}
bool operator != (const MilvusService_SearchVector2_result &rhs) const {
return !(*this == rhs);
}

bool operator < (const MilvusService_SearchVector2_result & ) const;

uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;

};

typedef struct _MilvusService_SearchVector2_presult__isset {
_MilvusService_SearchVector2_presult__isset() : success(false), e(false) {}
bool success :1;
bool e :1;
} _MilvusService_SearchVector2_presult__isset;

class MilvusService_SearchVector2_presult {
public:


virtual ~MilvusService_SearchVector2_presult() throw();
std::vector<TopKQueryBinResult> * success;
Exception e;

_MilvusService_SearchVector2_presult__isset __isset;

uint32_t read(::apache::thrift::protocol::TProtocol* iprot);

};

typedef struct _MilvusService_SearchVectorInFiles_args__isset {
_MilvusService_SearchVectorInFiles_args__isset() : table_name(false), file_id_array(false), query_record_array(false), query_range_array(false), topk(false) {}
bool table_name :1;
@ -1403,12 +1677,18 @@ class MilvusServiceClient : virtual public MilvusServiceIf {
void DeleteTable(const std::string& table_name);
void send_DeleteTable(const std::string& table_name);
void recv_DeleteTable();
void BuildIndex(const std::string& table_name);
void send_BuildIndex(const std::string& table_name);
void recv_BuildIndex();
void AddVector(std::vector<int64_t> & _return, const std::string& table_name, const std::vector<RowRecord> & record_array);
void send_AddVector(const std::string& table_name, const std::vector<RowRecord> & record_array);
void recv_AddVector(std::vector<int64_t> & _return);
void SearchVector(std::vector<TopKQueryResult> & _return, const std::string& table_name, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk);
void send_SearchVector(const std::string& table_name, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk);
void recv_SearchVector(std::vector<TopKQueryResult> & _return);
void SearchVector2(std::vector<TopKQueryBinResult> & _return, const std::string& table_name, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk);
void send_SearchVector2(const std::string& table_name, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk);
void recv_SearchVector2(std::vector<TopKQueryBinResult> & _return);
void SearchVectorInFiles(std::vector<TopKQueryResult> & _return, const std::string& table_name, const std::vector<std::string> & file_id_array, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk);
void send_SearchVectorInFiles(const std::string& table_name, const std::vector<std::string> & file_id_array, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk);
void recv_SearchVectorInFiles(std::vector<TopKQueryResult> & _return);
@ -1442,8 +1722,10 @@ class MilvusServiceProcessor : public ::apache::thrift::TDispatchProcessor {
void process_CreateTable(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_HasTable(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_DeleteTable(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_BuildIndex(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_AddVector(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_SearchVector(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_SearchVector2(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_SearchVectorInFiles(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_DescribeTable(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_GetTableRowCount(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
@ -1455,8 +1737,10 @@ class MilvusServiceProcessor : public ::apache::thrift::TDispatchProcessor {
processMap_["CreateTable"] = &MilvusServiceProcessor::process_CreateTable;
processMap_["HasTable"] = &MilvusServiceProcessor::process_HasTable;
processMap_["DeleteTable"] = &MilvusServiceProcessor::process_DeleteTable;
processMap_["BuildIndex"] = &MilvusServiceProcessor::process_BuildIndex;
processMap_["AddVector"] = &MilvusServiceProcessor::process_AddVector;
processMap_["SearchVector"] = &MilvusServiceProcessor::process_SearchVector;
processMap_["SearchVector2"] = &MilvusServiceProcessor::process_SearchVector2;
processMap_["SearchVectorInFiles"] = &MilvusServiceProcessor::process_SearchVectorInFiles;
processMap_["DescribeTable"] = &MilvusServiceProcessor::process_DescribeTable;
processMap_["GetTableRowCount"] = &MilvusServiceProcessor::process_GetTableRowCount;
@ -1517,6 +1801,15 @@ class MilvusServiceMultiface : virtual public MilvusServiceIf {
ifaces_[i]->DeleteTable(table_name);
}

void BuildIndex(const std::string& table_name) {
size_t sz = ifaces_.size();
size_t i = 0;
for (; i < (sz - 1); ++i) {
ifaces_[i]->BuildIndex(table_name);
}
ifaces_[i]->BuildIndex(table_name);
}
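
The generated Multiface wrapper repeats this fan-out shape for every method: call all but the last registered handler in a loop, then call the last one outside it so that, for methods with a _return, its result is the one kept. A stripped-down sketch of the same idiom with a hypothetical interface, adding the empty-vector guard the generated code omits:

    // Minimal sketch of the multiface fan-out idiom; Iface is hypothetical.
    #include <memory>
    #include <string>
    #include <vector>

    struct Iface {
        virtual ~Iface() = default;
        virtual void BuildIndex(const std::string& table_name) = 0;
    };

    void FanOutBuildIndex(std::vector<std::shared_ptr<Iface>>& ifaces,
                          const std::string& table_name) {
        if (ifaces.empty()) return;
        size_t i = 0;
        for (; i + 1 < ifaces.size(); ++i) {   // all but the last handler
            ifaces[i]->BuildIndex(table_name);
        }
        ifaces[i]->BuildIndex(table_name);     // last handler; its outcome "wins"
    }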

void AddVector(std::vector<int64_t> & _return, const std::string& table_name, const std::vector<RowRecord> & record_array) {
size_t sz = ifaces_.size();
size_t i = 0;
@ -1537,6 +1830,16 @@ class MilvusServiceMultiface : virtual public MilvusServiceIf {
return;
}

void SearchVector2(std::vector<TopKQueryBinResult> & _return, const std::string& table_name, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk) {
size_t sz = ifaces_.size();
size_t i = 0;
for (; i < (sz - 1); ++i) {
ifaces_[i]->SearchVector2(_return, table_name, query_record_array, query_range_array, topk);
}
ifaces_[i]->SearchVector2(_return, table_name, query_record_array, query_range_array, topk);
return;
}

void SearchVectorInFiles(std::vector<TopKQueryResult> & _return, const std::string& table_name, const std::vector<std::string> & file_id_array, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk) {
size_t sz = ifaces_.size();
size_t i = 0;
@ -1625,12 +1928,18 @@ class MilvusServiceConcurrentClient : virtual public MilvusServiceIf {
void DeleteTable(const std::string& table_name);
int32_t send_DeleteTable(const std::string& table_name);
void recv_DeleteTable(const int32_t seqid);
void BuildIndex(const std::string& table_name);
int32_t send_BuildIndex(const std::string& table_name);
void recv_BuildIndex(const int32_t seqid);
void AddVector(std::vector<int64_t> & _return, const std::string& table_name, const std::vector<RowRecord> & record_array);
int32_t send_AddVector(const std::string& table_name, const std::vector<RowRecord> & record_array);
void recv_AddVector(std::vector<int64_t> & _return, const int32_t seqid);
void SearchVector(std::vector<TopKQueryResult> & _return, const std::string& table_name, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk);
int32_t send_SearchVector(const std::string& table_name, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk);
void recv_SearchVector(std::vector<TopKQueryResult> & _return, const int32_t seqid);
void SearchVector2(std::vector<TopKQueryBinResult> & _return, const std::string& table_name, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk);
int32_t send_SearchVector2(const std::string& table_name, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk);
void recv_SearchVector2(std::vector<TopKQueryBinResult> & _return, const int32_t seqid);
void SearchVectorInFiles(std::vector<TopKQueryResult> & _return, const std::string& table_name, const std::vector<std::string> & file_id_array, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk);
int32_t send_SearchVectorInFiles(const std::string& table_name, const std::vector<std::string> & file_id_array, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk);
void recv_SearchVectorInFiles(std::vector<TopKQueryResult> & _return, const int32_t seqid);

@ -65,6 +65,21 @@ class MilvusServiceHandler : virtual public MilvusServiceIf {
printf("DeleteTable\n");
}

/**
* @brief Build index by table method
*
* This method is used to build index by table in sync mode.
*
* @param table_name, table which is going to be indexed.
*
*
* @param table_name
*/
void BuildIndex(const std::string& table_name) {
// Your implementation goes here
printf("BuildIndex\n");
}

/**
* @brief Add vector array to table
*
@ -105,6 +120,28 @@ class MilvusServiceHandler : virtual public MilvusServiceIf {
printf("SearchVector\n");
}

/**
* @brief Query vector
*
* This method is used to query vector in table.
*
* @param table_name, name of the table to be queried.
* @param query_record_array, all vectors to be queried.
* @param query_range_array, optional ranges for conditional search. If not specified, search the whole table.
* @param topk, how many similar vectors will be returned.
*
* @return query binary result array.
*
* @param table_name
* @param query_record_array
* @param query_range_array
* @param topk
*/
void SearchVector2(std::vector<TopKQueryBinResult> & _return, const std::string& table_name, const std::vector<RowRecord> & query_record_array, const std::vector<Range> & query_range_array, const int64_t topk) {
// Your implementation goes here
printf("SearchVector2\n");
}

/**
* @brief Internal use query interface
*

@ -34,7 +34,8 @@ int _kErrorCodeValues[] = {
ErrorCode::CANNOT_CREATE_FOLDER,
ErrorCode::CANNOT_CREATE_FILE,
ErrorCode::CANNOT_DELETE_FOLDER,
ErrorCode::CANNOT_DELETE_FILE
ErrorCode::CANNOT_DELETE_FILE,
ErrorCode::BUILD_INDEX_ERROR
};
const char* _kErrorCodeNames[] = {
"SUCCESS",
@ -57,9 +58,10 @@ const char* _kErrorCodeNames[] = {
"CANNOT_CREATE_FOLDER",
"CANNOT_CREATE_FILE",
"CANNOT_DELETE_FOLDER",
"CANNOT_DELETE_FILE"
"CANNOT_DELETE_FILE",
"BUILD_INDEX_ERROR"
};
const std::map<int, const char*> _ErrorCode_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(21, _kErrorCodeValues, _kErrorCodeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
const std::map<int, const char*> _ErrorCode_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(22, _kErrorCodeValues, _kErrorCodeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));

std::ostream& operator<<(std::ostream& out, const ErrorCode::type& val) {
std::map<int, const char*>::const_iterator it = _ErrorCode_VALUES_TO_NAMES.find(val);
@ -779,4 +781,119 @@ void TopKQueryResult::printTo(std::ostream& out) const {
out << ")";
}


TopKQueryBinResult::~TopKQueryBinResult() throw() {
}


void TopKQueryBinResult::__set_id_array(const std::string& val) {
this->id_array = val;
}

void TopKQueryBinResult::__set_distance_array(const std::string& val) {
this->distance_array = val;
}
std::ostream& operator<<(std::ostream& out, const TopKQueryBinResult& obj)
{
obj.printTo(out);
return out;
}


uint32_t TopKQueryBinResult::read(::apache::thrift::protocol::TProtocol* iprot) {

::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
uint32_t xfer = 0;
std::string fname;
::apache::thrift::protocol::TType ftype;
int16_t fid;

xfer += iprot->readStructBegin(fname);

using ::apache::thrift::protocol::TProtocolException;

bool isset_id_array = false;
bool isset_distance_array = false;

while (true)
{
xfer += iprot->readFieldBegin(fname, ftype, fid);
if (ftype == ::apache::thrift::protocol::T_STOP) {
break;
}
switch (fid)
{
case 1:
if (ftype == ::apache::thrift::protocol::T_STRING) {
xfer += iprot->readBinary(this->id_array);
isset_id_array = true;
} else {
xfer += iprot->skip(ftype);
}
break;
case 2:
if (ftype == ::apache::thrift::protocol::T_STRING) {
xfer += iprot->readBinary(this->distance_array);
isset_distance_array = true;
} else {
xfer += iprot->skip(ftype);
}
break;
default:
xfer += iprot->skip(ftype);
break;
}
xfer += iprot->readFieldEnd();
}

xfer += iprot->readStructEnd();

if (!isset_id_array)
throw TProtocolException(TProtocolException::INVALID_DATA);
if (!isset_distance_array)
throw TProtocolException(TProtocolException::INVALID_DATA);
return xfer;
}

uint32_t TopKQueryBinResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
uint32_t xfer = 0;
::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
xfer += oprot->writeStructBegin("TopKQueryBinResult");

xfer += oprot->writeFieldBegin("id_array", ::apache::thrift::protocol::T_STRING, 1);
xfer += oprot->writeBinary(this->id_array);
xfer += oprot->writeFieldEnd();

xfer += oprot->writeFieldBegin("distance_array", ::apache::thrift::protocol::T_STRING, 2);
xfer += oprot->writeBinary(this->distance_array);
xfer += oprot->writeFieldEnd();

xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
}

void swap(TopKQueryBinResult &a, TopKQueryBinResult &b) {
using ::std::swap;
swap(a.id_array, b.id_array);
swap(a.distance_array, b.distance_array);
}

TopKQueryBinResult::TopKQueryBinResult(const TopKQueryBinResult& other19) {
id_array = other19.id_array;
distance_array = other19.distance_array;
}
TopKQueryBinResult& TopKQueryBinResult::operator=(const TopKQueryBinResult& other20) {
id_array = other20.id_array;
distance_array = other20.distance_array;
return *this;
}
void TopKQueryBinResult::printTo(std::ostream& out) const {
using ::apache::thrift::to_string;
out << "TopKQueryBinResult(";
out << "id_array=" << to_string(id_array);
out << ", " << "distance_array=" << to_string(distance_array);
out << ")";
}

}} // namespace

@ -42,7 +42,8 @@ struct ErrorCode {
CANNOT_CREATE_FOLDER = 17,
CANNOT_CREATE_FILE = 18,
CANNOT_DELETE_FOLDER = 19,
CANNOT_DELETE_FILE = 20
CANNOT_DELETE_FILE = 20,
BUILD_INDEX_ERROR = 21
};
};

@ -62,6 +63,8 @@ class QueryResult;

class TopKQueryResult;

class TopKQueryBinResult;

typedef struct _Exception__isset {
_Exception__isset() : code(false), reason(false) {}
bool code :1;
@ -345,6 +348,47 @@ void swap(TopKQueryResult &a, TopKQueryResult &b);

std::ostream& operator<<(std::ostream& out, const TopKQueryResult& obj);


class TopKQueryBinResult : public virtual ::apache::thrift::TBase {
public:

TopKQueryBinResult(const TopKQueryBinResult&);
TopKQueryBinResult& operator=(const TopKQueryBinResult&);
TopKQueryBinResult() : id_array(), distance_array() {
}

virtual ~TopKQueryBinResult() throw();
std::string id_array;
std::string distance_array;

void __set_id_array(const std::string& val);

void __set_distance_array(const std::string& val);

bool operator == (const TopKQueryBinResult & rhs) const
{
if (!(id_array == rhs.id_array))
return false;
if (!(distance_array == rhs.distance_array))
return false;
return true;
}
bool operator != (const TopKQueryBinResult &rhs) const {
return !(*this == rhs);
}

bool operator < (const TopKQueryBinResult & ) const;

uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;

virtual void printTo(std::ostream& out) const;
};

void swap(TopKQueryBinResult &a, TopKQueryBinResult &b);

std::ostream& operator<<(std::ostream& out, const TopKQueryBinResult& obj);

}} // namespace

#endif

@ -35,6 +35,7 @@ enum ErrorCode {
CANNOT_CREATE_FILE,
CANNOT_DELETE_FOLDER,
CANNOT_DELETE_FILE,
BUILD_INDEX_ERROR,
}

exception Exception {
@ -83,6 +84,14 @@ struct TopKQueryResult {
1: list<QueryResult> query_result_arrays; ///< TopK query result
}

/**
* @brief TopK query binary result
*/
struct TopKQueryBinResult {
1: required binary id_array; ///< id array, integer array
2: required binary distance_array; ///< distance array, double array
}

service MilvusService {
/**
* @brief Create table method
@ -115,6 +124,16 @@ service MilvusService {
*/
void DeleteTable(2: string table_name) throws(1: Exception e);

/**
* @brief Build index by table method
*
* This method is used to build index by table in sync mode.
*
* @param table_name, table which is going to be indexed.
*
*/
void BuildIndex(2: string table_name) throws(1: Exception e);


/**
* @brief Add vector array to table
@ -147,6 +166,23 @@ service MilvusService {
4: list<Range> query_range_array,
5: i64 topk) throws(1: Exception e);

/**
* @brief Query vector
*
* This method is used to query vector in table.
*
* @param table_name, name of the table to be queried.
* @param query_record_array, all vectors to be queried.
* @param query_range_array, optional ranges for conditional search. If not specified, search the whole table.
* @param topk, how many similar vectors will be returned.
*
* @return query binary result array.
*/
list<TopKQueryBinResult> SearchVector2(2: string table_name,
3: list<RowRecord> query_record_array,
4: list<Range> query_range_array,
5: i64 topk) throws(1: Exception e);
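
Seen from the generated C++ client, a call to this method looks roughly like the sketch below (continuing the connected client from the BuildIndex example; the query data is placeholder, and the returned binaries decode as in the helpers shown earlier):

    // Sketch only; assumes a connected MilvusServiceClient named `client`.
    std::vector<::milvus::thrift::TopKQueryBinResult> results;
    std::vector<::milvus::thrift::RowRecord> records(1);
    // populate records[0] with one query vector before calling
    std::vector<::milvus::thrift::Range> ranges; // empty: search the whole table

    client.SearchVector2(results, "example_table", records, ranges, /*topk=*/10);
    for (const auto& topk : results) {
        // topk.id_array / topk.distance_array are packed binaries;
        // decode them as in DecodeIdArray / DecodeDistanceArray above.
    }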

/**
* @brief Internal use query interface
*
@ -207,4 +243,4 @@ service MilvusService {
* @return Server status.
*/
string Ping(2: string cmd) throws(1: Exception e);
}
}

@ -32,27 +32,26 @@ namespace server {

namespace fs = boost::filesystem;

bool CommonUtil::GetSystemMemInfo(unsigned long &totalMem, unsigned long &freeMem) {
bool CommonUtil::GetSystemMemInfo(unsigned long &total_mem, unsigned long &free_mem) {
struct sysinfo info;
int ret = sysinfo(&info);
totalMem = info.totalram;
freeMem = info.freeram;
total_mem = info.totalram;
free_mem = info.freeram;

return ret == 0;//succeed 0, failed -1
}
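
One caveat worth noting on the renamed helper: on Linux, sysinfo reports totalram/freeram in multiples of info.mem_unit, not necessarily in bytes. A byte-normalized variant, with a hypothetical name so it does not shadow the real helper:

    #include <sys/sysinfo.h>

    // Hypothetical byte-normalized variant of GetSystemMemInfo;
    // sysinfo's totalram/freeram are counted in units of mem_unit.
    bool GetSystemMemInfoBytes(unsigned long &total_mem, unsigned long &free_mem) {
        struct sysinfo info;
        if (sysinfo(&info) != 0) {
            return false; // sysinfo returns 0 on success, -1 on failure
        }
        total_mem = info.totalram * info.mem_unit;
        free_mem = info.freeram * info.mem_unit;
        return true;
    }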

bool CommonUtil::GetSystemAvailableThreads(unsigned int &threadCnt) {
bool CommonUtil::GetSystemAvailableThreads(unsigned int &thread_count) {
//threadCnt = std::thread::hardware_concurrency();
threadCnt = sysconf(_SC_NPROCESSORS_CONF);
threadCnt *= THREAD_MULTIPLY_CPU;
if (threadCnt == 0)
threadCnt = 8;
thread_count = sysconf(_SC_NPROCESSORS_CONF);
thread_count *= THREAD_MULTIPLY_CPU;
if (thread_count == 0)
thread_count = 8;

return true;
}

bool CommonUtil::IsDirectoryExist(const std::string &path)
{
bool CommonUtil::IsDirectoryExist(const std::string &path) {
DIR *dp = nullptr;
if ((dp = opendir(path.c_str())) == nullptr) {
return false;
@ -63,9 +62,13 @@ bool CommonUtil::IsDirectoryExist(const std::string &path)
}

ServerError CommonUtil::CreateDirectory(const std::string &path) {
struct stat directoryStat;
int statOK = stat(path.c_str(), &directoryStat);
if (statOK == 0) {
if(path.empty()) {
return SERVER_SUCCESS;
}

struct stat directory_stat;
int status = stat(path.c_str(), &directory_stat);
if (status == 0) {
return SERVER_SUCCESS;//already exist
}

@ -76,8 +79,8 @@ ServerError CommonUtil::CreateDirectory(const std::string &path) {
return err;
}

statOK = stat(path.c_str(), &directoryStat);
if (statOK == 0) {
status = stat(path.c_str(), &directory_stat);
if (status == 0) {
return SERVER_SUCCESS;//already exist
}

@ -91,37 +94,41 @@ ServerError CommonUtil::CreateDirectory(const std::string &path) {

namespace {
void RemoveDirectory(const std::string &path) {
DIR *pDir = NULL;
DIR *dir = nullptr;
struct dirent *dmsg;
char szFileName[256];
char szFolderName[256];
char file_name[256];
char folder_name[256];

strcpy(szFolderName, path.c_str());
strcat(szFolderName, "/%s");
if ((pDir = opendir(path.c_str())) != NULL) {
while ((dmsg = readdir(pDir)) != NULL) {
strcpy(folder_name, path.c_str());
strcat(folder_name, "/%s");
if ((dir = opendir(path.c_str())) != nullptr) {
while ((dmsg = readdir(dir)) != nullptr) {
if (strcmp(dmsg->d_name, ".") != 0
&& strcmp(dmsg->d_name, "..") != 0) {
sprintf(szFileName, szFolderName, dmsg->d_name);
std::string tmp = szFileName;
sprintf(file_name, folder_name, dmsg->d_name);
std::string tmp = file_name;
if (tmp.find(".") == std::string::npos) {
RemoveDirectory(szFileName);
RemoveDirectory(file_name);
}
remove(szFileName);
remove(file_name);
}
}
}

if (pDir != NULL) {
closedir(pDir);
if (dir != nullptr) {
closedir(dir);
}
remove(path.c_str());
}
}
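
Since this file already aliases namespace fs = boost::filesystem, the hand-rolled recursion (with its fixed-size name buffers) could also be expressed through the library. A hedged sketch, not part of this patch, reusing the file's ServerError codes:

    #include <boost/filesystem.hpp>

    // Sketch of an equivalent recursive delete via boost::filesystem;
    // remove_all deletes a whole tree and reports failures through ec.
    ServerError DeleteDirectoryWithBoost(const std::string &path) {
        if (path.empty()) {
            return SERVER_SUCCESS;
        }
        boost::system::error_code ec;
        boost::filesystem::remove_all(path, ec);
        return ec ? SERVER_UNEXPECTED_ERROR : SERVER_SUCCESS;
    }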

ServerError CommonUtil::DeleteDirectory(const std::string &path) {
struct stat directoryStat;
int statOK = stat(path.c_str(), &directoryStat);
if(path.empty()) {
return SERVER_SUCCESS;
}

struct stat directory_stat;
int statOK = stat(path.c_str(), &directory_stat);
if (statOK != 0)
return SERVER_SUCCESS;

@ -133,6 +140,15 @@ bool CommonUtil::IsFileExist(const std::string &path) {
return (access(path.c_str(), F_OK) == 0);
}

uint64_t CommonUtil::GetFileSize(const std::string &path) {
struct stat file_info;
if (stat(path.c_str(), &file_info) < 0) {
return 0;
} else {
return (uint64_t)file_info.st_size;
}
}

std::string CommonUtil::GetExePath() {
const size_t buf_len = 1024;
char buf[buf_len];

@ -16,10 +16,11 @@ namespace server {

class CommonUtil {
public:
static bool GetSystemMemInfo(unsigned long &totalMem, unsigned long &freeMem);
static bool GetSystemAvailableThreads(unsigned int &threadCnt);
static bool GetSystemMemInfo(unsigned long &total_mem, unsigned long &free_mem);
static bool GetSystemAvailableThreads(unsigned int &thread_count);

static bool IsFileExist(const std::string &path);
static uint64_t GetFileSize(const std::string &path);
static bool IsDirectoryExist(const std::string &path);
static ServerError CreateDirectory(const std::string &path);
static ServerError DeleteDirectory(const std::string &path);

@ -35,6 +35,7 @@ constexpr ServerError SERVER_CANNOT_CREATE_FOLDER = ToGlobalServerErrorCode(8);
constexpr ServerError SERVER_CANNOT_CREATE_FILE = ToGlobalServerErrorCode(9);
constexpr ServerError SERVER_CANNOT_DELETE_FOLDER = ToGlobalServerErrorCode(10);
constexpr ServerError SERVER_CANNOT_DELETE_FILE = ToGlobalServerErrorCode(11);
constexpr ServerError SERVER_BUILD_INDEX_ERROR = ToGlobalServerErrorCode(12);

constexpr ServerError SERVER_TABLE_NOT_EXIST = ToGlobalServerErrorCode(100);
constexpr ServerError SERVER_INVALID_TABLE_NAME = ToGlobalServerErrorCode(101);
@ -83,4 +84,3 @@ private:

} // namespace server
} // namespace milvus
} // namespace zilliz

@ -27,6 +27,10 @@ void StringHelpFunctions::TrimStringQuote(std::string &string, const std::string
ServerError StringHelpFunctions::SplitStringByDelimeter(const std::string &str,
const std::string &delimeter,
std::vector<std::string> &result) {
if(str.empty()) {
return SERVER_SUCCESS;
}

size_t last = 0;
size_t index = str.find_first_of(delimeter, last);
while (index != std::string::npos) {

@ -12,131 +12,81 @@ namespace milvus {
namespace server {

TimeRecorder::TimeRecorder(const std::string &header,
TimeRecorder::TimeDisplayUnit unit,
int64_t log_level) :
header_(header),
time_unit_(unit),
log_level_(log_level) {
header_(header),
log_level_(log_level) {
start_ = last_ = stdclock::now();
span_ = 0.0;
}

TimeRecorder::~TimeRecorder() {
}

std::string
TimeRecorder::GetTimeSpanStr(TimeRecorder::TimeDisplayUnit &unit, double span) const {
std::string spanStr;
std::string unitStr;
TimeRecorder::GetTimeSpanStr(double span) {
std::string str_sec = std::to_string(span * 0.000001) + ((span > 1000000) ? " seconds" : " second");
std::string str_ms = std::to_string(span * 0.001) + " ms";

switch (unit) {
case TimeRecorder::eTimeAutoUnit: {
if (span >= 1000000) {
int64_t t = (int64_t) span;
int64_t hour, minute;
double second;
hour = t / 1000000 / 3600;
t -= hour * 3600 * 1000000;
minute = t / 1000000 / 60;
t -= minute * 60 * 1000000;
second = t * 0.000001;
spanStr += (hour < 10 ? "0" : "") + std::to_string(hour) + ":";
spanStr += (minute < 10 ? "0" : "") + std::to_string(minute) + ":";
spanStr += (second < 10 ? "0" : "") + std::to_string(second);
unitStr = "";
} else if (span >= 1000) {
spanStr = std::to_string(span * 0.001);
unitStr = " ms";
} else {
spanStr = std::to_string(span);
unitStr = " us";
}
}
break;
case TimeRecorder::eTimeHourUnit:
spanStr = std::to_string((span * 0.000001) / 3600);
unitStr = " hour";
break;
case TimeRecorder::eTimeMinuteUnit:
spanStr = std::to_string((span * 0.000001) / 60);
unitStr = " min";
break;
case TimeRecorder::eTimeSecondUnit:
spanStr = std::to_string(span * 0.000001);
unitStr = " sec";
break;
case TimeRecorder::eTimeMilliSecUnit:
spanStr = std::to_string(span * 0.001);
unitStr = " ms";
break;
case TimeRecorder::eTimeMicroSecUnit:
default:
spanStr = std::to_string(span);
unitStr = " us";
break;
}

return spanStr + unitStr;
return str_sec + " [" + str_ms + "]";
}
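
With the unit enum gone, GetTimeSpanStr always renders the span both in seconds and in milliseconds. For example (value illustrative; the argument is in microseconds):

    std::string s = TimeRecorder::GetTimeSpanStr(2500000.0);
    // s == "2.500000 seconds [2500.000000 ms]"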
|
||||
|
||||
void
|
||||
TimeRecorder::PrintTimeRecord(const std::string &msg, double span) {
|
||||
std::string strLog;
|
||||
if (!header_.empty()) strLog += header_ + ": ";
|
||||
strLog += msg;
|
||||
strLog += " (";
|
||||
strLog += GetTimeSpanStr(time_unit_, span);
|
||||
strLog += ")";
|
||||
std::string str_log;
|
||||
if (!header_.empty()) str_log += header_ + ": ";
|
||||
str_log += msg;
|
||||
str_log += " (";
|
||||
str_log += TimeRecorder::GetTimeSpanStr(span);
|
||||
str_log += ")";
|
||||
|
||||
switch (log_level_) {
|
||||
case 0: {
|
||||
SERVER_LOG_TRACE << strLog;
|
||||
SERVER_LOG_TRACE << str_log;
|
||||
break;
|
||||
}
|
||||
case 1: {
|
||||
SERVER_LOG_DEBUG << strLog;
|
||||
SERVER_LOG_DEBUG << str_log;
|
||||
break;
|
||||
}
|
||||
case 2: {
|
||||
SERVER_LOG_INFO << strLog;
|
||||
SERVER_LOG_INFO << str_log;
|
||||
break;
|
||||
}
|
||||
case 3: {
|
||||
SERVER_LOG_WARNING << strLog;
|
||||
SERVER_LOG_WARNING << str_log;
|
||||
break;
|
||||
}
|
||||
case 4: {
|
||||
SERVER_LOG_ERROR << strLog;
|
||||
SERVER_LOG_ERROR << str_log;
|
||||
break;
|
||||
}
|
||||
case 5: {
|
||||
SERVER_LOG_FATAL << strLog;
|
||||
SERVER_LOG_FATAL << str_log;
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
SERVER_LOG_INFO << strLog;
|
||||
SERVER_LOG_INFO << str_log;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
TimeRecorder::Record(const std::string &msg) {
|
||||
double
|
||||
TimeRecorder::RecordSection(const std::string &msg) {
|
||||
stdclock::time_point curr = stdclock::now();
|
||||
span_ = (std::chrono::duration<double, std::micro>(curr - last_)).count();
|
||||
double span = (std::chrono::duration<double, std::micro>(curr - last_)).count();
|
||||
last_ = curr;
|
||||
|
||||
PrintTimeRecord(msg, span_);
|
||||
}
|
||||
|
||||
void
|
||||
TimeRecorder::Elapse(const std::string &msg) {
|
||||
stdclock::time_point curr = stdclock::now();
|
||||
span_ = (std::chrono::duration<double, std::micro>(curr - start_)).count();
|
||||
|
||||
PrintTimeRecord(msg, span_);
|
||||
PrintTimeRecord(msg, span);
|
||||
return span;
|
||||
}
|
||||
|
||||
double
|
||||
TimeRecorder::Span() {
|
||||
return span_;
|
||||
TimeRecorder::ElapseFromBegin(const std::string &msg) {
|
||||
stdclock::time_point curr = stdclock::now();
|
||||
double span = (std::chrono::duration<double, std::micro>(curr - start_)).count();
|
||||
|
||||
PrintTimeRecord(msg, span);
|
||||
return span;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -17,36 +17,24 @@ class TimeRecorder {
    using stdclock = std::chrono::high_resolution_clock;

public:
    enum TimeDisplayUnit {
        eTimeAutoUnit = 0,
        eTimeHourUnit,
        eTimeMinuteUnit,
        eTimeSecondUnit,
        eTimeMilliSecUnit,
        eTimeMicroSecUnit,
    };

    TimeRecorder(const std::string &header,
                 TimeRecorder::TimeDisplayUnit unit = TimeRecorder::eTimeAutoUnit,
                 int64_t log_level = 1); //trace = 0, debug = 1, info = 2, warn = 3, error = 4, critical = 5
                 int64_t log_level = 1);

    void Record(const std::string &msg);
    ~TimeRecorder(); //trace = 0, debug = 1, info = 2, warn = 3, error = 4, critical = 5

    void Elapse(const std::string &msg);
    double RecordSection(const std::string &msg);

    double Span();
    double ElapseFromBegin(const std::string &msg);

    static std::string GetTimeSpanStr(double span);

private:
    std::string GetTimeSpanStr(TimeRecorder::TimeDisplayUnit &unit, double span) const;

    void PrintTimeRecord(const std::string &msg, double span);

private:
    std::string header_;
    TimeRecorder::TimeDisplayUnit time_unit_;
    stdclock::time_point start_;
    stdclock::time_point last_;
    double span_;
    int64_t log_level_;
};

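Read together, the two sides of this hunk show the interface shrinking: the TimeDisplayUnit enum, the per-instance GetTimeSpanStr, and Record/Elapse/Span are the old API, while the static GetTimeSpanStr plus RecordSection/ElapseFromBegin are what survives. A minimal usage sketch against the new interface follows; the include path, namespace qualifier, and the timed phases are assumptions for illustration, not part of this diff:

    #include "utils/TimeRecorder.h"   // assumed include path for the header above

    void TrainPhase();                // hypothetical workload, declared only for the sketch
    void AddVectorsPhase();           // hypothetical workload, declared only for the sketch

    void BuildIndexTimed() {
        // "BUILD" prefixes every log line; the default log_level 1 maps to the debug logger
        zilliz::milvus::server::TimeRecorder recorder("BUILD");

        TrainPhase();
        recorder.RecordSection("train done");                  // microseconds since the last section

        AddVectorsPhase();
        double add_us = recorder.RecordSection("add done");    // span is also returned

        double total_us = recorder.ElapseFromBegin("total");   // microseconds since construction
        (void)add_us; (void)total_us;
    }

Both methods log through PrintTimeRecord and return the measured span as a double, so callers can log and aggregate from the same measurement.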
@ -1,7 +1,8 @@
#include <src/db/ExecutionEngine.h>
#include "db/ExecutionEngine.h"
#include "ValidationUtil.h"
#include "Log.h"

#include <cuda_runtime.h>

namespace zilliz {
namespace milvus {
@ -11,7 +12,7 @@ constexpr size_t table_name_size_limit = 255;
constexpr int64_t table_dimension_limit = 16384;

ServerError
ValidateTableName(const std::string &table_name) {
ValidationUtil::ValidateTableName(const std::string &table_name) {

    // Table name shouldn't be empty.
    if (table_name.empty()) {
@ -45,7 +46,7 @@ ValidateTableName(const std::string &table_name) {
}

ServerError
ValidateTableDimension(int64_t dimension) {
ValidationUtil::ValidateTableDimension(int64_t dimension) {
    if (dimension <= 0 || dimension > table_dimension_limit) {
        SERVER_LOG_ERROR << "Table dimension exceeds the limitation: " << table_dimension_limit;
        return SERVER_INVALID_VECTOR_DIMENSION;
@ -55,18 +56,42 @@ ValidateTableDimension(int64_t dimension) {
}

ServerError
ValidateTableIndexType(int32_t index_type) {
    auto engine_type = engine::EngineType(index_type);
    switch (engine_type) {
        case engine::EngineType::FAISS_IDMAP:
        case engine::EngineType::FAISS_IVFFLAT_GPU: {
            SERVER_LOG_DEBUG << "Index type: " << index_type;
            return SERVER_SUCCESS;
        }
        default: {
            return SERVER_INVALID_INDEX_TYPE;
        }
ValidationUtil::ValidateTableIndexType(int32_t index_type) {
    int engine_type = (int)engine::EngineType(index_type);
    if (engine_type <= 0 || engine_type > (int)engine::EngineType::MAX_VALUE) {
        return SERVER_INVALID_INDEX_TYPE;
    }

    return SERVER_SUCCESS;
}

ServerError
ValidationUtil::ValidateGpuIndex(uint32_t gpu_index) {
    int num_devices = 0;
    auto cuda_err = cudaGetDeviceCount(&num_devices);
    if (cuda_err) {
        SERVER_LOG_ERROR << "Failed to count video cards: " << std::to_string(cuda_err);
        return SERVER_UNEXPECTED_ERROR;
    }

    if (gpu_index >= num_devices) {
        return SERVER_INVALID_ARGUMENT;
    }

    return SERVER_SUCCESS;
}

ServerError
ValidationUtil::GetGpuMemory(uint32_t gpu_index, size_t &memory) {
    cudaDeviceProp deviceProp;
    auto cuda_err = cudaGetDeviceProperties(&deviceProp, gpu_index);
    if (cuda_err) {
        SERVER_LOG_ERROR << "Failed to get video card properties: " << std::to_string(cuda_err);
        return SERVER_UNEXPECTED_ERROR;
    }

    memory = deviceProp.totalGlobalMem;
    return SERVER_SUCCESS;
}

}

@ -6,14 +6,23 @@ namespace zilliz {
namespace milvus {
namespace server {

ServerError
ValidateTableName(const std::string& table_name);
class ValidationUtil {
public:
    static ServerError
    ValidateTableName(const std::string &table_name);

ServerError
ValidateTableDimension(int64_t dimension);
    static ServerError
    ValidateTableDimension(int64_t dimension);

ServerError
ValidateTableIndexType(int32_t index_type);
    static ServerError
    ValidateTableIndexType(int32_t index_type);

    static ServerError
    ValidateGpuIndex(uint32_t gpu_index);

    static ServerError
    GetGpuMemory(uint32_t gpu_index, size_t &memory);
};

}
}

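The header change turns the former free functions into static members of a ValidationUtil class, so every call site gains an explicit qualifier. A hedged sketch of how a create-table handler might chain the checks; the handler itself and its parameters are illustrative, not from this diff:

    #include "ValidationUtil.h"

    using namespace zilliz::milvus::server;

    // Hypothetical request check: run the static validators in order and
    // surface the first failure.
    ServerError
    CheckCreateTableRequest(const std::string &table_name, int64_t dimension, int32_t index_type) {
        ServerError err = ValidationUtil::ValidateTableName(table_name);
        if (err != SERVER_SUCCESS) {
            return err;
        }

        err = ValidationUtil::ValidateTableDimension(dimension);
        if (err != SERVER_SUCCESS) {
            return err;
        }

        return ValidationUtil::ValidateTableIndexType(index_type);
    }

Grouping the validators in one class also gives the two new GPU helpers an obvious home without adding more free functions to the server namespace.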
@ -38,7 +38,7 @@ class GpuResources {
        using namespace zilliz::milvus::server;
        ServerConfig &config = ServerConfig::GetInstance();
        ConfigNode server_config = config.GetConfig(CONFIG_SERVER);
        gpu_num = server_config.GetInt32Value("gpu_index", 0);
        gpu_num = server_config.GetInt32Value(server::CONFIG_GPU_INDEX, 0);
    }

    int32_t GetGpu() {

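The literal "gpu_index" key is replaced with the shared server::CONFIG_GPU_INDEX constant, so the engine and the server read the same setting. A sketch of pairing that value with the new GPU validators before any CUDA work starts; the surrounding function and error plumbing are assumed:

    int32_t gpu_index = server_config.GetInt32Value(server::CONFIG_GPU_INDEX, 0);
    if (ValidationUtil::ValidateGpuIndex(gpu_index) != SERVER_SUCCESS) {
        SERVER_LOG_ERROR << "Configured gpu_index " << gpu_index << " is out of range";
        return SERVER_INVALID_ARGUMENT;   // illustrative error handling
    }

    size_t gpu_memory = 0;
    if (ValidationUtil::GetGpuMemory(gpu_index, gpu_memory) == SERVER_SUCCESS) {
        SERVER_LOG_DEBUG << "GPU " << gpu_index << " total global memory: " << gpu_memory << " bytes";
    }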
@ -72,7 +72,8 @@ Index_ptr IndexBuilder::build_all(const long &nb,
    {
        LOG(DEBUG) << "Build index by GPU";
        // TODO: list supported index types.
        faiss::Index *ori_index = faiss::index_factory(opd_->d, opd_->get_index_type(nb).c_str());
        faiss::MetricType metric_type = opd_->metric_type == "L2" ? faiss::METRIC_L2 : faiss::METRIC_INNER_PRODUCT;
        faiss::Index *ori_index = faiss::index_factory(opd_->d, opd_->get_index_type(nb).c_str(), metric_type);

        std::lock_guard<std::mutex> lk(gpu_resource);
        faiss::gpu::StandardGpuResources res;
@ -91,7 +92,8 @@ Index_ptr IndexBuilder::build_all(const long &nb,
#else
    {
        LOG(DEBUG) << "Build index by CPU";
        faiss::Index *index = faiss::index_factory(opd_->d, opd_->get_index_type(nb).c_str());
        faiss::MetricType metric_type = opd_->metric_type == "L2" ? faiss::METRIC_L2 : faiss::METRIC_INNER_PRODUCT;
        faiss::Index *index = faiss::index_factory(opd_->d, opd_->get_index_type(nb).c_str(), metric_type);
        if (!index->is_trained) {
            nt == 0 || xt == nullptr ? index->train(nb, xb)
                                     : index->train(nt, xt);
@ -114,7 +116,8 @@ BgCpuBuilder::BgCpuBuilder(const zilliz::milvus::engine::Operand_ptr &opd) : Ind

Index_ptr BgCpuBuilder::build_all(const long &nb, const float *xb, const long *ids, const long &nt, const float *xt) {
    std::shared_ptr<faiss::Index> index = nullptr;
    index.reset(faiss::index_factory(opd_->d, opd_->get_index_type(nb).c_str()));
    faiss::MetricType metric_type = opd_->metric_type == "L2" ? faiss::METRIC_L2 : faiss::METRIC_INNER_PRODUCT;
    index.reset(faiss::index_factory(opd_->d, opd_->get_index_type(nb).c_str(), metric_type));

    LOG(DEBUG) << "Build index by CPU";
    {

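All three build paths make the same change: instead of leaning on index_factory's default of METRIC_L2, they resolve the Operand's metric string and pass it explicitly, which is what makes inner-product tables rank correctly. A standalone sketch of the pattern; the dimension, factory description, and header location are assumptions (index_factory's header has moved between faiss versions):

    #include <faiss/AutoTune.h>   // index_factory's home in faiss releases of this era

    faiss::Index *MakeIndex(bool use_inner_product) {
        faiss::MetricType metric_type = use_inner_product ? faiss::METRIC_INNER_PRODUCT
                                                          : faiss::METRIC_L2;
        // Same call shape as the three changed sites: dimension, factory string, metric.
        return faiss::index_factory(512, "IVF16384,Flat", metric_type);
    }

Without the third argument, every index built from the factory string would silently score results by L2 distance even when the table was declared with inner product.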
@ -1,3 +1,4 @@

////////////////////////////////////////////////////////////////////////////////
// Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// Unauthorized copying of this file, via any medium is strictly prohibited.

@ -17,40 +18,68 @@ using std::string;
enum IndexType {
    Invalid_Option = 0,
    IVF = 1,
    IDMAP = 2
    IDMAP = 2,
    IVFSQ8 = 3,
};

IndexType resolveIndexType(const string &index_type) {
    if (index_type == "IVF") { return IndexType::IVF; }
    if (index_type == "IDMap") { return IndexType::IDMAP; }
    if (index_type == "IVFSQ8") { return IndexType::IVFSQ8; }
    return IndexType::Invalid_Option;
}

int CalcBacketCount(int nb, size_t nlist) {
    int backet_count = int(nb / 1000000.0 * nlist);
    if (backet_count == 0) {
        backet_count = 1; // avoid faiss crash
    }

    return backet_count;
}

// nb at least 100
string Operand::get_index_type(const int &nb) {
    if (!index_str.empty()) { return index_str; }

    // TODO: support OPQ or ...
    if (!preproc.empty()) { index_str += (preproc + ","); }

    switch (resolveIndexType(index_type)) {
        case Invalid_Option: {
            // TODO: add exception
            break;
        }
        case IVF: {
            using namespace zilliz::milvus::server;
            ServerConfig &config = ServerConfig::GetInstance();
            ConfigNode engine_config = config.GetConfig(CONFIG_ENGINE);
            size_t nlist = engine_config.GetInt32Value(CONFIG_NLIST, 16384);

            index_str += (ncent != 0 ? index_type + std::to_string(ncent) :
                          index_type + std::to_string(int(nb / 1000000.0 * 16384)));
                          index_type + std::to_string(CalcBacketCount(nb, nlist)));
            // std::cout<<"nlist = "<<nlist<<std::endl;
            if (!postproc.empty()) { index_str += ("," + postproc); }
            break;
        }
        case IVFSQ8: {
            using namespace zilliz::milvus::server;
            ServerConfig &config = ServerConfig::GetInstance();
            ConfigNode engine_config = config.GetConfig(CONFIG_ENGINE);
            size_t nlist = engine_config.GetInt32Value(CONFIG_NLIST, 16384);

            index_str += (ncent != 0 ? "IVF" + std::to_string(ncent) :
                          "IVF" + std::to_string(CalcBacketCount(nb, nlist)));
            index_str += ",SQ8";
            // std::cout<<"nlist = "<<nlist<<std::endl;
            break;
        }
        case IDMAP: {
            index_str += index_type;
            if (!postproc.empty()) { index_str += ("," + postproc); }
            break;
        }
    }

    // TODO: support PQ or ...
    if (!postproc.empty()) { index_str += ("," + postproc); }
    return index_str;
}

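A worked example makes the factory-string construction concrete; the numbers are illustrative, with nlist taken from the config default of 16384 used above and the Operand field defaults assumed empty:

    // nb = 2,000,000 vectors, nlist = 16384:
    //   CalcBacketCount(2000000, 16384) = int(2000000 / 1000000.0 * 16384) = 32768
    //
    //   index_type "IVF"    -> index_str == "IVF32768"      (plus ",<postproc>" when set)
    //   index_type "IVFSQ8" -> index_str == "IVF32768,SQ8"
    //   index_type "IDMap"  -> index_str == "IDMap"
    Operand opd;                   // sketch only: fields default-initialized
    opd.index_type = "IVFSQ8";
    std::string description = opd.get_index_type(2000000);   // yields "IVF32768,SQ8"

Note that the IVFSQ8 branch deliberately emits "IVF...,SQ8" rather than the raw index_type, since the faiss factory grammar spells the quantized variant as a suffix of an IVF description.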
Some files were not shown because too many files have changed in this diff.