start 2.0

Signed-off-by: yefu.chen <yefu.chen@zilliz.com>
This commit is contained in:
yefu.chen 2020-08-20 10:23:45 +08:00
parent 83cbe0f490
commit 49058d5dfb
1683 changed files with 0 additions and 425673 deletions

View File

@ -1,78 +0,0 @@
{
"files": [
"README.md"
],
"imageSize": 100,
"commit": false,
"contributors": [
{
"login": "zerowe-seven",
"name": "zerowe-seven",
"avatar_url": "https://avatars0.githubusercontent.com/u/57790060?v=4",
"profile": "https://github.com/zerowe-seven",
"contributions": [
"code"
]
},
{
"login": "erdustiggen",
"name": "erdustiggen",
"avatar_url": "https://avatars1.githubusercontent.com/u/25433850?v=4",
"profile": "https://github.com/erdustiggen",
"contributions": [
"code"
]
},
{
"login": "gaolizhou",
"name": "gaolizhou",
"avatar_url": "https://avatars2.githubusercontent.com/u/2884044?v=4",
"profile": "https://github.com/gaolizhou",
"contributions": [
"code"
]
},
{
"login": "akihoni",
"name": "Sijie Zhang",
"avatar_url": "https://avatars0.githubusercontent.com/u/36330442?v=4",
"profile": "https://github.com/akihoni",
"contributions": [
"doc"
]
},
{
"login": "PizzaL",
"name": "PizzaL",
"avatar_url": "https://avatars0.githubusercontent.com/u/5666666?v=4",
"profile": "https://github.com/PizzaL",
"contributions": [
"code"
]
},
{
"login": "levylll",
"name": "levylll",
"avatar_url": "https://avatars2.githubusercontent.com/u/5645285?v=4",
"profile": "https://github.com/levylll",
"contributions": [
"code"
]
},
{
"login": "aaronjin2010",
"name": "aaronjin2010",
"avatar_url": "https://avatars1.githubusercontent.com/u/48044391?v=4",
"profile": "https://github.com/aaronjin2010",
"contributions": [
"code"
]
}
],
"contributorsPerLine": 7,
"projectName": "milvus",
"projectOwner": "milvus-io",
"repoType": "github",
"repoHost": "https://github.com",
"skipCi": true
}

View File

@ -1,27 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
---
BasedOnStyle: Google
DerivePointerAlignment: false
ColumnLimit: 120
IndentWidth: 4
AccessModifierOffset: -3
AlwaysBreakAfterReturnType: All
AllowShortBlocksOnASingleLine: false
AllowShortFunctionsOnASingleLine: false
AllowShortIfStatementsOnASingleLine: false
AlignTrailingComments: true

View File

@ -1,33 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
---
Checks: 'clang-diagnostic-*,clang-analyzer-*,-clang-analyzer-alpha*,google-*,modernize-*,readability-*'
# produce HeaderFilterRegex from cpp/build-support/lint_exclusions.txt with:
# echo -n '^('; sed -e 's/*/\.*/g' cpp/build-support/lint_exclusions.txt | tr '\n' '|'; echo ')$'
HeaderFilterRegex: '^(.*cmake-build-debug.*|.*cmake-build-release.*|.*cmake_build.*|.*src/core/thirdparty.*|.*thirdparty.*|.*easylogging++.*|.*SqliteMetaImpl.cpp|.*src/grpc.*|.*src/core.*|.*src/wrapper.*)$'
AnalyzeTemporaryDtors: true
ChainedConditionalReturn: 1
ChainedConditionalAssignment: 1
CheckOptions:
- key: google-readability-braces-around-statements.ShortStatementLines
value: '1'
- key: google-readability-function-size.StatementThreshold
value: '800'
- key: google-readability-namespace-comments.ShortNamespaceLines
value: '10'
- key: google-readability-namespace-comments.SpacesBeforeComments
value: '2'

View File

@ -1,17 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

View File

@ -1,7 +0,0 @@
exclude_paths:
- 'core/src/index/thirdparty/**'
- 'sdk/build-support/**'
- 'core/build-support/**'
- 'core/src/grpc/gen-milvus/**'
- 'core/src/grpc/gen-status/**'
- 'sdk/grpc-gen/**'

View File

@ -1,3 +0,0 @@
core/src/index/thirdparty/**
sdk/build-support/**
core/build-support/**

6
.env
View File

@ -1,6 +0,0 @@
REPO=milvusdb/milvus-dev
MILVUS_INSTALL_PREFIX=/var/lib/milvus
ARCH=amd64
UBUNTU=18.04
CENTOS=7
CUDA=10.1

View File

@ -1,29 +0,0 @@
---
name: "\U0001F41B Bug report"
about: Create a bug report to help us improve Milvus
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**Steps/Code to reproduce behavior**
Follow this [guide](http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports) to craft a minimal bug report. This helps us reproduce the issue you're having and resolve the issue more quickly.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Environment details**
- Hardware/Software conditions (OS, CPU, GPU, Memory)
- Method of installation (Docker, or from source)
- Milvus version (v0.3.1, or v0.4.0)
- Milvus configuration (Settings you made in `server_config.yaml`)
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.

View File

@ -1,35 +0,0 @@
---
name: "\U0001F4DD Documentation request"
about: Report incorrect or needed documentation
title: ''
labels: ''
assignees: ''
---
## Report incorrect documentation
**Location of incorrect documentation**
Provide links and line numbers if applicable.
**Describe the problems or issues found in the documentation**
A clear and concise description of what you found to be incorrect.
**Steps taken to verify documentation is incorrect**
List any steps you have taken:
**Suggested fix for documentation**
Detail proposed changes to fix the documentation if you have any.
---
## Report needed documentation
**Report needed documentation**
A clear and concise description of what documentation you believe it is needed and why.
**Describe the documentation you'd like**
A clear and concise description of what you want to happen.
**Steps taken to search for needed documentation**
List any steps you have taken:

View File

@ -1,20 +0,0 @@
---
name: "\U0001F680 Feature request"
about: Suggest an idea for Milvus
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. E.g. I wish I could use Milvus to do [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context, code examples, or references to existing implementations about the feature request here.

View File

@ -1,10 +0,0 @@
---
name: "\U0001F914 General question"
about: Ask a general question about Milvus
title: ''
labels: ''
assignees: ''
---
**What is your question?**

View File

@ -1,18 +0,0 @@
**What type of PR is this?**
api-change / bug / design / documentation / feature
**What this PR does / why we need it:**
**Which issue(s) this PR fixes:**
Fixes #
**Special notes for your reviewer:**
**Additional documentation (e.g. design docs, usage docs, etc.):**

View File

@ -1,118 +0,0 @@
name: Core
# This workflow is triggered on pushes or pull request to the repository.
on:
push:
# file paths to consider in the event. Optional; defaults to all.
paths:
- 'ci/**'
- 'core/**'
- '.github/workflows/core.yml'
- '!**.md'
- '!ci/jenkins/**'
pull_request:
# file paths to consider in the event. Optional; defaults to all.
paths:
- 'ci/**'
- 'core/**'
- '.github/workflows/core.yml'
- '!**.md'
- '!ci/jenkins/**'
jobs:
ubuntu:
name: AMD64 Ubuntu ${{ matrix.ubuntu }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
ubuntu: [18.04]
env:
UBUNTU: ${{ matrix.ubuntu }}
steps:
# This step checks out a copy of your repository.
- name: Checkout Milvus
uses: actions/checkout@v1
- name: Check Dockerfile
uses: mgrachev/action-hadolint@v1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
reporter: github-pr-check # Default is github-pr-check
- name: Docker Pull
shell: bash
run: |
docker-compose pull --ignore-pull-failures db
docker-compose pull --ignore-pull-failures ubuntu-core
- name: Docker Build
shell: bash
run: |
docker-compose build ubuntu-core
docker rmi $(docker images | grep '<none>' | awk '{print $3}') || exit 0
- name: Docker Run
run: |
docker-compose run --use-aliases -d db
docker-compose run ubuntu-core
- name: Docker Push
if: success() && github.event_name == 'push' && github.repository == 'milvus-io/milvus'
continue-on-error: true
shell: bash
run: |
docker login -u ${{ secrets.DOCKERHUB_USER }} \
-p ${{ secrets.DOCKERHUB_TOKEN }}
docker-compose push ubuntu-core
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
file: ./ci/scripts/output_new.info
name: ubuntu-${{ matrix.ubuntu }}-unittests
flags: cpu_version_ubuntu_18_04_unittest
yml: ./codecov.yaml
centos:
name: AMD64 CentOS ${{ matrix.centos }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
centos: [7]
env:
CENTOS: ${{ matrix.centos }}
steps:
- name: Checkout Milvus
uses: actions/checkout@v1
- name: Check Dockerfile
uses: mgrachev/action-hadolint@v1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
reporter: github-pr-check # Default is github-pr-check
- name: Docker Pull
shell: bash
run: |
docker-compose pull --ignore-pull-failures db
docker-compose pull --ignore-pull-failures centos-core
- name: Docker Build
shell: bash
run: |
docker-compose build centos-core
docker rmi $(docker images | grep '<none>' | awk '{print $3}') || exit 0
- name: Docker Run
run: |
docker-compose run --use-aliases -d db
docker-compose run centos-core
- name: Docker Push
if: success() && github.event_name == 'push' && github.repository == 'milvus-io/milvus'
continue-on-error: true
shell: bash
run: |
docker login -u ${{ secrets.DOCKERHUB_USER }} \
-p ${{ secrets.DOCKERHUB_TOKEN }}
docker-compose push centos-core
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
file: ./ci/scripts/output_new.info
name: centos-${{ matrix.centos }}-unittests
flags: cpu_version_centos7_unittest
yml: ./codecov.yaml

35
.gitignore vendored
View File

@ -1,35 +0,0 @@
# CLion generated files
core/cmake-build-debug/
core/cmake-build-release/
core/cmake_build
core/.idea/
core/thirdparty/knowhere_build
.idea/
.ycm_extra_conf.py
__pycache__
# vscode generated files
.vscode
build
cmake-build-debug
cmake-build-release
cmake_build
# Compiled source
*.a
*.so
*.so.*
*.o
*.lo
*.tar.gz
*.log
.coverage
*.pyc
cov_html/
# temp
shards/all_in_one_with_mysql/metadata/
shards/mishards/.env
*.swp

View File

@ -1,6 +0,0 @@
ignored:
- DL3003
- DL3007
- DL3008
# disable following sourced files
- SC1091

View File

@ -1,19 +0,0 @@
pull_request_rules:
- name: Automatic merge on approval
conditions:
- "label=automerge"
- "#approved-reviews-by>=1"
- "#review-requested=0"
- "#changes-requested-reviews-by=0"
- "status-success=DCO"
- "status-success=continuous-integration/jenkins/pr-merge"
actions:
merge:
method: squash
- name: Ask to resolve conflict
conditions:
- conflict
actions:
comment:
message: This pull request is now in conflicts. Could you fix it? 🙏

View File

@ -1,888 +0,0 @@
# Changelog
Please mark all change in change log and use the issue from GitHub
# Milvus 0.10.3 (TBD)
## Bug
## Feature
## Improvement
- \#3213 Allow users to specify a distance type at runtime for Flat index
## Task
# Milvus 0.10.2 (2020-08-15)
## Bug
- \#2890 Fix the index size calculation in cache
- \#2952 Fix the result merging of IVF_PQ IP
- \#2975 Fix config UT failed
- \#3012 If the cache is too small, queries using multiple GPUs will cause to crash
- \#3133 Reverse query result in mishards if metric type is IP
## Feature
## Improvement
- \#2653 Improve IVF search performance when NQ and nProbe are both large
- \#2828 Let Faiss not to compile half float by default
## Task
# Milvus 0.10.1 (2020-07-20)
## Bug
- \#2487 Enlarge timeout value for creating collection
- \#2487 HotFix release lock failed on NAS
- \#2557 Fix random crash of INSERT_DUPLICATE_ID case
- \#2578 Result count doesn't match target vectors count
- \#2585 Support IVF_PQ IP on GPU
- \#2598 Fix Milvus docker image report illegal instruction
- \#2617 Fix HNSW and RNSG index files size
- \#2637 Suit the range of HNSW parameters
- \#2642 Create index failed and server crashed
- \#2649 Search parameter of annoy has conflict with document
- \#2690 Remove body parser in show-partitions endpoints
- \#2692 Milvus hangs during multi-thread concurrent search
- \#2739 Fix mishards start failed
- \#2752 Milvus formats vectors data to double-precision and return to http client
- \#2767 Fix a bug of getting wrong nprobe limitation in knowhere on GPU version
- \#2768 After building the index, the number of vectors increases
- \#2774 Server down during loading data
- \#2776 Fix too many data copies during creating IVF index
- \#2813 Implement RNSG IP
## Feature
## Improvement
- \#2932 Upgrade mishards for milvus 0.10.1
## Task
# Milvus 0.10.0 (2020-06-15)
## Bug
- \#2367 Fix inconsistent reading and writing when using mishards
- \#2368 Make read node detect delete behavior
- \#2394 Drop collection timeout if too many partitions created on collection
- \#2549 Launch server fail using demo config
- \#2564 cache.cache_size range check error
## Feature
- \#2363 Update branch version
- \#2510 Upgrade Milvus config
## Improvement
- \#2381 Upgrade FAISS to 1.6.3
- \#2429 Fix Milvus 0.9.1 performance degrade issue
- \#2441 Improve Knowhere code coverage
- \#2466 Optimize k-selection implementation of faiss gpu version
- \#2489 Add exception throw on mysql meta error
- \#2495 Add creating lock file failure reason.
- \#2516 Improve unit test coverage
- \#2548 Upgrade mishards for milvus v0.10.0
## Task
# Milvus 0.9.1 (2020-05-29)
## Bug
- \#2366 Reduce SQL execution times for collection contains lot of partitions
- \#2378 Duplicate data after server restart
- \#2395 Fix large nq cudaMalloc error
- \#2399 The nlist set by the user may not take effect
- \#2403 MySQL max_idle_time is 10 by default
- \#2450 The deleted vectors may be found on GPU
- \#2456 openblas library install failed
## Feature
## Improvement
- \#2353 Remove log_config from code and scripts
- \#2370 Clean compile warning
- \#2410 Logging build index progress
- \#2461 Upgrade mishards for milvus 0.9.1
# Milvus 0.9.0 (2020-05-15)
## Bug
- \#1705 Limit the insert data batch size
- \#1776 Error out when index SQ8H run in CPU mode
- \#1925 To flush all collections, flush cannot work
- \#1929 Skip MySQL meta schema field width check
- \#1946 Fix load index file CPU2GPU fail during searching
- \#1955 Switch create_index operation to background once client break connection
- \#1997 Index file missed after compact
- \#2002 Remove log error msg `Attributes is null`
- \#2073 Fix CheckDBConfigBackendUrl error message
- \#2076 CheckMetricConfigAddress error message
- \#2120 Fix Search expected failed if search params set invalid
- \#2121 Allow regex match partition tag when search
- \#2128 Check has_partition params
- \#2131 Distance/ID returned is not correct if searching with duplicate ids
- \#2141 Fix server start failed if wal directory exist
- \#2169 Fix SingleIndexTest.IVFSQHybrid unittest
- \#2194 Fix get collection info failed
- \#2196 Fix server start failed if wal is disabled
- \#2203 0.8.0 id=-1 is returned when total count < topk
- \#2228 Fix show partitions failed in http module
- \#2231 Use server_config to define hard-delete delay time for segment files
- \#2261 Re-define result returned by has_collection if collection in delete state
- \#2264 Milvus opened too many files when the metric_config.enable_monitor=true
- \#2266 Server hangs when using multi-clients to query different collections
- \#2280 has_partition should return true for `_default`
## Feature
- \#1751 Add api SearchByID
- \#1752 Add api GetVectorsByID
- \#1962 Add api HasPartition
- \#1965 FAISS/NSG/HNSW/ANNOY use unified distance calculation algorithm
- \#2054 Check if CPU instruction sets are illegal
- \#2057 Add a config parameter to switch off http server
- \#2059 Add lock file avoid multiple instances modifying data at the same time
- \#2064 Warn when use SQLite as metadata management
- \#2111 Check GPU environment before start server
- \#2206 Log file rotating
- \#2240 Obtain running rpc requests information
- \#2268 Intelligently detect openblas library in system to avoid installing from source code every time
- \#2283 Suspend the building tasks when any query command arrives.
## Improvement
- \#221 Refactor LOG macro
- \#833 Catch exception in RolloutHandler and output in stderr
- \#1796 Compile Openblas with source code to improve the performance
- \#1942 Background merge file strategy
- \#2039 Support Milvus run on SSE CPUs
- \#2149 Merge server_cpu_config.template and server_gpu_config.template
- \#2153 Upgrade thirdparty oatpp to v1.0.0
- \#2167 Merge log_config.conf with server_config.yaml
- \#2173 Check storage permission
- \#2178 Using elkan K-Means to improve IVF
- \#2185 Change id to string format in http module
- \#2186 Update endpoints in http module
- \#2190 Fix memory usage is twice of index size when using GPU searching
- \#2248 Use hostname and port as instance label of metrics
- \#2252 Upgrade mishards APIs and requirements
- \#2256 k-means clustering algorithm use only Euclidean distance metric
- \#2300 Upgrade mishrads configuration to version 0.4
- \#2311 Update mishards methods
- \#2330 Change url for behavior 'get_entities_by_id'
- \#2347 Update http document for v0.9.0
- \#2358 Upgrade mishards for v0.9.0
## Task
# Milvus 0.8.0 (2020-04-15)
## Bug
- \#1276 SQLite throw exception after create 50000+ partitions in a table
- \#1762 Server is not forbidden to create new partition which tag is `_default`
- \#1789 Fix multi-client search cause server crash
- \#1832 Fix crash in tracing module
- \#1873 Fix index file serialize to incorrect path
- \#1881 Fix bad alloc when index files lost
- \#1883 Fix inserted vectors becomes all zero when index_file_size >= 2GB
- \#1901 Search failed with flat index
- \#1903 Fix invalid annoy result
- \#1910 C++ SDK GetIDsInSegment could not work for large dataset
## Feature
- \#261 Integrate ANNOY into Milvus
- \#1655 GPU index support delete vectors
- \#1660 IVF PQ CPU support deleted vectors searching
- \#1661 HNSW support deleted vectors searching
- \#1825 Add annoy index type in C++ sdk
- \#1849 NSG support deleted vectors searching
- \#1893 Log config information and device information
## Improvement
- \#1627 Move read/write index APIs into codec
- \#1784 Add Substructure and Superstructure in http module
- \#1858 Disable S3 build
- \#1882 Add index annoy into http module
- \#1885 Optimize knowhere unittest
- \#1886 Refactor log on search and insert request
- \#1897 Heap pop and push can be realized by heap_swap_top
- \#1921 Use TimeRecorder instead of chrono
- \#1928 Fix too many data and uid copies when loading files
- \#1930 Upgrade mishards to v0.8.0
## Task
# Milvus 0.7.1 (2020-03-29)
## Bug
- \#1301 Data in WAL may be accidentally inserted into a new table with the same name.
- \#1634 Fix search demo bug in HTTP doc
- \#1635 Vectors can be returned by searching after vectors deleted if `cache_insert_data` set true
- \#1648 The cache cannot be used all when the vector type is binary
- \#1651 Check validity of dimension when collection metric type is binary one
- \#1663 PQ index parameter 'm' validation
- \#1686 API search_in_files cannot work correctly when vectors is stored in certain non-default partition
- \#1689 Fix SQ8H search fail on SIFT-1B dataset
- \#1667 Create index failed with type: rnsg if metric_type is IP
- \#1708 NSG search crashed
- \#1724 Remove unused unittests
- \#1728 Optimize request handler to combine similar query
- \#1734 Opentracing for combined search request
- \#1735 Fix search out of memory with ivf_flat
- \#1747 Expected error status if search with partition_tag not existed
- \#1756 Fix memory exhausted during searching
- \#1781 Fix search hang with SQ8H
- \#1812 Fix incorrect request method in search example in http readme
- \#1818 Duplicate data generated after restart milvus server
## Feature
- \#1603 BinaryFlat add 2 Metric: Substructure and Superstructure
## Improvement
- \#267 Improve search performance: reduce delay
- \#342 Knowhere and Wrapper refactor
- \#1537 Optimize raw vector and uids read/write
- \#1546 Move Config.cpp to config directory
- \#1547 Rename storage/file to storage/disk and rename classes
- \#1548 Move store/Directory to storage/Operation and add FSHandler
- \#1572 Optimize config cpu/gpu cache_capacity setter
- \#1619 Improve compact performance
- \#1649 Fix Milvus crash on old CPU
- \#1653 IndexFlat (SSE) and IndexBinaryFlat performance improvement for small NQ
- \#1678 Remove CUSTOMIZATION macro
- \#1698 Upgrade mishards to v0.7.0
- \#1719 Improve Milvus log
- \#1754 Optimize behavior to get file ids from metadata in mishards
- \#1799 Update docker images to 0.7.1 in mishards
## Task
# Milvus 0.7.0 (2020-03-11)
## Bug
- \#715 Milvus crash when searching and building index simultaneously using SQ8H
- \#744 Don't return partition table for show_tables
- \#770 Server unittest run failed on low-end server
- \#805 IVFTest.gpu_seal_test unittest failed
- \#831 Judge branch error in CommonUtil.cpp
- \#977 Server crash when create tables concurrently
- \#990 Check gpu resources setting when assign repeated value
- \#995 Table count set to 0 if no tables found
- \#1010 Improve error message when offset or page_size is equal 0
- \#1022 Check if partition name is valid
- \#1028 Check if table exists when show partitions
- \#1029 Check if table exists when try to delete partition
- \#1066 Optimize http insert and search speed
- \#1022 Check if partition name is legal
- \#1028 Check if table exists when show partitions
- \#1029 Check if table exists when try to delete partition
- \#1066 Optimize http insert and search speed
- \#1067 Add binary vectors support in http server
- \#1075 Improve error message when page size or offset is illegal
- \#1082 Check page_size or offset value to avoid float
- \#1115 Http server support load table into memory
- \#1152 Error log output continuously after server start
- \#1211 Server down caused by searching with index_type: HNSW
- \#1240 Update license declaration
- \#1298 Unit test failed when on CPU2GPU case
- \#1359 Negative distance value returned when searching with HNSW index type
- \#1429 Server crashed when searching vectors with GPU
- \#1476 Fix vectors results bug when getting vectors from segments
- \#1484 Index type changed to IDMAP after compacted
- \#1491 Server crashed during adding vectors
- \#1499 Fix duplicated ID number issue
- \#1504 Avoid possible race condition between delete and search
- \#1507 set_config for insert_buffer_size is wrong
- \#1510 Add set interfaces for WAL configurations
- \#1511 Fix big integer cannot pass to server correctly
- \#1517 Result is not correct when search vectors in multi partition, index type is RNSG
- \#1518 Table count did not match after deleting vectors and compact
- \#1521 Make cache_insert_data take effect in-service
- \#1525 Add setter API for config preload_table
- \#1529 Fix server crash when cache_insert_data enabled
- \#1530 Set table file with correct engine type in meta
- \#1532 Search with ivf_flat failed with open-dataset: sift-256-hamming
- \#1535 Degradation searching performance with metric_type: binary_idmap
- \#1549 Fix server/wal config setting bug
- \#1556 Index file not created after table and index created
- \#1560 Search crashed with Super-high dimensional binary vector
- \#1564 Too low recall for glove-200-angular, ivf_pq index
- \#1571 Meta engine type become IDMAP after dropping index for BINARY table
- \#1574 Set all existing bitset in cache when applying deletes
- \#1577 Row count incorrect if delete vectors then create index
- \#1580 Old segment folder not removed after merge/compact if create_index is called before adding data
- \#1590 Server down caused by failure to write file during concurrent mixed operations
- \#1598 Server down during mixed operations
- \#1601 External link bug in HTTP doc
- \#1609 Refine Compact function
- \#1808 Building index params check for Annoy
- \#1852 Search index type<Annoy> failed with reason `failed to load index file`
## Feature
- \#216 Add CLI to get server info
- \#343 Add Opentracing
- \#665 Support get/set config via CLI
- \#759 Put C++ sdk out of milvus/core
- \#766 If partition tag is similar, wrong partition is searched
- \#771 Add server build commit info interface
- \#788 Add web server into server module
- \#813 Add push mode for prometheus monitor
- \#815 Support MinIO storage
- \#823 Support binary vector tanimoto/jaccard/hamming metric
- \#830 Support WAL(write-ahead logging)
- \#853 Support HNSW
- \#861 Support DeleteById / SearchByID / GetVectorById / Flush
- \#910 Change Milvus c++ standard to c++17
- \#1122 Support AVX-512 in FAISS
- \#1204 Add api to get table data information
- \#1250 Support CPU profiling
- \#1302 Get all record IDs in a segment by given a segment id
- \#1461 Add crud APIs and segments APIs into http module
- \#1463 Update config version to 0.2
- \#1531 Remove S3 related config
## Improvement
- \#738 Use Openblas / lapack from apt install
- \#758 Enhance config description
- \#791 Remove Arrow
- \#834 Add cpu mode for built-in Faiss
- \#848 Add ready-to-use config files to the Milvus repo for enhanced user experience
- \#860 Remove redundant checks in CacheMgr's constructor
- \#908 Move "primary_path" and "secondary_path" to storage config
- \#931 Remove "collector" from config
- \#966 Update NOTICE.md
- \#1002 Rename minio to s3 in Storage Config section
- \#1078 Move 'insert_buffer_size' to Cache Config section
- \#1105 Error message is not clear when creating IVFSQ8H index without gpu resources
- \#740, #849, #878, #972, #1033, #1161, #1173, #1199, #1190, #1223, #1222, #1257, #1264, #1269, #1164, #1303, #1304, #1324, #1388, #1459 Various fixes and improvements for Milvus documentation.
- \#1297 Hide partition_name parameter, avoid users directly accessing partition table
- \#1234 Do S3 server validation check when Milvus startup
- \#1263 Allow system conf modifiable and some take effect directly
- \#1310 Add default partition tag for a table
- \#1320 Remove debug logging from faiss
- \#1426 Support to configure whether to enabled autoflush and the autoflush interval
- \#1444 Improve delete
- \#1448 General proto api for NNS libraries
- \#1480 Add return code for AVX512 selection
- \#1524 Update config "preload_table" description
- \#1544 Update resources name in HTTP module
- \#1567 Update yaml config description
## Task
- \#1327 Exclude third-party code from codebeat
- \#1331 Exclude third-party code from codacy
# Milvus 0.6.0 (2019-12-07)
## Bug
- \#228 Memory usage increased slowly during searching vectors
- \#246 Exclude src/external folder from code coverage for jenkin ci
- \#248 Reside src/external in thirdparty
- \#316 Some files not merged after vectors added
- \#327 Search does not use GPU when index type is FLAT
- \#331 Add exception handle when search fail
- \#340 Test cases run failed on 0.6.0
- \#353 Rename config.h.in to version.h.in
- \#374 sdk_simple return empty result
- \#377 Create partition success if tag name only contains spaces
- \#397 sdk_simple return incorrect result
- \#399 Create partition should be failed if partition tag existed
- \#412 Message returned is confused when partition created with null partition name
- \#416 Dropping the same partition repeatedly succeeds
- \#440 Query API in customization still uses old version
- \#440 Server cannot startup with gpu_resource_config.enable=false in GPU version
- \#458 Index data is not compatible between 0.5 and 0.6
- \#465 Server hang caused by searching with nsg index
- \#485 Increase code coverage rate
- \#486 gpu no usage during index building
- \#497 CPU-version search performance decreased
- \#504 The code coverage rate of core/src/scheduler/optimizer is too low
- \#509 IVF_PQ index build trapped into dead loop caused by invalid params
- \#513 Unittest DELETE_BY_RANGE sometimes failed
- \#523 Erase file data from cache once the file is marked as deleted
- \#527 faiss benchmark not compatible with faiss 1.6.0
- \#530 BuildIndex stop when do build index and search simultaneously
- \#532 Assign value to `table_name` from conftest shell
- \#533 NSG build failed with MetricType Inner Product
- \#543 client raise exception in shards when search results is empty
- \#545 Avoid dead circle of build index thread when error occurs
- \#547 NSG build failed using GPU-edition if set gpu_enable false
- \#548 NSG search accuracy is too low
- \#552 Server down during building index_type: IVF_PQ using GPU-edition
- \#561 Milvus server should report exception/error message or terminate on mysql metadata backend error
- \#579 Build index hang in GPU version when gpu_resources disabled
- \#596 Frequently insert operation cost too much disk space
- \#599 Build index log is incorrect
- \#602 Optimizer specify wrong gpu_id
- \#606 No log generated during building index with CPU
- \#616 IP search metric_type is not supported by IVF_PQ index
- \#631 FAISS isn't compiled with O3 option
- \#636 (CPU) Create index PQ should be failed if table metric type set Inner Product
- \#649 Typo "partiton" should be "partition"
- \#654 Random crash when frequently insert vector one by one
- \#658 Milvus error out when building SQ8H index without GPU resources
- \#668 Update badge of README
- \#670 Random failure of unittest db_test::SEARCH_TEST
- \#674 Server down in stability test
- \#696 Metric_type changed from IP to L2
- \#705 Fix search SQ8H crash without GPU resource
## Feature
- \#12 Pure CPU version for Milvus
- \#77 Support table partition
- \#127 Support new Index type IVFPQ
- \#226 Experimental shards middleware for Milvus
- \#227 Support new index types SPTAG-KDT and SPTAG-BKT
- \#346 Support build index with multiple gpu
- \#420 Update shards merge part to match v0.5.3
- \#488 Add log in scheduler/optimizer
- \#502 C++ SDK support IVFPQ and SPTAG
- \#560 Add version in server config file
- \#605 Print more messages when server start
- \#644 Add a new rpc command to get milvus build version whether cpu or gpu
- \#709 Show last commit id when server start
## Improvement
- \#255 Add ivfsq8 test report detailed version
- \#260 C++ SDK README
- \#266 RPC request source code refactor
- \#274 Log the time cost during preloading data
- \#275 Rename C++ SDK IndexType
- \#284 Change C++ SDK to shared library
- \#306 Use int64 for all config integer
- \#310 Add Q&A for 'protocol https not supported or disable in libcurl' issue
- \#314 add Find FAISS in CMake
- \#322 Add option to enable / disable prometheus
- \#354 Build migration scripts into milvus docker image
- \#358 Add more information in build.sh and install.md
- \#404 Add virtual method Init() in Pass abstract class
- \#409 Add a Fallback pass in optimizer
- \#433 C++ SDK query result is not easy to use
- \#449 Add ShowPartitions example for C++ SDK
- \#470 Small raw files should not be build index
- \#584 Integrate internal FAISS
- \#611 Remove MILVUS_CPU_VERSION
- \#634 FAISS GPU version is compiled with O0
- \#737 Refactor server module to separate Grpc from server handler and scheduler
## Task
# Milvus 0.5.3 (2019-11-13)
## Bug
- \#258 Bytes type in proto cause big-endian/little-endian problem
## Feature
## Improvement
- \#204 improve grpc performance in search
- \#207 Add more unittest for config set/get
- \#208 Optimize unittest to support run single test more easily
- \#284 Change C++ SDK to shared library
- \#260 C++ SDK README
## Task
# Milvus 0.5.2 (2019-11-07)
## Bug
- \#194 Search failed: message="Table file doesn't exist"
## Feature
## Improvement
- \#190 Update default config:use_blas_threshold to 1100 and server version printout to 0.5.2
## Task
# Milvus 0.5.1 (2019-11-04)
## Bug
- \#134 JFrog cache error
- \#161 Search IVFSQHybrid crash on gpu
- \#169 IVF_FLAT search out of memory
## Feature
- \#90 The server start error messages could be improved to enhance user experience
- \#104 test_scheduler core dump
- \#115 Using new structure for tasktable
- \#139 New config option use_gpu_threshold
- \#146 Add only GPU and only CPU version for IVF_SQ8 and IVF_FLAT
- \#164 Add CPU version for building index
## Improvement
- \#64 Improvement dump function in scheduler
- \#80 Print version information into log during server start
- \#82 Move easyloggingpp into "external" directory
- \#92 Speed up CMake build process
- \#96 Remove .a file in milvus/lib for docker-version
- \#118 Using shared_ptr instead of weak_ptr to avoid performance loss
- \#122 Add unique id for Job
- \#130 Set task state MOVED after resource copy it completed
- \#149 Improve large query optimizer pass
- \#156 Not return error when search_resources and index_build_device set cpu
- \#159 Change the configuration name from 'use_gpu_threshold' to 'gpu_search_threshold'
- \#168 Improve result reduce
- \#175 add invalid config unittest
## Task
# Milvus 0.5.0 (2019-10-21)
## Bug
- MS-568 Fix gpuresource free error
- MS-572 Milvus crash when get SIGINT
- MS-577 Unittest Query randomly hung
- MS-587 Count get wrong result after adding vectors and index built immediately
- MS-599 Search wrong result when table created with metric_type: IP
- MS-601 Docker logs error caused by get CPUTemperature error
- MS-605 Server going down during searching vectors
- MS-620 Get table row counts display wrong error code
- MS-622 Delete vectors should be failed if date range is invalid
- MS-624 Search vectors failed if time ranges long enough
- MS-637 Out of memory when load too many tasks
- MS-639 SQ8H index created failed and server hang
- MS-640 Cache object size calculate incorrect
- MS-641 Segment fault(signal 11) in PickToLoad
- MS-644 Search crashed with index-type: flat
- MS-647 grafana display average cpu-temp
- MS-652 IVFSQH quantization double free
- MS-650 SQ8H index create issue
- MS-653 When config check fail, Milvus close without message
- MS-654 Describe index timeout when building index
- MS-658 Fix SQ8 Hybrid can't search
- MS-665 IVF_SQ8H search crash when no GPU resource in search_resources
- \#9 Change default gpu_cache_capacity to 4
- \#20 C++ sdk example get grpc error
- \#23 Add unittest to improve code coverage
- \#31 make clang-format failed after run build.sh -l
- \#39 Create SQ8H index hang if using github server version
- \#30 Some troubleshoot messages in Milvus do not provide enough information
- \#48 Config unittest failed
- \#59 Topk result is incorrect for small dataset
## Improvement
- MS-552 Add and change the easylogging library
- MS-553 Refine cache code
- MS-555 Remove old scheduler
- MS-556 Add Job Definition in Scheduler
- MS-557 Merge Log.h
- MS-558 Refine status code
- MS-562 Add JobMgr and TaskCreator in Scheduler
- MS-566 Refactor cmake
- MS-574 Milvus configuration refactor
- MS-578 Make sure milvus5.0 don't crack 0.3.1 data
- MS-585 Update namespace in scheduler
- MS-606 Speed up result reduce
- MS-608 Update TODO names
- MS-609 Update task construct function
- MS-611 Add resources validity check in ResourceMgr
- MS-619 Add optimizer class in scheduler
- MS-626 Refactor DataObj to support cache any type data
- MS-648 Improve unittest
- MS-655 Upgrade SPTAG
- \#42 Put union of index_build_device and search resources to gpu_pool
- \#67 Avoid linking targets multiple times in cmake
## Feature
- MS-614 Preload table at startup
- MS-627 Integrate new index: IVFSQHybrid
- MS-631 IVFSQ8H Index support
- MS-636 Add optimizer in scheduler for FAISS_IVFSQ8H
## Task
- MS-554 Change license to Apache 2.0
- MS-561 Add contributing guidelines, code of conduct and README docs
- MS-567 Add NOTICE.md
- MS-569 Complete the NOTICE.md
- MS-575 Add Clang-format & Clang-tidy & Cpplint
- MS-586 Remove BUILD_FAISS_WITH_MKL option
- MS-590 Refine cmake code to support cpplint
- MS-600 Reconstruct unittest code
- MS-602 Remove zilliz namespace
- MS-610 Change error code base value from hex to decimal
- MS-624 Re-organize project directory for open-source
- MS-635 Add compile option to support customized faiss
- MS-660 add ubuntu_build_deps.sh
- \#18 Add all test cases
# Milvus 0.4.0 (2019-09-12)
## Bug
- MS-119 The problem of combining the log files
- MS-121 The problem that user can't change the time zone
- MS-411 Fix metric unittest linking error
- MS-412 Fix gpu cache logical error
- MS-416 ExecutionEngineImpl::GpuCache has not return value cause crash
- MS-417 YAML sequence load disable cause scheduler startup failed
- MS-413 Create index failed and server exited
- MS-427 Describe index error after drop index
- MS-432 Search vectors params nprobe need to check max number
- MS-431 Search vectors params nprobe: 0/-1, expected result: raise exception
- MS-331 Create Table: when table exists, error code is META_FAILED(code=15) rather than ILLEGAL TABLE NAME(code=9)
- MS-430 Search no result if index created with FLAT
- MS-443 Create index hang again
- MS-436 Delete vectors failed if index created with index_type: IVF_FLAT/IVF_SQ8
- MS-449 Add vectors twice success, once with ids, the other no ids
- MS-450 server hang after run stop_server.sh
- MS-458 Keep building index for one file when no gpu resource
- MS-461 Mysql meta unittest failed
- MS-462 Run milvus server twice, should display error
- MS-463 Search timeout
- MS-467 mysql db test failed
- MS-470 Drop index success, which table not created
- MS-471 code coverage run failed
- MS-492 Drop index failed if index have been created with index_type: FLAT
- MS-493 Knowhere unittest crash
- MS-453 GPU search error when nprobe set more than 1024
- MS-474 Create index hang if use branch-0.3.1 server config
- MS-510 unittest out of memory and crashed
- MS-507 Dataset 10m-512, index type SQ8: performance abnormal when CPU_CACHE set to 16 or 64
- MS-543 SearchTask fail without exception
- MS-582 grafana displays changes frequently
## Improvement
- MS-327 Clean code for milvus
- MS-336 Scheduler interface
- MS-344 Add TaskTable Test
- MS-345 Add Node Test
- MS-346 Add some implementation of scheduler to solve compile error
- MS-348 Add ResourceFactory Test
- MS-350 Remove knowhere submodule
- MS-354 Add task class and interface in scheduler
- MS-355 Add copy interface in ExcutionEngine
- MS-357 Add minimum schedule function
- MS-359 Add cost test in new scheduler
- MS-361 Add event in resource
- MS-364 Modify tasktableitem in tasktable
- MS-365 Use tasktableitemptr instead in event
- MS-366 Implement TaskTable
- MS-368 Implement cost.cpp
- MS-371 Add TaskTableUpdatedEvent
- MS-373 Add resource test
- MS-374 Add action definition
- MS-375 Add Dump implementation for Event
- MS-376 Add loader and executor enable flag in Resource avoid diskresource execute task
- MS-377 Improve process thread trigger in ResourceMgr, Scheduler and TaskTable
- MS-378 Debug and Update normal_test in scheduler unittest
- MS-379 Add Dump implementation in Resource
- MS-380 Update resource loader and executor, work util all finished
- MS-383 Modify condition variable usage in scheduler
- MS-384 Add global instance of ResourceMgr and Scheduler
- MS-389 Add clone interface in Task
- MS-390 Update resource construct function
- MS-391 Add PushTaskToNeighbourHasExecutor action
- MS-394 Update scheduler unittest
- MS-400 Add timestamp record in task state change function
- MS-402 Add dump implementation for TaskTableItem
- MS-406 Add table flag for meta
- MS-403 Add GpuCacheMgr
- MS-404 Release index after search task done avoid memory increment continues
- MS-405 Add delete task support
- MS-407 Reconstruct MetricsCollector
- MS-408 Add device_id in resource construct function
- MS-409 Using new scheduler
- MS-413 Remove thrift dependency
- MS-410 Add resource config comment
- MS-414 Add TaskType in Scheduler::Task
- MS-415 Add command tasktable to dump all tasktables
- MS-418 Update server_config.template file, set CPU compute only default
- MS-419 Move index_file_size from IndexParam to TableSchema
- MS-421 Add TaskLabel in scheduler
- MS-422 Support DeleteTask in Multi-GpuResource case
- MS-428 Add PushTaskByDataLocality in scheduler
- MS-440 Add DumpTaskTables in sdk
- MS-442 Merge Knowhere
- MS-445 Rename CopyCompleted to LoadCompleted
- MS-451 Update server_config.template file, set GPU compute default
- MS-455 Distribute tasks by minimal cost in scheduler
- MS-460 Put transport speed as weight when choosing neighbour to execute task
- MS-459 Add cache for pick function in tasktable
- MS-476 Improve search performance
- MS-482 Change search stream transport to unary in grpc
- MS-487 Define metric type in CreateTable
- MS-488 Improve code format in scheduler
- MS-495 cmake: integrated knowhere
- MS-496 Change the top_k limitation from 1024 to 2048
- MS-502 Update tasktable_test in scheduler
- MS-504 Update node_test in scheduler
- MS-505 Install core unit test and add to coverage
- MS-508 Update normal_test in scheduler
- MS-532 Add grpc server unittest
- MS-511 Update resource_test in scheduler
- MS-517 Update resource_mgr_test in scheduler
- MS-518 Add schedinst_test in scheduler
- MS-519 Add event_test in scheduler
- MS-520 Update resource_test in scheduler
- MS-524 Add some unittest in event_test and resource_test
- MS-525 Disable parallel reduce in SearchTask
- MS-527 Update scheduler_test and enable it
- MS-528 Hide some config used future
- MS-530 Add unittest for SearchTask->Load
- MS-531 Disable next version code
- MS-533 Update resource_test to cover dump function
- MS-523 Config file validation
- MS-539 Remove old task code
- MS-546 Add simple mode resource_config
- MS-570 Add prometheus docker-compose file
- MS-576 Scheduler refactor
- MS-592 Change showtables stream transport to unary
## Feature
- MS-343 Implement ResourceMgr
- MS-338 NewAPI: refine code to support CreateIndex
- MS-339 NewAPI: refine code to support DropIndex
- MS-340 NewAPI: implement DescribeIndex
## Task
- MS-297 disable mysql unit test
# Milvus 0.3.1 (2019-07-10)
## Bug
- MS-148 Disable cleanup if mode is read only
- MS-149 Fixed searching only one index file issue in distributed mode
- MS-153 Fix c_str error when connecting to MySQL
- MS-157 Fix changelog
- MS-190 Use env variable to switch mem manager and fix cmake
- MS-217 Fix SQ8 row count bug
- MS-224 Return AlreadyExist status in MySQLMetaImpl::CreateTable if table already exists
- MS-232 Add MySQLMetaImpl::UpdateTableFilesToIndex and set maximum_memory to default if config value = 0
- MS-233 Remove mem manager log
- MS-230 Change parameter name: Maximum_memory to insert_buffer_size
- MS-234 Some case cause background merge thread stop
- MS-235 Some test cases random fail
- MS-236 Add MySQLMetaImpl::HasNonIndexFiles
- MS-257 Update bzip2 download url
- MS-288 Update compile scripts
- MS-330 Stability test failed caused by server core dumped
- MS-347 Build index hangs again
- MS-382 fix MySQLMetaImpl::CleanUpFilesWithTTL unknown column bug
## Improvement
- MS-156 Add unittest for merge result functions
- MS-152 Delete assert in MySQLMetaImpl and change MySQLConnectionPool impl
- MS-204 Support multi db_path
- MS-206 Support SQ8 index type
- MS-208 Add build index interface for C++ SDK
- MS-212 Support Inner product metric type
- MS-241 Build Faiss with MKL if using Intel CPU; else build with OpenBlas
- MS-242 Clean up cmake and change MAKE_BUILD_ARGS to be user defined variable
- MS-245 Improve search result transfer performance
- MS-248 Support AddVector/SearchVector profiling
- MS-256 Add more cache config
- MS-260 Refine log
- MS-249 Check machine hardware during initialize
- MS-261 Update faiss version to 1.5.3 and add BUILD_FAISS_WITH_MKL as an option
- MS-266 Improve topk reduce time by using multi-threads
- MS-275 Avoid sqlite logic error exception
- MS-278 add IndexStatsHelper
- MS-313 add GRPC
- MS-325 add grpc status return for C++ sdk and modify some format
- MS-278 Add IndexStatsHelper
- MS-312 Set openmp thread number by config
- MS-305 Add CPU core percent metric
- MS-310 Add milvus CPU utilization ratio and CPU/GPU temperature metrics
- MS-324 Show error when there is not enough gpu memory to build index
- MS-328 Check metric type on server start
- MS-332 Set grpc and thrift server run concurrently
- MS-352 Add hybrid index
## Feature
- MS-180 Add new mem manager
- MS-195 Add nlist and use_blas_threshold conf
- MS-137 Integrate knowhere
## Task
- MS-125 Create 0.3.1 release branch
- MS-306 Optimize build efficiency
# Milvus 0.3.0 (2019-06-30)
## Bug
- MS-104 Fix unittest lcov execution error
- MS-102 Fix build script file condition error
- MS-80 Fix server hang issue
- MS-89 Fix compile failed, libgpufaiss.a link missing
- MS-90 Fix arch match incorrect on ARM
- MS-99 Fix compilation bug
- MS-110 Avoid huge file size
## Improvement
- MS-82 Update server startup welcome message
- MS-83 Update vecwise to Milvus
- MS-77 Performance issue of post-search action
- MS-22 Enhancement for MemVector size control
- MS-92 Unify behavior of debug and release build
- MS-98 Install all unit test to installation directory
- MS-115 Change is_startup of metric_config switch from true to on
- MS-122 Archive criteria config
- MS-124 HasTable interface
- MS-126 Add more error code
- MS-128 Change default db path
## Feature
- MS-57 Implement index load/search pipeline
- MS-56 Add version information when server is started
- MS-64 Different table can have different index type
- MS-52 Return search score
- MS-66 Support time range query
- MS-68 Remove rocksdb from third-party
- MS-70 cmake: remove redundant libs in src
- MS-71 cmake: fix faiss dependency
- MS-72 cmake: change prometheus source to git
- MS-73 cmake: delete civetweb
- MS-65 Implement GetTableRowCount interface
- MS-45 Implement DeleteTable interface
- MS-75 cmake: change faiss version to 1.5.2; add CUDA gencode
- MS-81 Fix faiss ptx issue; change cuda gencode
- MS-84 cmake: add arrow, jemalloc and jsoncons third party; default build option OFF
- MS-85 add NetIO metric
- MS-96 add new query interface for specified files
- MS-97 Add S3 SDK for MinIO Storage
- MS-105 Add MySQL
- MS-130 Add prometheus_test
- MS-144 Add nprobe config
- MS-147 Enable IVF
- MS-130 Add prometheus_test
## Task
- MS-74 Change README.md in cpp
- MS-88 Add support for arm architecture
# Milvus 0.2.0 (2019-05-31)
## Bug
- MS-32 Fix thrift error
- MS-34 Fix prometheus-cpp thirdparty
- MS-67 Fix license check bug
- MS-76 Fix pipeline crash bug
- MS-100 CMake: fix AWS build issue
- MS-101 Change AWS build type to Release
## Improvement
- MS-20 Clean Code Part 1
## Feature
- MS-5 Implement Auto Archive Feature
- MS-6 Implement SDK interface part 1
- MS-16 Implement metrics without prometheus
- MS-21 Implement SDK interface part 2
- MS-26 CMake. Add thirdparty packages
- MS-31 CMake: add prometheus
- MS-33 CMake: add -j4 to make third party packages build faster
- MS-27 Support gpu config and disable license build config in cmake
- MS-47 Add query vps metrics
- MS-37 Add query, cache usage, disk write speed and file data size metrics
- MS-30 Use faiss v1.5.2
- MS-54 CMake: Change Thrift third party URL to github.com
- MS-69 Prometheus: add all proposed metrics
## Task
- MS-1 Add CHANGELOG.md
- MS-4 Refactor the vecwise_engine code structure
- MS-62 Search range to all if no date specified

View File

@ -1,9 +0,0 @@
# Each line is a component followed by one or more owners.
* @JinHai-CN
/sdk @scsven
/shards @XuPeng-SH
/core/ @scsven
/core/src/db/meta @yhmo
/core/src/index/knowhere @cydrain
/core/src/index/ @shengjun1985

View File

@ -1,48 +0,0 @@
# Milvus Code of Conduct
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
- Using welcoming and inclusive language.
- Being respectful of differing viewpoints and experiences.
- Gracefully accepting constructive criticism.
- Focusing on what is best for the community.
- Showing empathy towards other community members.
Examples of unacceptable behavior by participants include:
- The use of sexualized language or imagery and unwelcome sexual attention or advances.
- Trolling, insulting/derogatory comments, and personal or political attacks.
- Public or private harassment.
- Publishing others' private information, such as a physical or electronic address, without explicit permission.
- Conduct which could reasonably be considered inappropriate for the forum in which it occurs.
All Milvus forums and spaces are meant for professional interactions, and any behavior which could reasonably be considered inappropriate in a professional setting is unacceptable.
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies to all content on milvus.io, Milvus's GitHub organization, or any other official Milvus web presence allowing for community interactions, as well as at all official Milvus events, whether offline or online.
The Code of Conduct also applies within all project spaces and in public spaces whenever an individual is representing Milvus or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed or de facto representative at an online or offline event. Representation of Milvus may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at conduct@lfai.foundation. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/), version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq

View File

@ -1,99 +0,0 @@
# Community Roles
<!-- TOC -->
- [TSC Members](#tsc-members)
- [Committers](#committers)
- [Reviewers](#reviewers)
- [Contributors](#contributors)
<!-- /TOC -->
## TSC Members
The Technical Steering Committee (TSC) functions as the core management team that oversees the Milvus community. The TSC decides the roadmap of the project and makes major decisions related to the community.
TSC members have the following responsibilities:
- Coordinate the technical direction of the project.
- Approve project or system proposals.
- Decide on formal releases of the project's software.
- Organize and remove sub-projects.
- Coordinate any marketing, events, or communications regarding the project.
To become a TSC member, a committer must be sponsored by a TSC member and the sponsorship must be approved by 2/3 of all TSC members.
The TSC members are listed below:
- [cxie](https://github.com/cxie)
- [JinHai-CN](https://github.com/JinHai-CN)
## Committers
Committers lead project feature discussions and oversee the overall project quality.
Committers have the following responsibilities:
- Lead feature design discussions and implementation.
- Ensure the overall project quality and approve PRs.
- Participate in product release, feature planning, and roadmap design.
- Have a constructive and friendly attitude in all community interactions.
- Mentor reviewers and contributors.
To become a committer, a reviewer must have contributed broadly throughout the Milvus project. A reviewer must also be sponsored by a committer and the sponsorship must be approved by the TSC.
The committers are listed below:
- [JinHai-CN](https://github.com/JinHai-CN)
## Reviewers
Reviewers review new code contributions and ensure the quality of existing code.
Reviewers have the following responsibilities:
- Participate in feature design discussion and implementation.
- Ensure the quality of owned code modules.
- Ensure the technical accuracy of documentation.
- Quickly respond to issues and PRs and conduct code reviews.
To become a reviewer, a contributor must have provided continued and quality contribution to the Milvus project for at least 6 months and have contributed at least one major component where the contributor has taken an ownership role.
The reviewers are listed below:
- [XuPeng-SH](https://github.com/XuPeng-SH)
- [yhmo](https://github.com/yhmo)
- [scsven](https://github.com/scsven)
- [cydrain](https://github.com/cydrain)
- [shengjun1985](https://github.com/shengjun1985)
## Contributors
Contributors can be anyone who has successfully submitted at least one PR to the Milvus project.
The contributors are listed below:
- [ZhifengZhang-CN](https://github.com/ZhifengZhang-CN)
- [tinkerlin](https://github.com/tinkerlin)
- [youny626](https://github.com/youny626)
- [fishpenguin](https://github.com/fishpenguin)
- [BossZou](https://github.com/BossZou)
- [del-zhenwu](https://github.com/del-zhenwu)
- [jielinxu](https://github.com/jielinxu)
- [yamasite](https://github.com/yamasite)
- [Yukikaze-CZR](https://github.com/Yukikaze-CZR)
- [Heisenberg-Y](https://github.com/Heisenberg-Y)
- [sahuang](https://github.com/sahuang)
- [op-hunter](https://github.com/op-hunter)
- [GuanyunFeng](https://github.com/GuanyunFeng)
- [thywdy](https://github.com/thywdy)
- [erdustiggen](https://github.com/erdustiggen)
- [akihoni](https://github.com/akihoni)
- [shiyu22](https://github.com/shiyu22)
- [shengjh](https://github.com/shengjh)
- [dvzubarev](https://github.com/dvzubarev)
- [aaronjin2010](https://github.com/aaronjin2010)
- [ReigenAraka](https://github.com/ReigenAraka)
- [JackLCL](https://github.com/JackLCL)
- [Bennu-Li](https://github.com/Bennu-Li)
- [ABNER-1](https://github.com/ABNER-1)

View File

@ -1,135 +0,0 @@
# Contributing to Milvus
First of all, thanks for taking the time to contribute to Milvus! It's people like you that help Milvus come to fruition. :tada:
The following are a set of guidelines for contributing to Milvus. Following these guidelines helps make contributing to this project easy and transparent. These are mostly guidelines, not rules. Use your best judgment, and feel free to propose changes to this document in a pull request.
As for everything else in the project, the contributions to Milvus are governed by our [Code of Conduct](CODE_OF_CONDUCT.md).
## Contribution Checklist
Before you make any contributions, make sure you follow this list.
- Read [Contributing to Milvus](CONTRIBUTING.md).
- Check if the changes are consistent with the [coding style](CONTRIBUTING.md#coding-style), and format your code accordingly.
- Run [unit tests](CONTRIBUTING.md#run-unit-test-with-code-coverage) and check your code coverage rate.
## What contributions can I make?
Contributions to Milvus fall into the following categories.
1. To report a bug or a problem with documentation, please file an [issue](https://github.com/milvus-io/milvus/issues/new/choose) providing the details of the problem. If you believe the issue needs priority attention, please comment on the issue to notify the team.
2. To propose a new feature, please file a new feature request [issue](https://github.com/milvus-io/milvus/issues/new/choose). Describe the intended feature and discuss the design and implementation with the team and community. Once the team agrees that the plan looks good, go ahead and implement it, following the [Contributing code](CONTRIBUTING.md#contributing-code).
3. To implement a feature or bug-fix for an existing outstanding issue, follow the [Contributing code](CONTRIBUTING.md#contributing-code). If you need more context on a particular issue, comment on the issue to let people know.
## How can I contribute?
### Contributing code
If you have improvements to Milvus, send us your pull requests! For those just getting started, see [GitHub workflow](#github-workflow). Make sure to refer to the related issue in your pull request's comment and update [CHANGELOG.md](CHANGELOG.md).
The Milvus team members will review your pull requests, and once it is accepted, the status of the projects to which it is associated will be changed to **Reviewer approved**. This means we are working on submitting your pull request to the internal repository. After the change has been submitted internally, your pull request will be merged automatically on GitHub.
### GitHub workflow
Please create a new branch from an up-to-date master on your fork.
1. Fork the repository on GitHub.
2. Clone your fork to your local machine with `git clone git@github.com:<yourname>/milvus-io/milvus.git`.
3. Create a branch with `git checkout -b my-topic-branch`.
4. Make your changes, commit, then push to GitHub with `git push --set-upstream origin my-topic-branch`. You must record your changes in [CHANGELOG.md](CHANGELOG.md) with issue numbers and descriptions.
5. Visit GitHub and make your pull request.
If you have an existing local repository, please update it before you start, to minimize the chance of merge conflicts.
```shell
git remote add upstream git@github.com:milvus-io/milvus.git
git checkout master
git pull upstream master
git checkout -b my-topic-branch
```
### General guidelines
Before sending your pull requests for review, make sure your changes are consistent with the guidelines and follow the Milvus coding style.
- Include unit tests when you contribute new features, as they help to prove that your code works correctly, and also guard against future breaking changes to lower the maintenance cost.
- Bug fixes also require unit tests, because the presence of bugs usually indicates insufficient test coverage.
- Keep API compatibility in mind when you change code in Milvus. Reviewers of your pull request will comment on any API compatibility issues.
- When you contribute a new feature to Milvus, the maintenance burden is (by default) transferred to the Milvus team. This means that the benefit of the contribution must be compared against the cost of maintaining the feature.
### Developer Certificate of Origin (DCO)
All contributions to this project must be accompanied by acknowledgment of, and agreement to, the [Developer Certificate of Origin](https://developercertificate.org/). Acknowledgment of and agreement to the Developer Certificate of Origin _must_ be included in the comment section of each contribution and _must_ take the form of `Signed-off-by: {{Full Name}} <{{email address}}>` (without the `{}`). Contributions without this acknowledgment will be required to add it before being accepted. If contributors are unable or unwilling to agree to the Developer Certificate of Origin, their contribution will not be included.
Contributors sign-off that they adhere to DCO by adding the following Signed-off-by line to commit messages:
```text
This is my commit message
Signed-off-by: Random J Developer <random@developer.example.org>
```
Git also has a `-s` command line option to append this automatically to your commit message:
```shell
$ git commit -s -m 'This is my commit message'
```
## Coding Style
The coding style used in Milvus generally follows the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
And we made the following changes based on the guide:
- 4 spaces for indentation
- Adopt .cpp file extension instead of .cc extension
- 120-character line length
- Camel-Cased file names
### Format code
Install clang-format
```shell
$ sudo apt-get install clang-format
$ rm cmake_build/CMakeCache.txt
```
Check code style
```shell
$ ./build.sh -l
```
To format the code
```shell
$ cd cmake_build
$ make clang-format
```
## Run unit test with code coverage
Before submitting your PR, make sure you have run unit test, and your code coverage rate is >= 90%.
Install lcov
```shell
$ sudo apt-get install lcov
```
Run unit test and generate code for code coverage check
```shell
$ ./build.sh -u -c
```
Run MySQL docker
```shell
docker pull mysql:latest
docker run -p 3306:3306 -e MYSQL_ROOT_PASSWORD=123456 -d mysql:latest
```
Run code coverage
```shell
$ ./coverage.sh -u root -p 123456 -t 127.0.0.1
```
Or start your own MySQL server, and then run code coverage
```shell
$ ./coverage.sh -u ${MYSQL_USERNAME} -p ${MYSQL_PASSWORD} -t ${MYSQL_SERVER_IP}
```

View File

@ -1,13 +0,0 @@
# Milvus Design Documents
If you would like to propose a new feature for Milvus, it is recommended that you submit a design document following the [design document template](https://docs.google.com/document/d/1KhWs9b53K6WQUZ_FGWhIaeTraaSqsLQF7v2v68sAh_c/edit?usp=sharing).
The following list contains existing design documents for Milvus.
- [Support DSL interface](https://docs.google.com/document/d/1U83LY36TyaG3WD67Q9HWg9saD3qQcz9BfMcEScgwQPM/edit?usp=sharing)
- [Support delete/search by ID, attribute filtering, ID de-duplication](https://docs.google.com/document/d/1CDKdTj_DnE90YaZrPgsMaphqOTkMdbKETNrsFKj_Bco/edit?usp=sharing)
- [Support write-ahead logging](https://docs.google.com/document/d/12N8RC_wJb2dvEKY9jrlh8hU_eH8jxQVBewoPuHNqcXE/edit?usp=sharing)
- [Support in-service config modification](https://docs.google.com/document/d/1pK1joWJgAHM5nVp3q005iLbLqU5bn9InWeBy0mRAoSg/edit?usp=sharing)
- [Support Multi-Storage](https://docs.google.com/document/d/1iwwLH4Jtm3OXIVb7jFYsfmcbOyX6AWZKaNJAaXC7-cw/edit?usp=sharing)
- [Support AVX-512](https://docs.google.com/document/d/1do6_JgRCYdcV95sTPE6rLoiBK8wAcZki5Ypp7jbgqK0/edit?usp=sharing)
- [Refactor Knowhere](https://docs.google.com/document/d/1HY27EXV4UjJhDEmJ9t4Rjh7I1sB8iJHvqvliM6HHLS8/edit?usp=sharing)

View File

@ -1,282 +0,0 @@
# Install Milvus from Source Code
<!-- TOC -->
- [Build from source](#build-from-source)
- [Requirements](#requirements)
- [Compilation](#compilation)
- [Launch Milvus server](#launch-milvus-server)
- [Compile Milvus on Docker](#compile-milvus-on-docker)
- [Step 1 Pull Milvus Docker images](#step-1-pull-milvus-docker-images)
- [Step 2 Start the Docker container](#step-2-start-the-docker-container)
- [Step 3 Download Milvus source code](#step-3-download-milvus-source-code)
- [Step 4 Compile Milvus in the container](#step-4-compile-milvus-in-the-container)
- [Troubleshooting](#troubleshooting)
- [Error message: `protocol https not supported or disabled in libcurl`](#error-message-protocol-https-not-supported-or-disabled-in-libcurl)
- [Error message: `internal compiler error`](#error-message-internal-compiler-error)
- [Error message: `error while loading shared libraries: libmysqlpp.so.3`](#error-message-error-while-loading-shared-libraries-libmysqlppso3)
- [CMake version is not supported](#cmake-version-is-not-supported)
<!-- /TOC -->
## Build from source
### Requirements
- Operating system
- Ubuntu 18.04 or higher
- CentOS 7
> Note: If your Linux operating system does not meet the requirements, we recommend that you pull a Docker image of [Ubuntu 18.04](https://docs.docker.com/install/linux/docker-ce/ubuntu/) or [CentOS 7](https://docs.docker.com/install/linux/docker-ce/centos/) as your compilation environment.
- GCC 7.0 or higher to support C++ 17
- CMake 3.12 or higher
- Git
For GPU-enabled version, you will also need:
- CUDA 10.0 or higher
- NVIDIA driver 418 or higher
### Compilation
#### Step 1 Install dependencies
##### Install in Ubuntu
```shell
$ cd [Milvus root path]/core
$ ./ubuntu_build_deps.sh
```
##### Install in CentOS
```shell
$ cd [Milvus root path]/core
$ ./centos7_build_deps.sh
```
#### Step 2 Build
```shell
$ cd [Milvus root path]/core
$ ./build.sh -t Debug
```
or
```shell
$ ./build.sh -t Release
```
By default, it will build CPU-only version. To build GPU version, add `-g` option.
```shell
$ ./build.sh -g
```
If you want to know the complete build options, run the following command.
```shell
$./build.sh -h
```
When the build is completed, everything that you need in order to run Milvus will be installed under `[Milvus root path]/core/milvus`.
### Launch Milvus server
```shell
$ cd [Milvus root path]/core/milvus
```
Add `lib/` directory to `LD_LIBRARY_PATH`
```shell
$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:[Milvus root path]/core/milvus/lib
```
Then start Milvus server:
```shell
$ cd scripts
$ ./start_server.sh
```
To stop Milvus server, run:
```shell
$ ./stop_server.sh
```
## Compile Milvus on Docker
With the following Docker images, you should be able to compile Milvus on any Linux platform that runs Docker. To build a GPU supported Milvus, you need to install [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker/) first.
### Step 1 Pull Milvus Docker images
Pull CPU-only image:
```shell
$ docker pull milvusdb/milvus-cpu-build-env:latest
```
Pull GPU-enabled image:
```shell
$ docker pull milvusdb/milvus-gpu-build-env:latest
```
### Step 2 Start the Docker container
Start a CPU-only container:
```shell
$ docker run -it -p 19530:19530 -d milvusdb/milvus-cpu-build-env:latest
```
Start a GPU container:
- For nvidia docker 2:
```shell
$ docker run --runtime=nvidia -it -p 19530:19530 -d milvusdb/milvus-gpu-build-env:latest
```
- For nvidia container toolkit:
```shell
docker run --gpus all -it -p 19530:19530 -d milvusdb/milvus-gpu-build-env:latest
```
To enter the container:
```shell
$ docker exec -it [container_id] bash
```
### Step 3 Download Milvus source code
Download latest Milvus source code:
```shell
$ cd /home
$ git clone https://github.com/milvus-io/milvus
```
To enter its core directory:
```shell
$ cd ./milvus/core
```
### Step 4 Compile Milvus in the container
If you are using a CPU-only image:
1. run `build.sh`:
```shell
$ ./build.sh -t Release
```
2. Start Milvus server
```shell
$ ./start_server.sh
```
If you are using a GPU-enabled image:
1. Add cuda library path to `LD_LIBRARY_PATH`:
```shell
$ export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
```
2. Add cuda binary path to `PATH`:
```shell
$ export PATH=/usr/local/cuda/bin:$PATH
```
3. Add a `-g` parameter to run `build.sh`:
```shell
$ ./build.sh -g -t Release
```
4. Start Milvus server
```shell
$ ./start_server.sh
```
## Troubleshooting
### Error message: `protocol https not supported or disabled in libcurl`
Follow the steps below to solve this problem:
1. Make sure you have `libcurl4-openssl-dev` installed in your system.
2. Try reinstalling the latest CMake from source with `--system-curl` option:
```shell
$ ./bootstrap --system-curl
$ make
$ sudo make install
```
If the `--system-curl` command doesn't work, you can also reinstall CMake in **Ubuntu Software** on your local computer.
### Error message: `internal compiler error`
Try increasing the memory allocated to Docker. If this doesn't work, you can reduce the number of threads in CMake build in `[Milvus root path]/core/build.sh`.
```shell
make -j 8 install || exit 1 # The default number of threads is 8.
```
Note: You might also need to configure CMake build for faiss in `[Milvus root path]/core/src/index/thirdparty/faiss`.
### Error message: `error while loading shared libraries: libmysqlpp.so.3`
Follow the steps below to solve this problem:
1. Check whether `libmysqlpp.so.3` is correctly installed.
2. If `libmysqlpp.so.3` is installed, check whether it is added to `LD_LIBRARY_PATH`.
### CMake version is not supported
Follow the steps below to install a supported version of CMake:
1. Remove the unsupported version of CMake.
2. Get CMake 3.12 or higher. Here we get CMake 3.12.
```shell
$ wget https://cmake.org/files/v3.12/cmake-3.12.2-Linux-x86_64.tar.gz
```
3. Extract the file and install CMake.
```shell
$ tar zxvf cmake-3.12.2-Linux-x86_64.tar.gz
$ mv cmake-3.12.2-Linux-x86_64 /opt/cmake-3.12.2
$ ln -sf /opt/cmake-3.12.2/bin/* /usr/bin/
```

201
LICENSE
View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,24 +0,0 @@
| Name | License |
| ---------------- | -------------------------------------------------------------------------------------------------------------------------------|
| Boost | [Boost Software License](https://github.com/boostorg/boost/blob/master/LICENSE_1_0.txt) |
| FAISS | [MIT](https://github.com/facebookresearch/faiss/blob/master/LICENSE) |
| Gtest | [BSD 3-Clause](https://github.com/google/googletest/blob/master/LICENSE) |
| LAPACK | [LAPACK](https://github.com/Reference-LAPACK/lapack/blob/master/LICENSE) |
| MySQLPP | [LGPL 2.1](https://tangentsoft.com/mysqlpp/artifact/b128a66dab867923) |
| OpenBLAS | [BSD 3-Clause](https://github.com/xianyi/OpenBLAS/blob/develop/LICENSE) |
| Prometheus | [Apache 2.0](https://github.com/prometheus/prometheus/blob/master/LICENSE) |
| SQLite | [Public Domain](https://www.sqlite.org/copyright.html) |
| SQLite-ORM | [BSD 3-Clause](https://github.com/fnc12/sqlite_orm/blob/master/LICENSE) |
| yaml-cpp | [MIT](https://github.com/jbeder/yaml-cpp/blob/master/LICENSE) |
| ZLIB | [ZLIB](http://zlib.net/zlib_license.html) |
| libunwind | [MIT](https://github.com/libunwind/libunwind/blob/master/LICENSE) |
| gperftools | [BSD 3-Clause](https://github.com/gperftools/gperftools/blob/master/COPYING) |
| grpc | [Apache 2.0](https://github.com/grpc/grpc/blob/master/LICENSE) |
| EASYLOGGINGPP | [MIT](https://github.com/zuhd-org/easyloggingpp/blob/master/LICENSE) |
| Json | [MIT](https://github.com/nlohmann/json/blob/develop/LICENSE.MIT) |
| opentracing-cpp | [Apache 2.0](https://github.com/opentracing/opentracing-cpp/blob/master/LICENSE) |
| libfiu | [BOLA](https://github.com/albertito/libfiu/blob/master/LICENSE) |
| aws-sdk-cpp | [Apache 2.0](https://github.com/aws/aws-sdk-cpp/blob/master/LICENSE) |
| SPTAG | [MIT](https://github.com/microsoft/SPTAG/blob/master/LICENSE) |
| hnswlib | [Apache 2.0](https://github.com/nmslib/hnswlib/blob/master/LICENSE) |
| annoy | [Apache 2.0](https://github.com/spotify/annoy/blob/master/LICENSE) |

View File

@ -1,2 +0,0 @@
osslifecycle=active

View File

@ -1,92 +0,0 @@
![Milvuslogo](https://github.com/milvus-io/docs/blob/master/assets/milvus_logo.png)
[![Slack](https://img.shields.io/badge/Join-Slack-orange)](https://join.slack.com/t/milvusio/shared_invite/zt-e0u4qu3k-bI2GDNys3ZqX1YCJ9OM~GQ)
![GitHub](https://img.shields.io/github/license/milvus-io/milvus)
![Docker pulls](https://img.shields.io/docker/pulls/milvusdb/milvus)
[![Build Status](http://internal.zilliz.com:18080/jenkins/job/milvus-ci/job/master/badge/icon)](http://internal.zilliz.com:18080/jenkins/job/milvus-ci/job/master/)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/3563/badge)](https://bestpractices.coreinfrastructure.org/projects/3563)
[![codecov](https://codecov.io/gh/milvus-io/milvus/branch/master/graph/badge.svg)](https://codecov.io/gh/milvus-io/milvus)
[![codebeat badge](https://codebeat.co/badges/e030a4f6-b126-4475-a938-4723d54ec3a7?style=plastic)](https://codebeat.co/projects/github-com-milvus-io-milvus-master)
[![CodeFactor Grade](https://www.codefactor.io/repository/github/milvus-io/milvus/badge)](https://www.codefactor.io/repository/github/milvus-io/milvus)
[![Codacy Badge](https://api.codacy.com/project/badge/Grade/c4bb2ccfb51b47f99e43bfd1705edd95)](https://app.codacy.com/gh/milvus-io/milvus?utm_source=github.com&utm_medium=referral&utm_content=milvus-io/milvus&utm_campaign=Badge_Grade_Dashboard)
English | [中文版](README_CN.md)
## What is Milvus
As an open source vector similarity search engine, Milvus is easy-to-use, highly reliable, scalable, robust, and blazing fast. Adopted by over 100 organizations and institutions worldwide, Milvus empowers applications in a variety of fields, including image processing, computer vision, natural language processing, voice recognition, recommender systems, drug discovery, etc.
Milvus has the following architecture:
![arch](https://github.com/milvus-io/docs/blob/v0.7.1/assets/milvus_arch.png)
For more detailed introduction of Milvus and its architecture, see [Milvus overview](https://www.milvus.io/docs/about_milvus/overview.md). Keep up-to-date with newest releases and latest updates by reading Milvus [release notes](https://www.milvus.io/docs/releases/release_notes.md).
Milvus is an [LF AI Foundation](https://lfai.foundation/) incubation project. Learn more at [lfai.foundation](https://lfai.foundation/).
## Get started
### Install Milvus
See the [Milvus install guide](https://www.milvus.io/docs/guides/get_started/install_milvus/install_milvus.md) to install Milvus using Docker. To install Milvus from source code, see [build from source](INSTALL.md).
### Try example programs
Try an example program with Milvus using [Python](https://www.milvus.io/docs/guides/get_started/example_code.md), [Java](https://github.com/milvus-io/milvus-sdk-java/tree/master/examples), [Go](https://github.com/milvus-io/milvus-sdk-go/tree/master/examples), or [C++ example code](https://github.com/milvus-io/milvus/tree/master/sdk/examples).
## Supported clients
- [Go](https://github.com/milvus-io/milvus-sdk-go)
- [Python](https://github.com/milvus-io/pymilvus)
- [Java](https://github.com/milvus-io/milvus-sdk-java)
- [C++](https://github.com/milvus-io/milvus/tree/master/sdk)
- [RESTful API](https://github.com/milvus-io/milvus/tree/master/core/src/server/web_impl)
- [Node.js](https://www.npmjs.com/package/@arkie-ai/milvus-client) (Provided by [arkie](https://www.arkie.cn/))
## Application scenarios
You can use Milvus to build intelligent systems in a variety of AI application scenarios. Refer to [Milvus Scenarios](https://milvus.io/scenarios) for live demos. You can also refer to [Milvus Bootcamp](https://github.com/milvus-io/bootcamp) for detailed solutions and application scenarios.
## Benchmark
See our [test reports](https://github.com/milvus-io/milvus/tree/master/docs) for more information about performance benchmarking of different indexes in Milvus.
## Roadmap
To learn what's coming up soon in Milvus, read our [Roadmap](https://github.com/milvus-io/milvus/projects).
The roadmap is a work in progress and is subject to reasonable adjustments when necessary. We greatly welcome any comments, requirements, or suggestions regarding the Milvus roadmap. :clap:
## Contribution guidelines
Contributions are welcomed and greatly appreciated. Please read our [contribution guidelines](CONTRIBUTING.md) for detailed contribution workflow. This project adheres to the [code of conduct](CODE_OF_CONDUCT.md) of Milvus. By participating, you are expected to uphold this code.
We use [GitHub issues](https://github.com/milvus-io/milvus/issues) to track issues and bugs. For general questions and public discussions, please join our community.
## Join our community
:heart:To connect with other users and contributors, welcome to join our [Slack channel](https://join.slack.com/t/milvusio/shared_invite/zt-e0u4qu3k-bI2GDNys3ZqX1YCJ9OM~GQ).
See our [community](https://github.com/milvus-io/community) repository to learn about our governance and access more community resources.
## Resources
- [Milvus.io](https://www.milvus.io)
- [Milvus FAQ](https://www.milvus.io/docs/faq/operational_faq.md)
- [Milvus Medium](https://medium.com/@milvusio)
- [Milvus CSDN](https://zilliz.blog.csdn.net/)
- [Milvus Twitter](https://twitter.com/milvusio)
- [Milvus Facebook](https://www.facebook.com/io.milvus.5)
- [Milvus design docs](DESIGN.md)
## License
[Apache License 2.0](LICENSE)

View File

@ -1,95 +0,0 @@
![Milvuslogo](https://github.com/milvus-io/docs/blob/master/assets/milvus_logo.png)
[![Slack](https://img.shields.io/badge/Join-Slack-orange)](https://join.slack.com/t/milvusio/shared_invite/zt-e0u4qu3k-bI2GDNys3ZqX1YCJ9OM~GQ)
![GitHub](https://img.shields.io/github/license/milvus-io/milvus)
![Docker pulls](https://img.shields.io/docker/pulls/milvusdb/milvus)
[![Build Status](http://internal.zilliz.com:18080/jenkins/job/milvus-ci/job/master/badge/icon)](http://internal.zilliz.com:18080/jenkins/job/milvus-ci/job/master/)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/3563/badge)](https://bestpractices.coreinfrastructure.org/projects/3563)
[![codecov](https://codecov.io/gh/milvus-io/milvus/branch/master/graph/badge.svg)](https://codecov.io/gh/milvus-io/milvus)
[![codebeat badge](https://codebeat.co/badges/e030a4f6-b126-4475-a938-4723d54ec3a7?style=plastic)](https://codebeat.co/projects/github-com-milvus-io-milvus-master)
[![Codacy Badge](https://api.codacy.com/project/badge/Grade/c4bb2ccfb51b47f99e43bfd1705edd95)](https://app.codacy.com/gh/milvus-io/milvus?utm_source=github.com&utm_medium=referral&utm_content=milvus-io/milvus&utm_campaign=Badge_Grade_Dashboard)
[English](README.md) | 中文版
# 欢迎来到 Milvus
## Milvus 是什么
Milvus 是一款开源的特征向量相似度搜索引擎具有使用方便、实用可靠、易于扩展、稳定高效和搜索迅速等特点在全球范围内被上百家组织和机构所采用。Milvus 已经被广泛应用于多个领域,其中包括图像处理、机器视觉、自然语言处理、语音识别、推荐系统以及新药发现等。
Milvus 的架构如下:
![arch](https://github.com/milvus-io/docs/raw/v0.7.1/assets/milvus_arch.png)
若要了解 Milvus 详细介绍和整体架构,请访问 [Milvus 简介](https://www.milvus.io/cn/docs/about_milvus/overview.md)。您可以通过 [版本发布说明](https://www.milvus.io/cn/docs/releases/release_notes.md) 获取最新版本的功能和更新。
Milvus是一个[LF AI基金会](https://lfai.foundation/)的孵化项目。获取更多,请访问[lfai.foundation](https://lfai.foundation/)。
## Milvus 快速上手
### 安装 Milvus
请参阅 [Milvus 安装指南](https://www.milvus.io/cn/docs/guides/get_started/install_milvus/install_milvus.md) 使用 Docker 容器安装 Milvus。若要基于源码编译请访问 [源码安装](INSTALL.md)。
### 尝试示例代码
您可以尝试用 [Python](https://www.milvus.io/cn/docs/guides/get_started/example_code.md)[Java](https://github.com/milvus-io/milvus-sdk-java/tree/master/examples)[Go](https://github.com/milvus-io/milvus-sdk-go/tree/master/examples),或者 [C++](https://github.com/milvus-io/milvus/tree/master/sdk/examples) 运行 Milvus 示例代码。
## 支持的客户端
- [Go](https://github.com/milvus-io/milvus-sdk-go)
- [Python](https://github.com/milvus-io/pymilvus)
- [Java](https://github.com/milvus-io/milvus-sdk-java)
- [C++](https://github.com/milvus-io/milvus/tree/master/sdk)
- [RESTful API](https://github.com/milvus-io/milvus/tree/master/core/src/server/web_impl)
- [Node.js](https://www.npmjs.com/package/@arkie-ai/milvus-client) (由 [arkie](https://www.arkie.cn/) 提供)
## 应用场景
Milvus 可以应用于多种 AI 场景。您可以访问 [Milvus 应用场景](https://milvus.io/scenarios) 体验在线场景展示。您也可以访问 [Milvus 训练营](https://github.com/milvus-io/bootcamp) 了解更详细的应用场景和解决方案。
## 性能基准测试
关于 Milvus 性能基准的更多信息,请参考[测试报告](https://github.com/milvus-io/milvus/tree/master/docs)。
## 路线图
您可以参考我们的[路线图](https://github.com/milvus-io/milvus/projects),了解 Milvus 即将实现的新特性。
路线图尚未完成,并且可能会存在合理改动。我们欢迎各种针对路线图的意见、需求和建议。
## 贡献者指南
我们由衷欢迎您推送贡献。关于贡献流程的详细信息,请参阅[贡献者指南](https://github.com/milvus-io/milvus/blob/master/CONTRIBUTING.md)。本项目遵循 Milvus [行为准则](https://github.com/milvus-io/milvus/blob/master/CODE_OF_CONDUCT.md)。如果您希望参与本项目,请遵守该准则的内容。
我们使用 [GitHub issues](https://github.com/milvus-io/milvus/issues) 追踪问题和补丁。若您希望提出问题或进行讨论,请加入我们的社区。
## 加入 Milvus 社区
欢迎加入我们的 [Slack 频道](https://join.slack.com/t/milvusio/shared_invite/zt-e0u4qu3k-bI2GDNys3ZqX1YCJ9OM~GQ)以便与其他用户和贡献者进行交流。
## 加入 Milvus 技术交流微信群
![qrcode](https://github.com/milvus-io/docs/blob/v0.7.0/assets/qrcode.png)
## 相关链接
- [Milvus.io](https://www.milvus.io)
- [Milvus 常见问题](https://www.milvus.io/cn/docs/faq/operational_faq.md)
- [Milvus Medium](https://medium.com/@milvusio)
- [Milvus CSDN](https://zilliz.blog.csdn.net/)
- [Milvus Twitter](https://twitter.com/milvusio)
- [Milvus Facebook](https://www.facebook.com/io.milvus.5)
- [Milvus 设计文档](DESIGN.md)
## 许可协议
[Apache 许可协议 2.0 版](https://github.com/milvus-io/milvus/blob/master/LICENSE)

View File

@ -1,17 +0,0 @@
# Milvus Release Methodology and Criteria
## Release methodology
Milvus releases are packages that have been approved for general public release, with varying degrees of caveat regarding their perceived quality or potential for change.
They are stable releases intended for everyday usage by developers and non-developers.
Project versioning follows the specification of [Semantic Versioning 2.0.0](https://semver.org/).
## Release criteria
- Milvus core test code coverage must be at least 90%.
- Reported bugs should not have any critical issues.
- All bugs, new features, enhancements must be tested.
- All documents need to be reviewed with no broken link.
- Pressure testing, stability testing, accuracy testing and performance testing results should be evaluated.

View File

@ -1,15 +0,0 @@
# Security Policy
## Supported versions
The following versions of Milvus are currently being supported with security updates.
| Version | Supported |
| --------- | ------------------ |
| 0.6.0 | ✔️ |
| <= 0.5.3 | :x: |
## Reporting a vulnerability
To report a security vulnerability, please reach out to the Milvus team via <hai.jin@zilliz.com>.

View File

@ -1,14 +0,0 @@
# Support for deploying and using Milvus
We use GitHub for tracking bugs and feature requests. If you need any support for using Milvus, please refer to the resources below.
## Documentation
- [User Documentation](https://www.milvus.io/docs/guides/get_started/install_milvus/install_milvus.md)
- [Troubleshooting Guide](https://www.milvus.io/docs/v0.6.0/guides/troubleshoot.md)
- [FAQ](https://www.milvus.io/docs/v0.6.0/faq/operational_faq.md)
## Real-time chat
[Slack](https://join.slack.com/t/milvusio/shared_invite/enQtNzY1OTQ0NDI3NjMzLWNmYmM1NmNjOTQ5MGI5NDhhYmRhMGU5M2NhNzhhMDMzY2MzNDdlYjM5ODQ5MmE3ODFlYzU3YjJkNmVlNDQ2ZTk): The #general channel is the place where people offer support.
## Other
[Bootcamp](https://github.com/milvus-io/bootcamp): It provides more scenario-based applications and demos of Milvus.

View File

@ -1,33 +0,0 @@
# Build-environment image for compiling Milvus on CentOS 7.
ARG arch=amd64
FROM ${arch}/centos:7

# pipefail is enabled for proper error detection in the `wget`
# step
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Base toolchain and build dependencies:
# - CMake 3.14.3 is unpacked straight into /usr/local
# - devtoolset-7 / llvm-toolset-7.0 provide modern gcc and clang tooling,
#   activated for login shells via /etc/profile.d snippets
RUN yum install -y epel-release centos-release-scl-rh && yum install -y wget curl which && \
    wget -qO- "https://cmake.org/files/v3.14/cmake-3.14.3-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local && \
    yum install -y make automake git python3-pip libcurl-devel python3-devel boost-static mysql-devel \
    devtoolset-7-gcc devtoolset-7-gcc-c++ devtoolset-7-gcc-gfortran llvm-toolset-7.0-clang llvm-toolset-7.0-clang-tools-extra \
    mysql lcov && \
    rm -rf /var/cache/yum/* && \
    echo "source scl_source enable devtoolset-7" >> /etc/profile.d/devtoolset-7.sh && \
    echo "source scl_source enable llvm-toolset-7.0" >> /etc/profile.d/llvm-toolset-7.sh

# Location of clang-format/clang-tidy used by the lint targets.
ENV CLANG_TOOLS_PATH="/opt/rh/llvm-toolset-7.0/root/usr/bin"

# OpenBLAS 0.3.9 built from source: dynamic-arch dispatch, single-threaded
# (threading is managed by Milvus itself), installed under /usr.
RUN source /etc/profile.d/devtoolset-7.sh && \
    wget https://github.com/xianyi/OpenBLAS/archive/v0.3.9.tar.gz && \
    tar zxvf v0.3.9.tar.gz && cd OpenBLAS-0.3.9 && \
    make TARGET=CORE2 DYNAMIC_ARCH=1 DYNAMIC_OLDER=1 USE_THREAD=0 USE_OPENMP=0 FC=gfortran CC=gcc COMMON_OPT="-O3 -g -fPIC" FCOMMON_OPT="-O3 -g -fPIC -frecursive" NMAX="NUM_THREADS=128" LIBPREFIX="libopenblas" LAPACKE="NO_LAPACKE=1" INTERFACE64=0 NO_STATIC=1 && \
    make PREFIX=/usr install && \
    cd .. && rm -rf OpenBLAS-0.3.9 && rm v0.3.9.tar.gz

# ccache speeds up repeated CI builds.
RUN yum install -y ccache && \
    rm -rf /var/cache/yum/*

# use login shell to activate environment in the RUN commands
SHELL [ "/bin/bash", "-c", "-l" ]

# use login shell when running the container
ENTRYPOINT [ "/bin/bash", "-c", "-l" ]

View File

@ -1,43 +0,0 @@
# Build-environment image for compiling Milvus on Ubuntu 18.04.
ARG arch=amd64
FROM ${arch}/ubuntu:18.04

# pipefail is enabled for proper error detection in the `wget | apt-key add`
# step
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Non-interactive apt so image builds never block on prompts.
ENV DEBIAN_FRONTEND noninteractive

# Register the Intel MKL apt repository, install CMake 3.14.3 into /usr/local,
# then pull the compilers, boost, mysql client and lint/coverage tooling.
RUN apt-get update && apt-get install -y --no-install-recommends wget ca-certificates gnupg2 && \
    wget -P /tmp https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB && \
    apt-key add /tmp/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB && \
    sh -c 'echo deb https://apt.repos.intel.com/mkl all main > /etc/apt/sources.list.d/intel-mkl.list' && \
    wget -qO- "https://cmake.org/files/v3.14/cmake-3.14.3-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local && \
    apt-get update && apt-get install -y --no-install-recommends \
    g++ git gfortran lsb-core \
    libboost-serialization-dev libboost-filesystem-dev libboost-system-dev libboost-regex-dev \
    curl libtool automake libssl-dev pkg-config libcurl4-openssl-dev python3-pip \
    clang-format-6.0 clang-tidy-6.0 \
    lcov mysql-client libmysqlclient-dev intel-mkl-gnu-2019.5-281 intel-mkl-core-2019.5-281 && \
    apt-get remove --purge -y && \
    rm -rf /var/lib/apt/lists/*

# Some build scripts still link against the legacy reentrant (_r) name.
RUN ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so \
    /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so

# Expose the MKL runtime libraries to login shells.
RUN sh -c 'echo export LD_LIBRARY_PATH=/opt/intel/compilers_and_libraries_2019.5.281/linux/mkl/lib/intel64:\$LD_LIBRARY_PATH > /etc/profile.d/mkl.sh'

# OpenBLAS 0.3.9 built from source: dynamic-arch dispatch, single-threaded,
# installed under /usr.
RUN wget https://github.com/xianyi/OpenBLAS/archive/v0.3.9.tar.gz && \
    tar zxvf v0.3.9.tar.gz && cd OpenBLAS-0.3.9 && \
    make TARGET=CORE2 DYNAMIC_ARCH=1 DYNAMIC_OLDER=1 USE_THREAD=0 USE_OPENMP=0 FC=gfortran CC=gcc COMMON_OPT="-O3 -g -fPIC" FCOMMON_OPT="-O3 -g -fPIC -frecursive" NMAX="NUM_THREADS=128" LIBPREFIX="libopenblas" LAPACKE="NO_LAPACKE=1" INTERFACE64=0 NO_STATIC=1 && \
    make PREFIX=/usr install && \
    cd .. && rm -rf OpenBLAS-0.3.9 && rm v0.3.9.tar.gz

# ccache speeds up repeated CI builds.
RUN apt-get update && apt-get install -y --no-install-recommends ccache && \
    apt-get remove --purge -y && \
    rm -rf /var/lib/apt/lists/*

# use login shell to activate environment in the RUN commands
SHELL [ "/bin/bash", "-c", "-l" ]

# use login shell when running the container
ENTRYPOINT [ "/bin/bash", "-c", "-l" ]

244
ci/jenkins/Jenkinsfile vendored
View File

@ -1,244 +0,0 @@
#!/usr/bin/env groovy

// Timer-triggered (nightly) builds run at 22:50 Asia/Shanghai, and only on
// the "0.10.3" release branch; every other branch gets an empty cron spec.
String cron_timezone = "TZ=Asia/Shanghai"
String cron_string = BRANCH_NAME == "0.10.3" ? "50 22 * * * " : ""

// Milvus CI pipeline: for each OS x arch x (cpu|gpu) matrix cell it builds,
// unit-tests, measures coverage and packages Milvus, publishes a docker
// image, then deploys to a dev cluster and runs the python test suite.
pipeline {
    agent none

    triggers {
        cron """${cron_timezone}
${cron_string}"""
    }

    options {
        timestamps()
    }

    parameters {
        choice choices: ['Release', 'Debug'], description: 'Build Type', name: 'BUILD_TYPE'
        // NOTE(review): 'DOKCER_REGISTRY_URL' and 'ARTFACTORY' are typos, but
        // the parameter names are referenced by the step scripts loaded below,
        // so a rename must be coordinated across all of them.
        string defaultValue: 'registry.zilliz.com', description: 'DOCKER REGISTRY URL', name: 'DOKCER_REGISTRY_URL', trim: true
        string defaultValue: 'ba070c98-c8cc-4f7c-b657-897715f359fc', description: 'DOCKER CREDENTIALS ID', name: 'DOCKER_CREDENTIALS_ID', trim: true
        string defaultValue: 'http://192.168.1.201/artifactory/milvus', description: 'JFROG ARTFACTORY URL', name: 'JFROG_ARTFACTORY_URL', trim: true
        string defaultValue: '1a527823-d2b7-44fd-834b-9844350baf14', description: 'JFROG CREDENTIALS ID', name: 'JFROG_CREDENTIALS_ID', trim: true
    }

    environment {
        PROJECT_NAME = "milvus"
        MILVUS_ROOT_PATH="/var/lib"
        MILVUS_INSTALL_PREFIX="${env.MILVUS_ROOT_PATH}/${env.PROJECT_NAME}"
        LOWER_BUILD_TYPE = params.BUILD_TYPE.toLowerCase()
        // For "a/b" branch names keep only the segment after the last '/'.
        SEMVER = "${BRANCH_NAME.contains('/') ? BRANCH_NAME.substring(BRANCH_NAME.lastIndexOf('/') + 1) : BRANCH_NAME}"
        PIPELINE_NAME = "milvus-ci"
        HELM_BRANCH = "0.10.1"
    }

    stages {
        stage ('Milvus CI') {
            matrix {
                agent none
                axes {
                    axis {
                        name 'OS_NAME'
                        values 'ubuntu18.04', 'centos7'
                    }
                    axis {
                        name 'CPU_ARCH'
                        values 'amd64'
                    }
                    axis {
                        name 'BINARY_VERSION'
                        values 'gpu', 'cpu'
                    }
                }
                // ubuntu cells only run in nightly (timer-triggered) builds.
                when {
                    not {
                        expression {return OS_NAME == "ubuntu18.04" && !isTimeTriggeredBuild()}
                    }
                }
                environment {
                    // Unique per-day package tag, e.g. 0.10.3-gpu-centos7-amd64-release-20200820-1
                    PACKAGE_VERSION = VersionNumber([
                        versionNumberString : '${SEMVER}-${BINARY_VERSION}-${OS_NAME}-${CPU_ARCH}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
                    ]);
                    DOCKER_VERSION = "${SEMVER}-${BINARY_VERSION}-${OS_NAME}-${LOWER_BUILD_TYPE}"
                }
                stages {
                    // Build, unit-test, coverage and package inside the
                    // per-flavor build-env pod.
                    stage("Milvus Build and Unittest") {
                        agent {
                            kubernetes {
                                label "${OS_NAME}-${BINARY_VERSION}-build-${SEMVER}-${env.PIPELINE_NAME}-${env.BUILD_NUMBER}"
                                defaultContainer 'jnlp'
                                yamlFile "ci/jenkins/pod/milvus-${BINARY_VERSION}-version-${OS_NAME}-build-env-pod.yaml"
                            }
                        }
                        stages {
                            stage('Build') {
                                steps {
                                    container("milvus-${BINARY_VERSION}-build-env") {
                                        script {
                                            try{
                                                load "${env.WORKSPACE}/ci/jenkins/step/build.groovy"
                                            } catch (Exception e) {
                                                // Dump container logs for diagnosis before failing.
                                                containerLog "milvus-${BINARY_VERSION}-build-env"
                                                throw e
                                            }
                                        }
                                    }
                                }
                            }
                            stage('Unittest') {
                                steps {
                                    container("milvus-${BINARY_VERSION}-build-env") {
                                        script {
                                            // NOTE(review): unittest/coverage only run for the
                                            // gpu flavor; the cpu flavor is skipped entirely.
                                            if ("${BINARY_VERSION}" == "gpu") {
                                                load "${env.WORKSPACE}/ci/jenkins/step/unittest.groovy"
                                            } else {
                                                echo "Skip Unittest"
                                            }
                                        }
                                    }
                                }
                            }
                            stage('Code Coverage') {
                                steps {
                                    container("milvus-${BINARY_VERSION}-build-env") {
                                        script {
                                            if ("${BINARY_VERSION}" == "gpu") {
                                                load "${env.WORKSPACE}/ci/jenkins/step/coverage.groovy"
                                            } else {
                                                echo "Skip Code Coverage"
                                            }
                                        }
                                    }
                                }
                            }
                            stage('Upload Package') {
                                steps {
                                    container("milvus-${BINARY_VERSION}-build-env") {
                                        script {
                                            load "${env.WORKSPACE}/ci/jenkins/step/package.groovy"
                                        }
                                    }
                                }
                            }
                        }
                    }
                    stage('Publish Docker images') {
                        agent {
                            kubernetes {
                                label "${OS_NAME}-${BINARY_VERSION}-publish-${SEMVER}-${env.PIPELINE_NAME}-${env.BUILD_NUMBER}"
                                defaultContainer 'jnlp'
                                yamlFile 'ci/jenkins/pod/docker-pod.yaml'
                            }
                        }
                        stages {
                            stage('Publish') {
                                steps {
                                    container('publish-images') {
                                        script {
                                            load "${env.WORKSPACE}/ci/jenkins/step/publishImages.groovy"
                                        }
                                    }
                                }
                            }
                        }
                    }
                    stage('Deploy to Development') {
                        environment {
                            // NOTE(review): FROMAT_SEMVER is a typo for FORMAT_SEMVER,
                            // but it is referenced consistently below.
                            FROMAT_SEMVER = "${env.SEMVER}".replaceAll("\\.", "-").replaceAll("_", "-")
                            FORMAT_OS_NAME = "${OS_NAME}".replaceAll("\\.", "-").replaceAll("_", "-")
                            HELM_RELEASE_NAME = "${env.PIPELINE_NAME}-${env.FROMAT_SEMVER}-${env.BUILD_NUMBER}-single-${FORMAT_OS_NAME}-${BINARY_VERSION}".toLowerCase()
                            SHARDS_HELM_RELEASE_NAME = "${env.PIPELINE_NAME}-${env.FROMAT_SEMVER}-${env.BUILD_NUMBER}-shards-${FORMAT_OS_NAME}-${BINARY_VERSION}".toLowerCase()
                            DEV_TEST_ARTIFACTS = "_artifacts/${FROMAT_SEMVER}/${FORMAT_OS_NAME}/${BINARY_VERSION}"
                        }
                        agent {
                            kubernetes {
                                label "${OS_NAME}-${BINARY_VERSION}-dev-test-${SEMVER}-${env.PIPELINE_NAME}-${env.BUILD_NUMBER}"
                                defaultContainer 'jnlp'
                                yamlFile 'ci/jenkins/pod/testEnvironment.yaml'
                            }
                        }
                        stages {
                            stage('Dev Test') {
                                steps {
                                    container('milvus-test-env') {
                                        script {
                                            sh "mkdir -p ${env.DEV_TEST_ARTIFACTS}"
                                            // Nightly runs exercise both single-node and shards
                                            // deployments; regular runs only the single-node one.
                                            boolean isNightlyTest = isTimeTriggeredBuild()
                                            if (isNightlyTest) {
                                                load "${env.WORKSPACE}/ci/jenkins/step/singleDevNightlyTest.groovy"
                                                load "${env.WORKSPACE}/ci/jenkins/step/shardsDevNightlyTest.groovy"
                                            } else {
                                                load "${env.WORKSPACE}/ci/jenkins/step/singleDevTest.groovy"
                                            }
                                        }
                                    }
                                }
                            }
                        }
                        post {
                            cleanup {
                                container('milvus-test-env') {
                                    script {
                                        // Always archive test artifacts and tear down helm releases.
                                        archiveArtifacts artifacts: "${env.DEV_TEST_ARTIFACTS}/**", allowEmptyArchive: true
                                        load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
                                        boolean isNightlyTest = isTimeTriggeredBuild()
                                        if (isNightlyTest) {
                                            load "${env.WORKSPACE}/ci/jenkins/step/cleanupShardsDev.groovy"
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    post {
        unsuccessful {
            script {
                // Only nightly failures trigger a notification email.
                boolean isNightlyTest = isTimeTriggeredBuild()
                if (isNightlyTest) {
                    // Send an email only if the build status has changed from green/unstable to red
                    emailext subject: '$DEFAULT_SUBJECT',
                    body: '$DEFAULT_CONTENT',
                    recipientProviders: [
                        [$class: 'DevelopersRecipientProvider'],
                        [$class: 'RequesterRecipientProvider']
                    ],
                    replyTo: '$DEFAULT_REPLYTO',
                    to: 'dev.milvus@zilliz.com'
                }
            }
        }
    }
}
// True when the current build carries no SCM changelog entries.
boolean isEmptyChangelog() {
    return currentBuild.changeSets.size() == 0
}
// True when this run was started by the cron timer rather than an SCM event.
boolean isTimeTriggeredBuild() {
    return currentBuild.getBuildCauses('hudson.triggers.TimerTrigger$TimerTriggerCause').size() != 0
}

View File

@ -1,34 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
labels:
app: publish
componet: docker
spec:
containers:
- name: publish-images
image: registry.zilliz.com/library/docker:v1.0.0
securityContext:
privileged: true
command:
- cat
tty: true
resources:
limits:
memory: "8Gi"
cpu: "2"
requests:
memory: "2Gi"
cpu: "1"
volumeMounts:
- name: docker-sock
mountPath: /var/run/docker.sock
volumes:
- name: docker-sock
hostPath:
path: /var/run/docker.sock
tolerations:
- key: dedicated
operator: Equal
value: milvus
effect: NoSchedule

View File

@ -1,50 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: milvus-cpu-build-env
labels:
app: milvus
componet: cpu-build-env
spec:
containers:
- name: milvus-cpu-build-env
image: registry.zilliz.com/milvus/milvus-cpu-build-env:v0.9.0-centos7
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: OS_NAME
value: "centos7"
- name: BUILD_ENV_IMAGE_ID
value: "f2386d84d312e42891c8c70219b12fde014c21fbdbc0e59bede7e7609b1ba58b"
command:
- cat
tty: true
resources:
limits:
memory: "14Gi"
cpu: "6.0"
requests:
memory: "8Gi"
cpu: "4.0"
- name: milvus-mysql
image: mysql:5.6
env:
- name: MYSQL_ROOT_PASSWORD
value: 123456
ports:
- containerPort: 3306
name: mysql
resources:
limits:
memory: "500Mi"
cpu: "0.5"
requests:
memory: "200Mi"
cpu: "0.2"
tolerations:
- key: dedicated
operator: Equal
value: milvus
effect: NoSchedule

View File

@ -1,50 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: milvus-cpu-build-env
labels:
app: milvus
componet: cpu-build-env
spec:
containers:
- name: milvus-cpu-build-env
image: registry.zilliz.com/milvus/milvus-cpu-build-env:v0.9.0-ubuntu18.04
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: OS_NAME
value: "ubuntu18.04"
- name: BUILD_ENV_IMAGE_ID
value: "4719a06f1b77393fed7a4336058baab74745715a431193d3876e9b51262505bd"
command:
- cat
tty: true
resources:
limits:
memory: "14Gi"
cpu: "6.0"
requests:
memory: "8Gi"
cpu: "4.0"
- name: milvus-mysql
image: mysql:5.6
env:
- name: MYSQL_ROOT_PASSWORD
value: 123456
ports:
- containerPort: 3306
name: mysql
resources:
limits:
memory: "500Mi"
cpu: "0.5"
requests:
memory: "200Mi"
cpu: "0.2"
tolerations:
- key: dedicated
operator: Equal
value: milvus
effect: NoSchedule

View File

@ -1,52 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: milvus-gpu-build-env
labels:
app: milvus
componet: gpu-build-env
spec:
containers:
- name: milvus-gpu-build-env
image: registry.zilliz.com/milvus/milvus-gpu-build-env:v0.9.0-centos7
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: OS_NAME
value: "centos7"
- name: BUILD_ENV_IMAGE_ID
value: "7087442c4c5a7a7adbd7324c58b7b1ac19a25acfd86d6017b5752c4c6521f90e"
command:
- cat
tty: true
resources:
limits:
memory: "14Gi"
cpu: "6.0"
# nvidia.com/gpu: 1
aliyun.com/gpu-mem: 2 # GiB
requests:
memory: "8Gi"
cpu: "4.0"
- name: milvus-mysql
image: mysql:5.6
env:
- name: MYSQL_ROOT_PASSWORD
value: 123456
ports:
- containerPort: 3306
name: mysql
resources:
limits:
memory: "500Mi"
cpu: "0.5"
requests:
memory: "200Mi"
cpu: "0.2"
tolerations:
- key: dedicated
operator: Equal
value: milvus
effect: NoSchedule

View File

@ -1,52 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: milvus-gpu-build-env
labels:
app: milvus
componet: gpu-build-env
spec:
containers:
- name: milvus-gpu-build-env
image: registry.zilliz.com/milvus/milvus-gpu-build-env:v0.9.0-ubuntu18.04
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: OS_NAME
value: "ubuntu18.04"
- name: BUILD_ENV_IMAGE_ID
value: "0aa65ebac377834ceb9644c320f114b97b488d11762948770b994f73e5ae518f"
command:
- cat
tty: true
resources:
limits:
memory: "14Gi"
cpu: "6.0"
# nvidia.com/gpu: 1
aliyun.com/gpu-mem: 2 # GiB
requests:
memory: "8Gi"
cpu: "4.0"
- name: milvus-mysql
image: mysql:5.6
env:
- name: MYSQL_ROOT_PASSWORD
value: 123456
ports:
- containerPort: 3306
name: mysql
resources:
limits:
memory: "500Mi"
cpu: "0.5"
requests:
memory: "200Mi"
cpu: "0.2"
tolerations:
- key: dedicated
operator: Equal
value: milvus
effect: NoSchedule

View File

@ -1,33 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
componet: test-env
spec:
containers:
- name: milvus-test-env
image: registry.zilliz.com/milvus/milvus-test-env:v0.2
command:
- cat
tty: true
resources:
limits:
memory: "8Gi"
cpu: "4.0"
requests:
memory: "4Gi"
cpu: "2.0"
volumeMounts:
- name: kubeconf
mountPath: /root/.kube/
readOnly: true
volumes:
- name: kubeconf
secret:
secretName: test-cluster-config
tolerations:
- key: dedicated
operator: Equal
value: milvus
effect: NoSchedule

View File

@ -1,36 +0,0 @@
# -*- coding: utf-8 -*-
"""Send a daily-test notification email through the Zilliz SMTP relay.

Usage: python send_email.py <subject> <content> <smtp-token>
"""
import sys
import logging
from email.mime.text import MIMEText
from email.header import Header
import smtplib

# Default recipients used when the caller supplies none.
SMS_DEFAULT_TO_LIST = [
    "dev.milvus@zilliz.com",
]


def send_email(subject, content, token, receivers=None):
    """Send an HTML email.

    :param subject: subject line (UTF-8).
    :param content: HTML message body.
    :param token: SMTP password for the sender account.
    :param receivers: optional list of recipient addresses; defaults to
        SMS_DEFAULT_TO_LIST.
    """
    sender = 'test@zilliz.com'
    message = MIMEText(content, 'html', 'utf-8')
    message['From'] = Header("Daily Test")
    message['To'] = Header("dev.milvus")
    message['Subject'] = Header(subject, 'utf-8')
    if receivers is None:
        receivers = SMS_DEFAULT_TO_LIST
    # BUG FIX: the original created smtp_obj inside the try block and called
    # smtp_obj.quit() unconditionally in finally, so a failure in SMTP()
    # itself raised NameError. Initialize to None and guard the quit().
    smtp_obj = None
    try:
        smtp_obj = smtplib.SMTP('smtp.exmail.qq.com')
        smtp_obj.login(sender, token)
        smtp_obj.sendmail(sender, receivers, message.as_string())
    except smtplib.SMTPException as e:
        logging.error(str(e))
    finally:
        if smtp_obj is not None:
            smtp_obj.quit()


if __name__ == "__main__":
    if len(sys.argv) != 4:
        sys.exit()
    subject = sys.argv[1]
    content = sys.argv[2]
    token = sys.argv[3]
    send_email(subject, content, token)

View File

@ -1,2 +0,0 @@
ruamel.yaml==0.16.5
ruamel.yaml.clib==0.2.0

View File

@ -1,536 +0,0 @@
#!/usr/bin/env python3
import sys
import argparse
from argparse import Namespace
import os, shutil
import getopt
from ruamel.yaml import YAML, yaml_object
from ruamel.yaml.comments import CommentedSeq, CommentedMap
from ruamel.yaml.tokens import CommentToken
##
# Round-trip YAML handler: preserves comments, key order and anchors on
# load/dump, which this tool relies on for comment merging.
yaml = YAML(typ="rt")
## format yaml file
# Project yaml style: 2-space mappings, 4-space sequences with a 2-space
# dash offset.
yaml.indent(mapping=2, sequence=4, offset=2)
############################################
# Comment operation
#
############################################
def _extract_comment(_comment):
"""
remove '#' at start of comment
"""
# if _comment is empty, do nothing
if not _comment:
return _comment
# str_ = _comment.lstrip(" ")
str_ = _comment.strip()
str_ = str_.lstrip("#")
return str_
def _add_eol_comment(element, *args, **kwargs):
    """
    add_eol_comment

    Attach an end-of-line comment to an entry of a CommentedMap/CommentedSeq,
    silently ignoring empty comments and non-container elements.

    args --> (comment, key)
    """
    if element is None or \
            (not isinstance(element, CommentedMap) and
             not isinstance(element, CommentedSeq)) or \
            args[0] is None or \
            len(args[0]) == 0:
        return
    comment = args[0]
    # comment is empty, do nothing
    if not comment:
        return
    key = args[1]
    try:
        element.yaml_add_eol_comment(*args, **kwargs)
    except Exception:
        # ruamel refuses to overwrite an existing comment token; drop the
        # stale token for this key and retry.
        element.ca.items.pop(key, None)
        element.yaml_add_eol_comment(*args, **kwargs)
def _map_comment(_element, _key):
    """Return the raw comment text attached to `_key` of a CommentedMap,
    or "" when the key carries no comment."""
    origin_comment = ""
    token = _element.ca.items.get(_key, None)
    if token is not None:
        try:
            # Usual case: end-of-line comment token.
            origin_comment = token[2].value
        except Exception:
            try:
                # comment is below element, add prefix "#\n" and re-indent
                # each line to the element's column.
                col = _element.lc.col + 2
                space_list = [" " for i in range(col)]
                space_str = "".join(space_list)
                origin_comment = "\n" + "".join([space_str + t.value for t in token[3]])
            except Exception:
                pass
    return origin_comment
def _seq_comment(_element, _index):
    """Return the comment text attached to item `_index` of a CommentedSeq,
    or "" when the item carries no comment."""
    # get target comment
    _comment = ""
    token = _element.ca.items.get(_index, None)
    if token is not None:
        _comment = token[0].value
    return _comment
def _start_comment(_element):
    """Return the comment block preceding `_element` (a ruamel node), or ""
    when there is none or the comment metadata is absent."""
    _comment = ""
    cmt = _element.ca.comment
    try:
        _comment = cmt[1][0].value
    except Exception:
        # No start comment recorded for this node.
        pass
    return _comment
def _comment_counter(_comment):
"""
counter comment tips and split into list
"""
x = lambda l: l.strip().strip("#").strip()
_counter = []
if _comment.startswith("\n"):
_counter.append("")
_counter.append(x(_comment[1:]))
return _counter
elif _comment.startswith("#\n"):
_counter.append("")
_counter.append(x(_comment[2:]))
else:
index = _comment.find("\n")
_counter.append(x(_comment[:index]))
_counter.append(x(_comment[index + 1:]))
return _counter
def _obtain_comment(_m_comment, _t_comment):
    """Combine a master (existing) comment with a target (incoming) comment.

    The two comments are split into [head, tail] parts and recombined so that
    target parts override master parts where both are present; runs of blank
    lines in the result are collapsed.
    """
    # If either side is empty, the other wins outright.
    if not _m_comment or not _t_comment:
        return _m_comment or _t_comment
    _m_counter = _comment_counter(_m_comment)
    _t_counter = _comment_counter(_t_comment)
    if not _m_counter[0] and not _t_counter[1]:
        comment = _t_comment + _m_comment
    elif not _m_counter[1] and not _t_counter[0]:
        comment = _m_comment + _t_comment
    elif _t_counter[0] and _t_counter[1]:
        comment = _t_comment
    elif not _t_counter[0] and not _t_counter[1]:
        comment = _m_comment
    elif not _m_counter[0] and not _m_counter[1]:
        comment = _t_comment
    else:
        # Mixed case: splice the non-empty target part into the master text.
        if _t_counter[0]:
            comment = _m_comment.replace(_m_counter[0], _t_counter[0], 1)
        else:
            comment = _m_comment.replace(_m_counter[1], _t_counter[1], 1)
    # Collapse runs of three-or-more newlines down to two. The original
    # seeded this loop with find("\n\n"), which only caused one wasted no-op
    # iteration; search for the triple newline directly.
    i = comment.find("\n\n\n")
    while i >= 0:
        comment = comment.replace("\n\n\n", "\n\n", 1)
        i = comment.find("\n\n\n")
    return comment
############################################
# Utils
#
############################################
def _get_update_par(_args):
_dict = _args.__dict__
# file path
_in_file = _dict.get("f", None) or _dict.get("file", None)
# tips
_tips = _dict.get('tips', None) or "Input \"-h\" for more information"
# update
_u = _dict.get("u", None) or _dict.get("update", None)
# apppend
_a = _dict.get('a', None) or _dict.get('append', None)
# out stream group
_i = _dict.get("i", None) or _dict.get("inplace", None)
_o = _dict.get("o", None) or _dict.get("out_file", None)
return _in_file, _u, _a, _i, _o, _tips
############################################
# Element operation
#
############################################
def update_map_element(element, key, value, comment, _type):
    """
    Overwrite `element[key]` with scalar `value` and merge `comment` into the
    entry's end-of-line comment.

    element: CommentedMap being updated (process exits if not a map).
    key: mapping key to set.
    value: new scalar value.
    comment: comment text to merge with the existing one (may be None).
    _type: value type. (Currently unused.)
    """
    if element is None or not isinstance(element, CommentedMap):
        print("Only key-value update support")
        sys.exit(1)
    origin_comment = _map_comment(element, key)
    sub_element = element.get(key, None)
    if isinstance(sub_element, CommentedMap) or isinstance(sub_element, CommentedSeq):
        # Overwriting a container with a scalar still proceeds, but is flagged.
        print("Only support update a single value")
    element.update({key: value})
    comment = _obtain_comment(origin_comment, comment)
    _add_eol_comment(element, _extract_comment(comment), key)
def update_seq_element(element, value, comment, _type):
    """Append `value` (stringified) to a CommentedSeq with an optional
    end-of-line comment; exits when `element` is not a sequence.

    _type is accepted for signature parity with update_map_element but is
    currently unused.
    """
    if element is None or not isinstance(element, CommentedSeq):
        print("Param `-a` only use to append yaml list")
        sys.exit(1)
    element.append(str(value))
    comment = _obtain_comment("", comment)
    _add_eol_comment(element, _extract_comment(comment), len(element) - 1)
def run_update(code, keys, value, comment, _app):
    """Apply one dotted-path update to `code` by rendering it as a small yaml
    snippet and merging that snippet into the tree.

    keys: dotted path, e.g. "a.b.c".
    value: scalar value to assign (or list item when _app is truthy).
    comment: optional end-of-line comment appended as "# comment".
    _app: when truthy, append `value` as a sequence item instead of assigning.
    """
    key_list = keys.split(".")
    # Each nesting level adds a ":\n" plus two spaces of indentation.
    space_str = ":\n  "
    key_str = "{}".format(key_list[0])
    for key in key_list[1:]:
        key_str = key_str + space_str + key
        space_str = space_str + "  "
    if not _app:
        yaml_str = """{}: {}""".format(key_str, value)
    else:
        yaml_str = "{}{}- {}".format(key_str, space_str, value)
    if comment:
        yaml_str = "{} # {}".format(yaml_str, comment)
    # Parse the snippet and merge it so existing comments are preserved.
    mcode = yaml.load(yaml_str)
    _merge(code, mcode)
def _update(code, _update, _app, _tips):
    """Apply a comma-separated list of `key.path=value# comment` updates to
    `code`; exits with `_tips` guidance on a malformed expression.

    NOTE(review): the parameter `_update` shadows this function's own name;
    recursion is not used, so this is harmless, but renaming it would aid
    readability.
    """
    if not _update:
        return code
    _update_list = [l.strip() for l in _update.split(",")]
    for l in _update_list:
        try:
            # Split off an optional "# comment" suffix.
            variant, comment = l.split("#")
        except ValueError:
            variant = l
            comment = None
        try:
            keys, value = variant.split("=")
            run_update(code, keys, value, comment, _app)
        except ValueError:
            print("Invalid format. print command \"--help\" get more info.")
            sys.exit(1)
    return code
def _backup(in_file_p):
backup_p = in_file_p + ".bak"
if os.path.exists(backup_p):
os.remove(backup_p)
if not os.path.exists(in_file_p):
print("File {} not exists.".format(in_file_p))
sys.exit(1)
shutil.copyfile(in_file_p, backup_p) # 复制文件
def _recovery(in_file_p):
backup_p = in_file_p + ".bak"
if not os.path.exists(in_file_p):
print("File {} not exists.".format(in_file_p))
sys.exit(1)
elif not os.path.exists(backup_p):
print("Backup file not exists")
sys.exit(0)
os.remove(in_file_p)
os.rename(backup_p, in_file_p)
# master merge target
def _merge(master, target):
    """Recursively merge `target` into `master` (both ruamel round-trip
    nodes), combining comments as it goes.

    Sequences: map items with identical key sets merge in place; scalar items
    are appended when absent. Maps: nested containers merge recursively;
    scalars are overwritten by `target`. Exits when node types disagree.

    BUG FIX vs original: a leftover debug statement
    (`if item == "flag": print("")`) printed a blank line for any map key
    named "flag"; it has been removed.
    """
    if type(master) != type(target):
        print("yaml format not match:\n")
        yaml.dump(master, sys.stdout)
        print("\n&&\n")
        yaml.dump(target, sys.stdout)
        sys.exit(1)
    ## item is a sequence
    if isinstance(target, CommentedSeq):
        for index in range(len(target)):
            # get target comment
            target_comment = _seq_comment(target, index)
            master_index = len(master)
            target_item = target[index]
            if isinstance(target_item, CommentedMap):
                # Merge into an existing map item with the same key set, if any.
                merge_flag = False
                for idx in range(len(master)):
                    if isinstance(master[idx], CommentedMap):
                        if master[idx].keys() == target_item.keys():
                            _merge(master[idx], target_item)
                            # nonlocal merge_flag
                            master_index = idx
                            merge_flag = True
                            break
                if merge_flag is False:
                    master.append(target_item)
            elif target_item not in master:
                master.append(target[index])
            else:
                # merge(master[index], target[index])
                pass
            # # remove enter signal in previous item
            previous_comment = _seq_comment(master, master_index - 1)
            _add_eol_comment(master, _extract_comment(previous_comment), master_index - 1)
            origin_comment = _seq_comment(master, master_index)
            comment = _obtain_comment(origin_comment, target_comment)
            if len(comment) > 0:
                _add_eol_comment(master, _extract_comment(comment) + "\n\n", len(master) - 1)
    ## item is a map
    elif isinstance(target, CommentedMap):
        for item in target:
            origin_comment = _map_comment(master, item)
            target_comment = _map_comment(target, item)
            # get origin start comment
            origin_start_comment = _start_comment(master)
            # get target start comment
            target_start_comment = _start_comment(target)
            m = master.get(item, default=None)
            if m is None or \
                    (not (isinstance(m, CommentedMap) or
                          isinstance(m, CommentedSeq))):
                master.update({item: target[item]})
            else:
                _merge(master[item], target[item])
            comment = _obtain_comment(origin_comment, target_comment)
            if len(comment) > 0:
                _add_eol_comment(master, _extract_comment(comment), item)
        # NOTE(review): relies on the loop above having run at least once
        # (empty `target` maps would leave these names unbound) — behavior
        # preserved from the original.
        start_comment = _obtain_comment(origin_start_comment, target_start_comment)
        if len(start_comment) > 0:
            master.yaml_set_start_comment(_extract_comment(start_comment))
def _save(_code, _file):
    """Dump the ruamel node tree `_code` to `_file` (overwrites the file)."""
    with open(_file, 'w') as wf:
        yaml.dump(_code, wf)
def _load(_file):
    """Round-trip-load `_file` and return the ruamel node tree
    (comments and ordering preserved)."""
    with open(_file, 'r') as rf:
        code = yaml.load(rf)
    return code
############################################
# sub parser process operation
#
############################################
def merge_yaml(_args):
    """'merge' subcommand: merge another yaml file (-m) into the source file
    (-f), optionally apply -u updates, then write in place (-i, after making
    a .bak backup) or to -o; exits with usage tips on bad invocations."""
    _dict = _args.__dict__
    _m_file = _dict.get("merge_file", None)
    _in_file, _u, _a, _i, _o, _tips = _get_update_par(_args)
    if not (_in_file and _m_file):
        print(_tips)
        sys.exit(1)
    code = _load(_in_file)
    mcode = _load(_m_file)
    _merge(code, mcode)
    _update(code, _u, _a, _tips)
    if _i:
        # In-place: keep a .bak copy before overwriting.
        _backup(_in_file)
        _save(code, _in_file)
    elif _o:
        _save(code, _o)
    else:
        # No output destination selected.
        print(_tips)
        sys.exit(1)
def update_yaml(_args):
    """'update' subcommand: apply -u updates to the source yaml file (-f)
    and write in place (-i, after a .bak backup) or to -o.

    NOTE(review): unlike merge_yaml, when neither -i nor -o is given the
    updated tree is silently discarded instead of printing the usage tips —
    likely an oversight worth aligning.
    """
    _in_file, _u, _a, _i, _o, _tips = _get_update_par(_args)
    if not _in_file or not _u:
        print(_tips)
        sys.exit(1)
    code = _load(_in_file)
    if _i and _o:
        # Mutually exclusive output modes requested together.
        print(_tips)
        sys.exit(1)
    _update(code, _u, _a, _tips)
    if _i:
        _backup(_in_file)
        _save(code, _in_file)
    elif _o:
        _save(code, _o)
def reset(_args):
    """'reset' subcommand: restore the yaml file (-f) from its '.bak'
    backup; prints the usage tips when no file is given."""
    options = _args.__dict__
    target = options.get('f', None) or options.get('file', None)
    # Guard clause: without a file there is nothing to restore.
    if not target:
        print(options.get('tips', None) or "Input \"-h\" for more information")
        return
    _recovery(target)
############################################
# Cli operation
#
############################################
def _set_merge_parser(_parsers):
    """
    config merge parser

    Wires the 'merge' subcommand: adds the '-m/--merge-file' source plus the
    shared update options, and binds the merge_yaml handler.
    """
    merge_parser = _parsers.add_parser("merge", help="merge with another yaml file")
    _set_merge_parser_arg(merge_parser)
    _set_update_parser_arg(merge_parser)
    merge_parser.set_defaults(
        function=merge_yaml,
        tips=merge_parser.format_help()
    )
def _set_merge_parser_arg(_parser):
"""
config parser argument for merging
"""
_parser.add_argument("-m", "--merge-file", help="indicate merge yaml file")
def _set_update_parser(_parsers):
    """
    config merge parser

    Wires the 'update' subcommand with the shared update options and binds
    the update_yaml handler.
    """
    update_parser = _parsers.add_parser("update", help="update with another yaml file")
    _set_update_parser_arg(update_parser)
    update_parser.set_defaults(
        function=update_yaml,
        tips=update_parser.format_help()
    )
def _set_update_parser_arg(_parser):
"""
config parser argument for updating
"""
_parser.add_argument("-f", "--file", help="source yaml file")
_parser.add_argument('-u', '--update', help="update with args, instance as \"a.b.c=d# d comment\"")
_parser.add_argument('-a', '--append', action="store_true", help="append to a seq")
group = _parser.add_mutually_exclusive_group()
group.add_argument("-o", "--out-file", help="indicate output yaml file")
group.add_argument("-i", "--inplace", action="store_true", help="indicate whether result store in origin file")
def _set_reset_parser(_parsers):
    """
    config merge parser

    Wires the 'reset' subcommand (restore from '.bak') and binds the reset
    handler.
    """
    reset_parser = _parsers.add_parser("reset", help="reset yaml file")
    # indicate yaml file
    reset_parser.add_argument('-f', '--file', help="indicate input yaml file")
    reset_parser.set_defaults(
        function=reset,
        tips=reset_parser.format_help()
    )
def main():
    """Entry point: build the merge/update/reset cli and dispatch to the
    selected subcommand handler."""
    parser = argparse.ArgumentParser()
    sub_parsers = parser.add_subparsers()
    # set merge command
    _set_merge_parser(sub_parsers)
    # set update command
    _set_update_parser(sub_parsers)
    # set reset command
    _set_reset_parser(sub_parsers)
    # parse argument and run func
    args = parser.parse_args()
    # BUG FIX: argparse does not require a subcommand, so invoking the tool
    # with no arguments left `args` without a `function` attribute and raised
    # AttributeError. Print usage and exit instead.
    if not hasattr(args, "function"):
        parser.print_help()
        sys.exit(1)
    args.function(args)


if __name__ == '__main__':
    main()

View File

@ -1,14 +0,0 @@
// Build step: compile and install Milvus inside the build container.
// GPU builds add `-g`; both flavors enable FIU fault injection and coverage.
timeout(time: 120, unit: 'MINUTES') {
    dir ("ci/scripts") {
        withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
            // Best-effort ccache warm-up from artifactory; a failure here is
            // tolerated (returnStatus) and simply means a cold build.
            def checkResult = sh(script: "./check_ccache.sh -l ${params.JFROG_ARTFACTORY_URL}/ccache", returnStatus: true)
            if ("${BINARY_VERSION}" == "gpu") {
                sh "/bin/bash --login -c \". ./before-install.sh && ./build.sh -t ${params.BUILD_TYPE} -j3 -i ${env.MILVUS_INSTALL_PREFIX} --with_fiu --coverage -l -g -u\""
            } else {
                sh "/bin/bash --login -c \". ./before-install.sh && ./build.sh -t ${params.BUILD_TYPE} -j3 -i ${env.MILVUS_INSTALL_PREFIX} --with_fiu --coverage -l -u\""
            }
            // Push the refreshed cache back so later builds start warm.
            sh "./update_ccache.sh -l ${params.JFROG_ARTFACTORY_URL}/ccache -u ${USERNAME} -p ${PASSWORD}"
        }
    }
}
View File

@ -1,12 +0,0 @@
// Tear down the shards (mishards) helm release if it is still installed.
// `helm status` returns non-zero when the release is absent, so the
// uninstall only runs for an existing release. The catch block retries the
// same cleanup once before re-throwing, giving transient failures a second
// chance to free cluster resources.
try {
    def helmResult = sh script: "helm status -n milvus ${env.SHARDS_HELM_RELEASE_NAME}", returnStatus: true
    if (!helmResult) {
        sh "helm uninstall -n milvus ${env.SHARDS_HELM_RELEASE_NAME}"
    }
} catch (exc) {
    def helmResult = sh script: "helm status -n milvus ${env.SHARDS_HELM_RELEASE_NAME}", returnStatus: true
    if (!helmResult) {
        sh "helm uninstall -n milvus ${env.SHARDS_HELM_RELEASE_NAME}"
    }
    throw exc
}

View File

@ -1,12 +0,0 @@
// Tear down the single-node helm release if it is still installed.
// `helm status` returns non-zero when the release is absent, so the
// uninstall only runs for an existing release. The catch block retries the
// same cleanup once before re-throwing, giving transient failures a second
// chance to free cluster resources.
try {
    def helmResult = sh script: "helm status -n milvus ${env.HELM_RELEASE_NAME}", returnStatus: true
    if (!helmResult) {
        sh "helm uninstall -n milvus ${env.HELM_RELEASE_NAME}"
    }
} catch (exc) {
    def helmResult = sh script: "helm status -n milvus ${env.HELM_RELEASE_NAME}", returnStatus: true
    if (!helmResult) {
        sh "helm uninstall -n milvus ${env.HELM_RELEASE_NAME}"
    }
    throw exc
}

View File

@ -1,16 +0,0 @@
// Coverage step: collect lcov data and upload it to codecov through the
// internal proxy. Nightly (timer-triggered) runs are additionally tagged
// with the `nightly` codecov flag.
//
// Improvements vs original: the redundant `... != 0 ? true : false` ternary
// is dropped, and the two near-identical upload branches (differing only in
// the extra `-F nightly` flag) are merged into one.
timeout(time: 30, unit: 'MINUTES') {
    dir ("ci/scripts") {
        sh "./coverage.sh"
        boolean isNightlyTest = currentBuild.getBuildCauses('hudson.triggers.TimerTrigger$TimerTriggerCause').size() != 0
        String formatFlag = "${BINARY_VERSION}-version-${OS_NAME}-unittest".replaceAll("\\.", "_").replaceAll("-", "_")
        String nightlyFlag = isNightlyTest ? "-F nightly" : ""
        withCredentials([[$class: 'StringBinding', credentialsId: "milvus-ci-codecov-token", variable: 'CODECOV_TOKEN']]) {
            sh "curl -s https://codecov.io/bash | bash -s - -f output_new.info -U \"--proxy http://proxy.zilliz.tech:1088\" -A \"--proxy http://proxy.zilliz.tech:1088\" -n ${BINARY_VERSION}-version-${OS_NAME}-unittest ${nightlyFlag} -F ${formatFlag} || echo \"Codecov did not collect coverage reports\""
        }
    }
}

View File

@ -1,8 +0,0 @@
// Package step: drop the unittest binaries, tar the install tree and upload
// the archive to artifactory; the build fails when the upload is rejected.
sh "rm -rf ${env.MILVUS_INSTALL_PREFIX}/unittest"
sh "tar -zcvf ./${env.PROJECT_NAME}-${env.PACKAGE_VERSION}.tar.gz -C ${env.MILVUS_ROOT_PATH}/ ${env.PROJECT_NAME}"
withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'JFROG_USERNAME', passwordVariable: 'JFROG_PASSWORD')]) {
    // curl exit status signals upload failure (returnStatus avoids an
    // immediate abort so we can raise a descriptive error instead).
    def uploadStatus = sh(returnStatus: true, script: "curl -u${JFROG_USERNAME}:${JFROG_PASSWORD} -T ./${env.PROJECT_NAME}-${env.PACKAGE_VERSION}.tar.gz ${params.JFROG_ARTFACTORY_URL}/milvus/package/${env.PROJECT_NAME}-${env.PACKAGE_VERSION}.tar.gz")
    if (uploadStatus != 0) {
        error("\" ${env.PROJECT_NAME}-${env.PACKAGE_VERSION}.tar.gz \" upload to \" ${params.JFROG_ARTFACTORY_URL}/milvus/package/${env.PROJECT_NAME}-${env.PACKAGE_VERSION}.tar.gz \" failed!")
    }
}
View File

@ -1,50 +0,0 @@
// Publish step: download the binary package built earlier, build the engine
// docker image for this matrix cell and push it to the registry.
dir ("docker/deploy/${BINARY_VERSION}/${OS_NAME}") {
    def binaryPackage = "${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz"

    // Fetch the tarball produced by the package step from artifactory.
    withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'JFROG_USERNAME', passwordVariable: 'JFROG_PASSWORD')]) {
        def downloadStatus = sh(returnStatus: true, script: "curl -u${JFROG_USERNAME}:${JFROG_PASSWORD} -O ${params.JFROG_ARTFACTORY_URL}/milvus/package/${binaryPackage}")
        if (downloadStatus != 0) {
            error("\" Download \" ${params.JFROG_ARTFACTORY_URL}/milvus/package/${binaryPackage} \" failed!")
        }
    }

    sh "tar zxvf ${binaryPackage}"
    def imageName = "${PROJECT_NAME}/engine:${DOCKER_VERSION}"

    try {
        // Remove stale local copies so the build and push start clean.
        deleteImages("${imageName}", true)
        def customImage = docker.build("${imageName}")
        deleteImages("${params.DOKCER_REGISTRY_URL}/${imageName}", true)
        docker.withRegistry("https://${params.DOKCER_REGISTRY_URL}", "${params.DOCKER_CREDENTIALS_ID}") {
            customImage.push()
        }
    } catch (exc) {
        throw exc
    } finally {
        // Local images are only needed for the push; always clean them up.
        deleteImages("${imageName}", true)
        deleteImages("${params.DOKCER_REGISTRY_URL}/${imageName}", true)
    }
}
// Remove a local docker image if it exists. With `force` the image is
// deleted by ID with `docker rmi -f`, which also removes any other tags
// pointing at the same image. Returns false only when the image exists and
// removal failed; a missing image counts as success.
boolean deleteImages(String imageName, boolean force) {
    def imageNameStr = imageName.trim()
    // `docker inspect` exit status doubles as an existence check.
    def isExistImage = sh(returnStatus: true, script: "docker inspect --type=image ${imageNameStr} 2>&1 > /dev/null")
    if (isExistImage == 0) {
        def deleteImageStatus = 0
        if (force) {
            def imageID = sh(returnStdout: true, script: "docker inspect --type=image --format \"{{.ID}}\" ${imageNameStr}")
            deleteImageStatus = sh(returnStatus: true, script: "docker rmi -f ${imageID}")
        } else {
            deleteImageStatus = sh(returnStatus: true, script: "docker rmi ${imageNameStr}")
        }
        if (deleteImageStatus != 0) {
            return false
        }
    }
    return true
}

View File

@ -1,35 +0,0 @@
// Nightly shards (mishards) test: deploy a clustered Milvus via helm, then
// run the level-2 python test suite against the in-cluster service.
timeout(time: 180, unit: 'MINUTES') {
    dir ('milvus-helm') {
        sh 'helm version'
        sh 'helm repo add stable https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
        sh 'helm repo update'
        checkout([$class: 'GitSCM', branches: [[name: "${env.HELM_BRANCH}"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${env.HELM_BRANCH}:refs/remotes/origin/${env.HELM_BRANCH}"]]])
        // sh 'helm dep update'
        retry(3) {
            try {
                dir ('charts/milvus') {
                    // BUG FIX: the Jenkinsfile matrix axis values are
                    // lowercase ('cpu'/'gpu'), so the original comparison
                    // against "CPU" could never match and the gpu-enabled
                    // install always ran. Compare against "cpu".
                    if ("${BINARY_VERSION}" == "cpu") {
                        sh "helm install --wait --timeout 300s --set cluster.enabled=true --set persistence.enabled=true --set image.repository=registry.zilliz.com/milvus/engine --set mishards.image.tag=test --set mishards.image.pullPolicy=Always --set image.tag=${DOCKER_VERSION} --set image.pullPolicy=Always --set service.type=ClusterIP --set image.resources.requests.memory=8Gi --set image.resources.requests.cpu=2.0 --set image.resources.limits.memory=12Gi --set image.resources.limits.cpu=4.0 -f ci/db_backend/mysql_${BINARY_VERSION}_values.yaml --namespace milvus ${env.SHARDS_HELM_RELEASE_NAME} ."
                    } else {
                        sh "helm install --wait --timeout 300s --set cluster.enabled=true --set persistence.enabled=true --set image.repository=registry.zilliz.com/milvus/engine --set mishards.image.tag=test --set mishards.image.pullPolicy=Always --set gpu.enabled=true --set image.tag=${DOCKER_VERSION} --set image.pullPolicy=Always --set service.type=ClusterIP -f ci/db_backend/mysql_${BINARY_VERSION}_values.yaml --namespace milvus ${env.SHARDS_HELM_RELEASE_NAME} ."
                    }
                }
            } catch (exc) {
                // Dump manifest, pod logs and release status for diagnosis,
                // then uninstall before letting retry() try again.
                def helmStatusCMD = "helm get manifest --namespace milvus ${env.SHARDS_HELM_RELEASE_NAME} | kubectl describe -n milvus -f - && \
                kubectl logs --namespace milvus -l \"app=milvus,release=${env.SHARDS_HELM_RELEASE_NAME}\" -c milvus && \
                helm status -n milvus ${env.SHARDS_HELM_RELEASE_NAME}"
                def helmResult = sh script: helmStatusCMD, returnStatus: true
                if (!helmResult) {
                    sh "helm uninstall -n milvus ${env.SHARDS_HELM_RELEASE_NAME} && sleep 1m"
                }
                throw exc
            }
        }
    }
    dir ("tests/milvus_python_test") {
        sh 'python3 -m pip install -r requirements.txt'
        sh "pytest . --level=2 --alluredir=\"test_out/dev/shards/\" --ip ${env.SHARDS_HELM_RELEASE_NAME}.milvus.svc.cluster.local >> ${WORKSPACE}/${env.DEV_TEST_ARTIFACTS}/milvus_${BINARY_VERSION}_shards_dev_test.log"
    }
}

View File

@ -1,60 +0,0 @@
// CI step: run the single-node python test suite twice — first against a MySQL
// metadata backend, then (after cleanup) against the SQLite backend, plus an
// HTTP-handler pass. Whole step is capped at 3 hours.
timeout(time: 180, unit: 'MINUTES') {
dir ('milvus-helm') {
sh 'helm version'
// Aliyun mirror of the stable chart repo.
sh 'helm repo add stable https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
sh 'helm repo update'
checkout([$class: 'GitSCM', branches: [[name: "${env.HELM_BRANCH}"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${env.HELM_BRANCH}:refs/remotes/origin/${env.HELM_BRANCH}"]]])
// sh 'helm dep update'
retry(3) {
try {
dir ('charts/milvus') {
// MySQL-backed single-node deployment with filebeat log shipping.
sh "helm install --wait --timeout 300s --set image.repository=registry.zilliz.com/milvus/engine --set image.tag=${DOCKER_VERSION} --set image.pullPolicy=Always --set service.type=ClusterIP --set image.resources.requests.memory=8Gi --set image.resources.requests.cpu=2.0 --set image.resources.limits.memory=12Gi --set image.resources.limits.cpu=4.0 -f ci/db_backend/mysql_${BINARY_VERSION}_values.yaml -f ci/filebeat/values.yaml --namespace milvus ${env.HELM_RELEASE_NAME} ."
}
} catch (exc) {
// Dump diagnostics, uninstall the release if it exists, and rethrow for retry.
def helmStatusCMD = "helm get manifest --namespace milvus ${env.HELM_RELEASE_NAME} | kubectl describe -n milvus -f - && \
kubectl logs --namespace milvus -l \"app=milvus,release=${env.HELM_RELEASE_NAME}\" -c milvus && \
helm status -n milvus ${env.HELM_RELEASE_NAME}"
def helmResult = sh script: helmStatusCMD, returnStatus: true
if (!helmResult) {
sh "helm uninstall -n milvus ${env.HELM_RELEASE_NAME} && sleep 1m"
}
throw exc
}
}
}
dir ("tests/milvus_python_test") {
// sh 'python3 -m pip install -r requirements.txt -i http://pypi.douban.com/simple --trusted-host pypi.douban.com'
sh 'python3 -m pip install -r requirements.txt'
sh "pytest . --level=2 --alluredir=\"test_out/dev/single/mysql\" --ip ${env.HELM_RELEASE_NAME}.milvus.svc.cluster.local >> ${WORKSPACE}/${env.DEV_TEST_ARTIFACTS}/milvus_${BINARY_VERSION}_mysql_dev_test.log"
}
// sqlite database backend test
// Tear down the MySQL deployment, then redeploy with the SQLite backend.
load "ci/jenkins/step/cleanupSingleDev.groovy"
// Re-checkout the chart if cleanup removed the working tree.
if (!fileExists('milvus-helm/charts/milvus')) {
dir ("milvus-helm") {
checkout([$class: 'GitSCM', branches: [[name:"${env.HELM_BRANCH}"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${env.HELM_BRANCH}:refs/remotes/origin/${env.HELM_BRANCH}"]]])
}
}
retry(3) {
try {
dir ("milvus-helm/charts/milvus") {
sh "helm install --wait --timeout 300s --set image.repository=registry.zilliz.com/milvus/engine --set image.tag=${DOCKER_VERSION} --set image.pullPolicy=Always --set service.type=ClusterIP --set image.resources.requests.memory=8Gi --set image.resources.requests.cpu=2.0 --set image.resources.limits.memory=12Gi --set image.resources.limits.cpu=4.0 -f ci/db_backend/sqlite_${BINARY_VERSION}_values.yaml -f ci/filebeat/values.yaml --namespace milvus ${env.HELM_RELEASE_NAME} ."
}
} catch (exc) {
def helmStatusCMD = "helm get manifest --namespace milvus ${env.HELM_RELEASE_NAME} | kubectl describe -n milvus -f - && \
kubectl logs --namespace milvus -l \"app=milvus,release=${env.HELM_RELEASE_NAME}\" -c milvus && \
helm status -n milvus ${env.HELM_RELEASE_NAME}"
def helmResult = sh script: helmStatusCMD, returnStatus: true
if (!helmResult) {
sh "helm uninstall -n milvus ${env.HELM_RELEASE_NAME} && sleep 1m"
}
throw exc
}
}
// SQLite pass: full level-2 suite via gRPC, then level-1 via the HTTP handler on 19121.
dir ("tests/milvus_python_test") {
sh "pytest . --level=2 --alluredir=\"test_out/dev/single/sqlite\" --ip ${env.HELM_RELEASE_NAME}.milvus.svc.cluster.local >> ${WORKSPACE}/${env.DEV_TEST_ARTIFACTS}/milvus_${BINARY_VERSION}_sqlite_dev_test.log"
sh "pytest . --level=1 --ip ${env.HELM_RELEASE_NAME}.milvus.svc.cluster.local --port=19121 --handler=HTTP >> ${WORKSPACE}/${env.DEV_TEST_ARTIFACTS}/milvus_${BINARY_VERSION}_sqlite_http_dev_test.log"
}
}

View File

@ -1,32 +0,0 @@
// CI step: deploy a single-node Milvus (MySQL metadata backend, persistent
// storage) via Helm and run the level-1 python tests. Capped at 2 hours.
timeout(time: 120, unit: 'MINUTES') {
dir ('milvus-helm') {
sh 'helm version'
// Aliyun mirror of the stable chart repo.
sh 'helm repo add stable https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
sh 'helm repo update'
checkout([$class: 'GitSCM', branches: [[name: "${env.HELM_BRANCH}"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${env.HELM_BRANCH}:refs/remotes/origin/${env.HELM_BRANCH}"]]])
// sh 'helm dep update'
retry(3) {
try {
dir ('charts/milvus') {
sh "helm install --wait --timeout 300s --set image.repository=registry.zilliz.com/milvus/engine --set persistence.enabled=true --set image.tag=${DOCKER_VERSION} --set image.pullPolicy=Always --set service.type=ClusterIP -f ci/db_backend/mysql_${BINARY_VERSION}_values.yaml -f ci/filebeat/values.yaml --namespace milvus ${env.HELM_RELEASE_NAME} ."
}
} catch (exc) {
// Dump diagnostics; uninstall the release if it exists so the retry is clean.
def helmStatusCMD = "helm get manifest --namespace milvus ${env.HELM_RELEASE_NAME} | kubectl describe -n milvus -f - && \
kubectl logs --namespace milvus -l \"app=milvus,release=${env.HELM_RELEASE_NAME}\" -c milvus && \
helm status -n milvus ${env.HELM_RELEASE_NAME}"
def helmResult = sh script: helmStatusCMD, returnStatus: true
if (!helmResult) {
sh "helm uninstall -n milvus ${env.HELM_RELEASE_NAME} && sleep 1m"
}
throw exc
}
}
}
dir ("tests/milvus_python_test") {
// sh 'python3 -m pip install -r requirements.txt -i http://pypi.douban.com/simple --trusted-host pypi.douban.com'
sh 'python3 -m pip install -r requirements.txt'
sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --level=1 --ip ${env.HELM_RELEASE_NAME}.milvus.svc.cluster.local --service ${env.HELM_RELEASE_NAME} >> ${WORKSPACE}/${env.DEV_TEST_ARTIFACTS}/milvus_${BINARY_VERSION}_mysql_dev_test.log"
// sh "pytest test_restart.py --alluredir=\"test_out/dev/single/mysql\" --level=3 --ip ${env.HELM_RELEASE_NAME}.milvus.svc.cluster.local --service ${env.HELM_RELEASE_NAME}"
}
}

View File

@ -1,5 +0,0 @@
// CI step: run the C++ unit tests (30-minute budget) against a local MySQL.
// NOTE(review): root/123456 are throwaway CI-container credentials, not secrets.
timeout(time: 30, unit: 'MINUTES') {
dir ("ci/scripts") {
sh "./run_unittest.sh -i ${env.MILVUS_INSTALL_PREFIX} --mysql_user=root --mysql_password=123456 --mysql_host=\"127.0.0.1\""
}
}

View File

@ -1,10 +0,0 @@
#!/bin/bash
# Configure the environment so subsequent compiler invocations go through ccache.

# Fail fast and echo commands while setting things up.
set -ex

# Store cache objects compressed, at a moderate compression level.
export CCACHE_COMPRESS=1 CCACHE_COMPRESSLEVEL=5

# Hash the compiler binary's content (not mtime/size) when deciding cache hits,
# so the cache survives compiler reinstalls with identical binaries.
export CCACHE_COMPILERCHECK=content

# Put the ccache compiler shims ahead of the real compilers on PATH.
export PATH="/usr/lib/ccache/:${PATH}"

set +ex

View File

@ -1,170 +0,0 @@
#!/bin/bash
# Configure and (optionally) compile/install the Milvus core with CMake.
# Any failing command aborts the script.
set -e

# Resolve the real directory of this script, following symlinks.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

MILVUS_CORE_DIR="${SCRIPTS_DIR}/../../core"
CORE_BUILD_DIR="${MILVUS_CORE_DIR}/cmake_build"

HELP="
Usage:
$0 [flags] [Arguments]
clean Remove all existing build artifacts and configuration (start over)
-i [INSTALL_PREFIX] or --install_prefix=[INSTALL_PREFIX]
Install directory used by install.
-t [BUILD_TYPE] or --build_type=[BUILD_TYPE]
Build type (default: Release)
-j[N] or --jobs=[N] Allow N jobs at once; infinite jobs with no arg.
-l Run cpplint & check clang-format
-n No make and make install step
-g Building for the architecture of the GPU in the system
--with_mkl Build with MKL (default: OFF)
--with_fiu Build with FIU (default: OFF)
-c or --coverage Build Code Coverage
-u or --tests Build unittest case
-p or --privileges Install command with elevated privileges
-v or --verbose A level above basic; includes messages about which makefiles were parsed, prerequisites that did not need to be rebuilt
-h or --help Print help information
Use \"$0 --help\" for more information about a given command.
"

# BUGFIX: "verbose" was missing from the long-option list, so "--verbose"
# failed to parse even though "-v" worked and HELP advertised both forms.
ARGS=`getopt -o "i:t:j::lngcupvh" -l "install_prefix::,build_type::,jobs::,with_mkl,with_fiu,coverage,tests,privileges,verbose,help" -n "$0" -- "$@"`

eval set -- "${ARGS}"

while true ; do
case "$1" in
-i|--install_prefix)
# o has an optional argument. As we are in quoted mode,
# an empty parameter will be generated if its optional
# argument is not found.
case "$2" in
"") echo "Option install_prefix, no argument"; exit 1 ;;
*) INSTALL_PREFIX=$2 ; shift 2 ;;
esac ;;
-t|--build_type)
case "$2" in
"") echo "Option build_type, no argument"; exit 1 ;;
*) BUILD_TYPE=$2 ; shift 2 ;;
esac ;;
-j|--jobs)
case "$2" in
"") PARALLEL_LEVEL=""; shift 2 ;;
*) PARALLEL_LEVEL=$2 ; shift 2 ;;
esac ;;
-g) echo "Building for the architecture of the GPU in the system..." ; GPU_VERSION="ON" ; shift ;;
--with_mkl) echo "Build with MKL" ; WITH_MKL="ON" ; shift ;;
--with_fiu) echo "Build with FIU" ; FIU_ENABLE="ON" ; shift ;;
# BUGFIX: accept the short form "-c" declared in the getopt optstring;
# previously only "--coverage" matched and "-c" fell through to the
# catch-all "Internal error!" branch.
-c|--coverage) echo "Build code coverage" ; BUILD_COVERAGE="ON" ; shift ;;
-u|--tests) echo "Build unittest cases" ; BUILD_UNITTEST="ON" ; shift ;;
-n) echo "No build and install step" ; COMPILE_BUILD="OFF" ; shift ;;
-l) RUN_CPPLINT="ON" ; shift ;;
-p|--privileges) PRIVILEGES="ON" ; shift ;;
-v|--verbose) VERBOSE="1" ; shift ;;
-h|--help) echo -e "${HELP}" ; exit 0 ;;
--) shift ; break ;;
*) echo "Internal error!" ; exit 1 ;;
esac
done

# Set defaults for vars modified by flags to this script
CUDA_COMPILER=/usr/local/cuda/bin/nvcc
INSTALL_PREFIX=${INSTALL_PREFIX:="/var/lib/milvus"}
VERBOSE=${VERBOSE:=""}
BUILD_TYPE=${BUILD_TYPE:="Release"}
BUILD_UNITTEST=${BUILD_UNITTEST:="OFF"}
BUILD_COVERAGE=${BUILD_COVERAGE:="OFF"}
COMPILE_BUILD=${COMPILE_BUILD:="ON"}
GPU_VERSION=${GPU_VERSION:="OFF"}
RUN_CPPLINT=${RUN_CPPLINT:="OFF"}
WITH_MKL=${WITH_MKL:="OFF"}
FIU_ENABLE=${FIU_ENABLE:="OFF"}
PRIVILEGES=${PRIVILEGES:="OFF"}
CLEANUP=${CLEANUP:="OFF"}
PARALLEL_LEVEL=${PARALLEL_LEVEL:="8"}

# Positional "clean" argument: wipe the CMake build tree and stop.
for arg do
if [[ $arg == "clean" ]];then
echo "Remove all existing build artifacts and configuration..."
if [ -d ${CORE_BUILD_DIR} ]; then
find ${CORE_BUILD_DIR} -mindepth 1 -delete
rmdir ${CORE_BUILD_DIR} || true
fi
exit 0
fi
done

if [[ ! -d ${CORE_BUILD_DIR} ]]; then
mkdir ${CORE_BUILD_DIR}
fi

echo -e "===\n=== ccache statistics before build\n==="
ccache --show-stats

pushd ${CORE_BUILD_DIR}

# Assemble the CMake configure command.
# (Consistency fix: the INSTALL_PREFIX line now ends with "\" like the others;
# behavior is unchanged since word-splitting on the embedded newline was
# equivalent, but the string no longer carries a stray newline.)
CMAKE_CMD="cmake \
-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \
-DMILVUS_GPU_VERSION=${GPU_VERSION} \
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
-DFAISS_WITH_MKL=${WITH_MKL} \
-DArrow_SOURCE=AUTO \
-DFAISS_SOURCE=AUTO \
-DOpenBLAS_SOURCE=AUTO \
-DMILVUS_WITH_FIU=${FIU_ENABLE} \
${MILVUS_CORE_DIR}"
echo ${CMAKE_CMD}
${CMAKE_CMD}

if [[ ${RUN_CPPLINT} == "ON" ]]; then
# cpplint check
# NOTE(review): with "set -e" a failing make exits here before the explicit
# $? checks; the checks are kept for clarity/robustness if set -e is removed.
make lint
if [ $? -ne 0 ]; then
echo "ERROR! cpplint check failed"
exit 1
fi
echo "cpplint check passed!"

# clang-format check
make check-clang-format
if [ $? -ne 0 ]; then
echo "ERROR! clang-format check failed"
exit 1
fi
echo "clang-format check passed!"

# # clang-tidy check
# make check-clang-tidy
# if [ $? -ne 0 ]; then
#     echo "ERROR! clang-tidy check failed"
#     rm -f CMakeCache.txt
#     exit 1
# fi
# echo "clang-tidy check passed!"
fi

if [[ ${COMPILE_BUILD} == "ON" ]];then
# compile and build
make -j${PARALLEL_LEVEL} VERBOSE=${VERBOSE} || exit 1
if [[ ${PRIVILEGES} == "ON" ]];then
sudo make install || exit 1
else
make install || exit 1
fi
fi

popd

View File

@ -1,106 +0,0 @@
#!/bin/bash
# Warm the local ccache from Artifactory before a CI build. Tries, in order:
# the PR's target branch, the current branch, then every branch decoration on
# HEAD. Exits 0 on the first successful download, 1 if no cache was found.

HELP="
Usage:
$0 [flags] [Arguments]
-l [ARTIFACTORY_URL] Artifactory URL
--cache_dir=[CCACHE_DIR] Ccache directory
-f [FILE] or --file=[FILE] Ccache compress package file
-h or --help Print help information
Use \"$0 --help\" for more information about a given command.
"

ARGS=$(getopt -o "l:f:h" -l "cache_dir::,file::,help" -n "$0" -- "$@")

eval set -- "${ARGS}"

while true ; do
case "$1" in
-l)
# o has an optional argument. As we are in quoted mode,
# an empty parameter will be generated if its optional
# argument is not found.
case "$2" in
"") echo "Option Artifactory URL, no argument"; exit 1 ;;
*) ARTIFACTORY_URL=$2 ; shift 2 ;;
esac ;;
--cache_dir)
case "$2" in
"") echo "Option cache_dir, no argument"; exit 1 ;;
*) CCACHE_DIR=$2 ; shift 2 ;;
esac ;;
-f|--file)
case "$2" in
"") echo "Option file, no argument"; exit 1 ;;
*) PACKAGE_FILE=$2 ; shift 2 ;;
esac ;;
-h|--help) echo -e "${HELP}" ; exit 0 ;;
--) shift ; break ;;
*) echo "Internal error!" ; exit 1 ;;
esac
done

# Set defaults for vars modified by flags to this script
CCACHE_DIR=${CCACHE_DIR:="${HOME}/.ccache"}
# Archive name is keyed by OS and build-env image so caches are ABI-compatible.
PACKAGE_FILE=${PACKAGE_FILE:="ccache-${OS_NAME}-${BUILD_ENV_IMAGE_ID}.tar.gz"}
# All branch decorations on HEAD (remote prefixes stripped, first field dropped).
BRANCH_NAMES=$(git log --decorate | head -n 1 | sed 's/.*(\(.*\))/\1/' | sed 's=[a-zA-Z]*\/==g' | awk -F", " '{$1=""; print $0}')

if [[ -z "${ARTIFACTORY_URL}" || "${ARTIFACTORY_URL}" == "" ]];then
echo "You have not input ARTIFACTORY_URL !"
exit 1
fi

# Return 0 iff a cache archive exists for the given branch (HEAD request only).
function check_ccache() {
BRANCH=$1
echo "fetching ${BRANCH}/${PACKAGE_FILE}"
wget -q --spider "${ARTIFACTORY_URL}/${BRANCH}/${PACKAGE_FILE}"
return $?
}

# Download the archive for the given branch and unpack it into CCACHE_DIR.
function download_file() {
BRANCH=$1
wget -q "${ARTIFACTORY_URL}/${BRANCH}/${PACKAGE_FILE}" && \
mkdir -p "${CCACHE_DIR}" && \
tar zxf "${PACKAGE_FILE}" -C "${CCACHE_DIR}" && \
rm ${PACKAGE_FILE}
return $?
}

# PR builds (BRANCH_NAME like "PR-123"): prefer the target branch's cache,
# then fall back to the PR branch's own cache.
if [[ -n "${CHANGE_TARGET}" && "${BRANCH_NAME}" =~ "PR-" ]];then
check_ccache ${CHANGE_TARGET}
if [[ $? == 0 ]];then
download_file ${CHANGE_TARGET}
if [[ $? == 0 ]];then
echo "found cache"
exit 0
fi
fi

check_ccache ${BRANCH_NAME}
if [[ $? == 0 ]];then
download_file ${BRANCH_NAME}
if [[ $? == 0 ]];then
echo "found cache"
exit 0
fi
fi
fi

# Otherwise try each branch decorating HEAD (skipping the detached "HEAD" label).
for CURRENT_BRANCH in ${BRANCH_NAMES}
do
if [[ "${CURRENT_BRANCH}" != "HEAD" ]];then
check_ccache ${CURRENT_BRANCH}
if [[ $? == 0 ]];then
download_file ${CURRENT_BRANCH}
if [[ $? == 0 ]];then
echo "found cache"
exit 0
fi
fi
fi
done

echo "could not download cache" && exit 1

View File

@ -1,104 +0,0 @@
#!/bin/bash
# Collect C++ code-coverage data with lcov: capture a zero baseline, capture
# post-test counters, merge them, strip third-party/generated sources, and
# optionally upload the result to Codecov.

# Resolve the real directory of this script, following symlinks.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

HELP="
Usage:
$0 [flags] [Arguments]
-b Core Code build directory
-c Codecov token
-h or --help Print help information
Use \"$0 --help\" for more information about a given command.
"

ARGS=`getopt -o "b:c:h" -l "help" -n "$0" -- "$@"`

eval set -- "${ARGS}"

while true ; do
case "$1" in
-b)
# o has an optional argument. As we are in quoted mode,
# an empty parameter will be generated if its optional
# argument is not found.
case "$2" in
"") echo "Option CORE_BUILD_DIR, no argument"; exit 1 ;;
*) CORE_BUILD_DIR=$2 ; shift 2 ;;
esac ;;
-c)
case "$2" in
"") echo "Option CODECOV_TOKEN, no argument"; exit 1 ;;
*) CODECOV_TOKEN=$2 ; shift 2 ;;
esac ;;
-h|--help) echo -e "${HELP}" ; exit 0 ;;
--) shift ; break ;;
*) echo "Internal error!" ; exit 1 ;;
esac
done

# Set defaults for vars modified by flags to this script
MILVUS_CORE_DIR="${SCRIPTS_DIR}/../../core"
CORE_BUILD_DIR=${CORE_BUILD_DIR:="${MILVUS_CORE_DIR}/cmake_build"}

LCOV_CMD="lcov"
# LCOV_GEN_CMD="genhtml"

# Intermediate tracefile names.
FILE_INFO_BASE="base.info"
FILE_INFO_MILVUS="server.info"
FILE_INFO_OUTPUT="output.info"
FILE_INFO_OUTPUT_NEW="output_new.info"
DIR_LCOV_OUTPUT="lcov_out"

DIR_GCNO="${CORE_BUILD_DIR}"
# NOTE(review): INSTALL_PREFIX is never set in this script and DIR_UNITTEST is
# never used below — likely leftover from an earlier revision; confirm before
# relying on it.
DIR_UNITTEST="${INSTALL_PREFIX}/unittest"

cd ${SCRIPTS_DIR}

# delete old code coverage info files
rm -rf ${DIR_LCOV_OUTPUT}
rm -f ${FILE_INFO_BASE} ${FILE_INFO_MILVUS} ${FILE_INFO_OUTPUT} ${FILE_INFO_OUTPUT_NEW}

# get baseline (zero counters for every instrumented file)
${LCOV_CMD} -c -i -d ${DIR_GCNO} -o "${FILE_INFO_BASE}"
if [ $? -ne 0 ]; then
echo "gen baseline coverage run failed"
# NOTE(review): "exit -1" is non-portable and becomes exit status 255.
exit -1
fi

# gen code coverage
${LCOV_CMD} -d ${DIR_GCNO} -o "${FILE_INFO_MILVUS}" -c
# merge coverage (baseline + test run, so untouched files still appear)
${LCOV_CMD} -a ${FILE_INFO_BASE} -a ${FILE_INFO_MILVUS} -o "${FILE_INFO_OUTPUT}"

# remove third party from tracefiles
${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
"/usr/*" \
"*/boost/*" \
"*/cmake_build/*_ep-prefix/*" \
"*/src/index/cmake_build*" \
"*/src/index/thirdparty*" \
"*/src/grpc*" \
"*/src/metrics/MetricBase.h" \
"*/src/server/Server.cpp" \
"*/src/server/DBWrapper.cpp" \
"*/src/server/grpc_impl/GrpcServer.cpp" \
"*/thirdparty/*"

if [ $? -ne 0 ]; then
echo "gen ${FILE_INFO_OUTPUT_NEW} failed"
exit 2
fi

# Upload to Codecov only when a token was supplied; failure is non-fatal.
if [[ -n ${CODECOV_TOKEN} ]];then
export CODECOV_TOKEN="${CODECOV_TOKEN}"
curl -s https://codecov.io/bash | bash -s - -f output_new.info || echo "Codecov did not collect coverage reports"
fi

View File

@ -1,121 +0,0 @@
#!/bin/bash
# Run every installed C++ unittest binary against a throwaway MySQL database.
# Creates a uniquely-named database, runs each binary under
# ${INSTALL_PREFIX}/unittest (passing the MySQL URI only to test_db), then
# drops the database.
set -e

# Resolve the real directory of this script, following symlinks.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

HELP="
Usage:
$0 [flags] [Arguments]
-i [INSTALL_PREFIX] or --install_prefix=[INSTALL_PREFIX]
Install directory used by install.
--mysql_user=[MYSQL_USER_NAME] MySQL User Name
--mysql_password=[MYSQL_PASSWORD]
MySQL Password
--mysql_host=[MYSQL_HOST] MySQL Host
-h or --help Print help information
Use \"$0 --help\" for more information about a given command.
"

ARGS=`getopt -o "i:h" -l "install_prefix::,mysql_user::,mysql_password::,mysql_host::,help" -n "$0" -- "$@"`

eval set -- "${ARGS}"

while true ; do
case "$1" in
-i|--install_prefix)
# o has an optional argument. As we are in quoted mode,
# an empty parameter will be generated if its optional
# argument is not found.
case "$2" in
"") echo "Option install_prefix, no argument"; exit 1 ;;
*) INSTALL_PREFIX=$2 ; shift 2 ;;
esac ;;
--mysql_user)
case "$2" in
"") echo "Option mysql_user, no argument"; exit 1 ;;
*) MYSQL_USER_NAME=$2 ; shift 2 ;;
esac ;;
--mysql_password)
case "$2" in
"") echo "Option mysql_password, no argument"; exit 1 ;;
*) MYSQL_PASSWORD=$2 ; shift 2 ;;
esac ;;
--mysql_host)
case "$2" in
"") echo "Option mysql_host, no argument"; exit 1 ;;
*) MYSQL_HOST=$2 ; shift 2 ;;
esac ;;
-h|--help) echo -e "${HELP}" ; exit 0 ;;
--) shift ; break ;;
*) echo "Internal error!" ; exit 1 ;;
esac
done

# Set defaults for vars modified by flags to this script
INSTALL_PREFIX=${INSTALL_PREFIX:="/var/lib/milvus"}
MYSQL_USER_NAME=${MYSQL_USER_NAME:="root"}
MYSQL_PASSWORD=${MYSQL_PASSWORD:="123456"}
MYSQL_HOST=${MYSQL_HOST:="127.0.0.1"}
MYSQL_PORT=${MYSQL_PORT:="3306"}

DIR_UNITTEST="${INSTALL_PREFIX}/unittest"

# Make the freshly installed shared libraries visible to the test binaries.
if [ -d ${INSTALL_PREFIX}/lib ]; then
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${INSTALL_PREFIX}/lib
fi

if [ ! -d ${DIR_UNITTEST} ]; then
echo "The unittest folder does not exist!"
exit 1
fi

pushd ${SCRIPTS_DIR}

# Unique per-run database name (epoch nanoseconds) so parallel runs don't collide.
MYSQL_DB_NAME=milvus_`date +%s%N`

# Run one SQL statement against the test server.
# NOTE(review): with "set -e" at the top, a failing mysql command terminates
# the whole script before the explicit $? check — so the echo below and the
# final DROP DATABASE cleanup are skipped on failure. Confirm whether that is
# the intended behavior.
function mysql_exc()
{
cmd=$1
mysql -h${MYSQL_HOST} -u${MYSQL_USER_NAME} -p${MYSQL_PASSWORD} -e "${cmd}"
if [ $? -ne 0 ]; then
echo "mysql $cmd run failed"
fi
}

mysql_exc "CREATE DATABASE IF NOT EXISTS ${MYSQL_DB_NAME};"
mysql_exc "GRANT ALL PRIVILEGES ON ${MYSQL_DB_NAME}.* TO '${MYSQL_USER_NAME}'@'%';"
mysql_exc "FLUSH PRIVILEGES;"
mysql_exc "USE ${MYSQL_DB_NAME};"

# Execute every binary in the unittest directory.
for test in `ls ${DIR_UNITTEST}`; do
echo $test
case ${test} in
test_db)
# set run args for test_db
args="mysql://${MYSQL_USER_NAME}:${MYSQL_PASSWORD}@${MYSQL_HOST}:${MYSQL_PORT}/${MYSQL_DB_NAME}"
;;
*test_*)
args=""
;;
esac
# run unittest
# NOTE(review): as above, "set -e" exits on a failing test before this check.
${DIR_UNITTEST}/${test} "${args}"
if [ $? -ne 0 ]; then
echo ${args}
echo ${DIR_UNITTEST}/${test} "run failed"
exit 1
fi
done

# Drop the throwaway database (only reached when all tests passed).
mysql_exc "DROP DATABASE IF EXISTS ${MYSQL_DB_NAME};"

popd

View File

@ -1,104 +0,0 @@
#!/bin/bash
# Upload the local ccache directory to Artifactory as a per-branch tarball so
# later CI builds can start warm. PR builds skip the upload when the target
# branch already has a cache; detached-HEAD builds skip it entirely.

HELP="
Usage:
$0 [flags] [Arguments]
-l [ARTIFACTORY_URL] Artifactory URL
--cache_dir=[CCACHE_DIR] Ccache directory
-f [FILE] or --file=[FILE] Ccache compress package file
-u [USERNAME] Artifactory Username
-p [PASSWORD] Artifactory Password
-h or --help Print help information
Use \"$0 --help\" for more information about a given command.
"

ARGS=$(getopt -o "l:f:u:p:h" -l "cache_dir::,file::,help" -n "$0" -- "$@")

eval set -- "${ARGS}"

while true ; do
case "$1" in
-l)
# o has an optional argument. As we are in quoted mode,
# an empty parameter will be generated if its optional
# argument is not found.
case "$2" in
"") echo "Option Artifactory URL, no argument"; exit 1 ;;
*) ARTIFACTORY_URL=$2 ; shift 2 ;;
esac ;;
--cache_dir)
case "$2" in
"") echo "Option cache_dir, no argument"; exit 1 ;;
*) CCACHE_DIR=$2 ; shift 2 ;;
esac ;;
-u)
case "$2" in
"") echo "Option Username, no argument"; exit 1 ;;
*) USERNAME=$2 ; shift 2 ;;
esac ;;
-p)
case "$2" in
"") echo "Option Password, no argument"; exit 1 ;;
*) PASSWORD=$2 ; shift 2 ;;
esac ;;
-f|--file)
case "$2" in
"") echo "Option file, no argument"; exit 1 ;;
*) PACKAGE_FILE=$2 ; shift 2 ;;
esac ;;
-h|--help) echo -e "${HELP}" ; exit 0 ;;
--) shift ; break ;;
*) echo "Internal error!" ; exit 1 ;;
esac
done

# Set defaults for vars modified by flags to this script
CCACHE_DIR=${CCACHE_DIR:="${HOME}/.ccache"}
# Archive name is keyed by OS and build-env image so caches are ABI-compatible.
PACKAGE_FILE=${PACKAGE_FILE:="ccache-${OS_NAME}-${BUILD_ENV_IMAGE_ID}.tar.gz"}
# Last branch decoration on HEAD with any remote prefix stripped.
BRANCH_NAME=$(git log --decorate | head -n 1 | sed 's/.*(\(.*\))/\1/' | sed 's/.*, //' | sed 's=[a-zA-Z]*\/==g')

if [[ -z "${ARTIFACTORY_URL}" || "${ARTIFACTORY_URL}" == "" ]];then
echo "You have not input ARTIFACTORY_URL !"
exit 1
fi

if [[ ! -d "${CCACHE_DIR}" ]]; then
echo "\"${CCACHE_DIR}\" directory does not exist !"
exit 1
fi

# Return 0 iff a cache archive already exists for the given branch.
function check_ccache() {
BRANCH=$1
wget -q --spider "${ARTIFACTORY_URL}/${BRANCH}/${PACKAGE_FILE}"
return $?
}

# PR builds: if the target branch already has a cache, uploading ours adds nothing.
if [[ -n "${CHANGE_TARGET}" && "${BRANCH_NAME}" =~ "PR-" ]]; then
check_ccache ${CHANGE_TARGET}
if [[ $? == 0 ]];then
echo "Skip Update ccache package ..." && exit 0
fi
fi

echo -e "===\n=== ccache statistics after build\n==="
ccache --show-stats

# Skip detached-HEAD builds (no meaningful branch to file the cache under).
if [[ "${BRANCH_NAME}" != "HEAD" ]];then
REMOTE_PACKAGE_PATH="${ARTIFACTORY_URL}/${BRANCH_NAME}"
echo "Updating ccache package file: ${PACKAGE_FILE}"
tar zcf ./"${PACKAGE_FILE}" -C "${CCACHE_DIR}" .
echo "Uploading ccache package file ${PACKAGE_FILE} to ${REMOTE_PACKAGE_PATH}"
curl -u"${USERNAME}":"${PASSWORD}" -T "${PACKAGE_FILE}" "${REMOTE_PACKAGE_PATH}"/"${PACKAGE_FILE}"
if [[ $? == 0 ]];then
echo "Uploading ccache package file success !"
exit 0
else
echo "Uploading ccache package file fault !"
exit 1
fi
fi

echo "Skip Update ccache package ..."

View File

@ -1,31 +0,0 @@
#Configuration File for CodeCov
coverage:
notify:
require_ci_to_pass: yes
precision: 2
round: down
range: "70...100"
status:
project:
default:
threshold: 0.2 # Allow coverage to drop by up to threshold% and still post a success status.
patch: yes
changes: no
comment:
layout: "reach, diff, flags, files"
behavior: default
require_changes: no
flags:
nightly:
joined: false
ignore:
- "LICENSES"
- ".git"
- "*.yml"
- "*.md"
- "docs/.*"

14
core/.gitignore vendored
View File

@ -1,14 +0,0 @@
milvus/
conf/server_config.yaml
conf/server_config.yaml.ori
conf/log_config.conf
src/config.h
src/version.h
lcov_out/
base.info
output.info
output_new.info
server.info
*.pyc
src/grpc/python_gen.h
src/grpc/python/

View File

@ -1,332 +0,0 @@
#-------------------------------------------------------------------------------
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
#-------------------------------------------------------------------------------
cmake_minimum_required(VERSION 3.12)
message(STATUS "Building using CMake version: ${CMAKE_VERSION}")
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
# Capture the current wall-clock time (via the system `date` command) into the
# variable named by CURRENT_TIME; used to stamp the build with its build time.
MACRO(GET_CURRENT_TIME CURRENT_TIME)
execute_process(COMMAND "date" +"%Y-%m-%d %H:%M.%S" OUTPUT_VARIABLE ${CURRENT_TIME})
ENDMACRO(GET_CURRENT_TIME)
GET_CURRENT_TIME(BUILD_TIME)
# `date` output ends with a newline; strip it before use.
string(REGEX REPLACE "\n" "" BUILD_TIME ${BUILD_TIME})
message(STATUS "Build time = ${BUILD_TIME}")
if (NOT DEFINED CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build.")
endif ()
# Derive the Milvus version from the git branch name.
# Release branches look like "0.10.3"; this regex recognizes that shape.
set(GIT_BRANCH_NAME_REGEX "[0-9]+\\.[0-9]+\\.[0-9]")

# Resolve the branch name using three fallbacks:
#   1. parse the decoration on the latest `git log` line,
#   2. `git rev-parse --abbrev-ref HEAD`,
#   3. `git symbolic-ref`.
MACRO(GET_GIT_BRANCH_NAME GIT_BRANCH_NAME)
execute_process(COMMAND sh "-c" "git log --decorate | head -n 1 | sed 's/.*(\\(.*\\))/\\1/' | sed 's/.*, //' | sed 's=[a-zA-Z]*\/==g'"
OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
if (NOT GIT_BRANCH_NAME MATCHES "${GIT_BRANCH_NAME_REGEX}")
execute_process(COMMAND "git" rev-parse --abbrev-ref HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
endif ()
if (NOT GIT_BRANCH_NAME MATCHES "${GIT_BRANCH_NAME_REGEX}")
# NOTE(review): "HEAD" is passed twice here; `git symbolic-ref --short -q HEAD`
# (single argument) is likely what was intended — confirm before relying on
# this fallback.
execute_process(COMMAND "git" symbolic-ref --short -q HEAD HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
endif ()
ENDMACRO(GET_GIT_BRANCH_NAME)
GET_GIT_BRANCH_NAME(GIT_BRANCH_NAME)
message(STATUS "GIT_BRANCH_NAME = ${GIT_BRANCH_NAME}")
message(STATUS "GIT_BRANCH_NAME = ${GIT_BRANCH_NAME}")
if (NOT GIT_BRANCH_NAME STREQUAL "")
string(REGEX REPLACE "\n" "" GIT_BRANCH_NAME ${GIT_BRANCH_NAME})
endif ()
set(MILVUS_VERSION "${GIT_BRANCH_NAME}")
string(REGEX MATCH "${GIT_BRANCH_NAME_REGEX}" MILVUS_VERSION "${MILVUS_VERSION}")
# Extract the most recent commit hash (second field of the latest `git log`
# decoration line) into the variable named by LAST_COMMIT_ID.
MACRO(GET_LAST_COMMIT_ID LAST_COMMIT_ID)
execute_process(COMMAND sh "-c" "git log --decorate | head -n 1 | awk '{print $2}'"
OUTPUT_VARIABLE ${LAST_COMMIT_ID})
ENDMACRO(GET_LAST_COMMIT_ID)
GET_LAST_COMMIT_ID(LAST_COMMIT_ID)
message(STATUS "LAST_COMMIT_ID = ${LAST_COMMIT_ID}")
if (NOT LAST_COMMIT_ID STREQUAL "")
string(REGEX REPLACE "\n" "" LAST_COMMIT_ID ${LAST_COMMIT_ID})
set(LAST_COMMIT_ID "${LAST_COMMIT_ID}")
else ()
set(LAST_COMMIT_ID "Unknown")
endif ()
# set build type
if (CMAKE_BUILD_TYPE STREQUAL "Release")
set(BUILD_TYPE "Release")
else ()
set(BUILD_TYPE "Debug")
endif ()
message(STATUS "Build type = ${BUILD_TYPE}")
project(milvus VERSION "${MILVUS_VERSION}")
project(milvus_engine LANGUAGES CXX)
unset(CMAKE_EXPORT_COMPILE_COMMANDS CACHE)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
set(MILVUS_VERSION_MAJOR "${milvus_VERSION_MAJOR}")
set(MILVUS_VERSION_MINOR "${milvus_VERSION_MINOR}")
set(MILVUS_VERSION_PATCH "${milvus_VERSION_PATCH}")
if (MILVUS_VERSION_MAJOR STREQUAL ""
OR MILVUS_VERSION_MINOR STREQUAL ""
OR MILVUS_VERSION_PATCH STREQUAL "")
message(WARNING "Failed to determine Milvus version from git branch name")
set(MILVUS_VERSION "0.10.3")
endif ()
message(STATUS "Build version = ${MILVUS_VERSION}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/version.h.in ${CMAKE_CURRENT_SOURCE_DIR}/src/version.h @ONLY)
message(STATUS "Milvus version: "
"${MILVUS_VERSION_MAJOR}.${MILVUS_VERSION_MINOR}.${MILVUS_VERSION_PATCH} "
"(full: '${MILVUS_VERSION}')")
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED on)
if (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)|(amd64)|(AMD64)")
message(STATUS "Building milvus_engine on x86 architecture")
set(MILVUS_BUILD_ARCH x86_64)
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "(ppc)")
message(STATUS "Building milvus_engine on ppc architecture")
set(MILVUS_BUILD_ARCH ppc64le)
else ()
message(WARNING "Unknown processor type")
message(WARNING "CMAKE_SYSTEM_PROCESSOR=${CMAKE_SYSTEM_PROCESSOR}")
set(MILVUS_BUILD_ARCH unknown)
endif ()
# Ensure that a default make is set
if ("${MAKE}" STREQUAL "")
if (NOT MSVC)
find_program(MAKE make)
endif ()
endif ()
find_path(MYSQL_INCLUDE_DIR
NAMES "mysql.h"
PATH_SUFFIXES "mysql")
if (${MYSQL_INCLUDE_DIR} STREQUAL "MYSQL_INCLUDE_DIR-NOTFOUND")
message(FATAL_ERROR "Could not found MySQL include directory")
else ()
include_directories(${MYSQL_INCLUDE_DIR})
endif ()
set(MILVUS_SOURCE_DIR ${PROJECT_SOURCE_DIR})
set(MILVUS_BINARY_DIR ${PROJECT_BINARY_DIR})
set(MILVUS_ENGINE_SRC ${PROJECT_SOURCE_DIR}/src)
set(MILVUS_THIRDPARTY_SRC ${PROJECT_SOURCE_DIR}/thirdparty)
include(ExternalProject)
include(DefineOptions)
include(BuildUtils)
include(ThirdPartyPackages)
if (MILVUS_USE_CCACHE)
find_program(CCACHE_FOUND ccache)
if (CCACHE_FOUND)
message(STATUS "Using ccache: ${CCACHE_FOUND}")
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_FOUND})
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_FOUND})
# let ccache preserve C++ comments, because some of them may be
# meaningful to the compiler
set(ENV{CCACHE_COMMENTS} "1")
endif (CCACHE_FOUND)
endif ()
# Select GPU or CPU build. The GPU path surfaces MILVUS_GPU_VERSION as a
# compile definition, enables CUDA as a language and pins the nvcc flags
# (-fPIC for shared objects, C++11, extended lambdas for device code).
if (MILVUS_GPU_VERSION)
    message(STATUS "Building Milvus GPU version")
    add_compile_definitions("MILVUS_GPU_VERSION")
    enable_language(CUDA)
    find_package(CUDA 10 REQUIRED)
    set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda")
else ()
    message(STATUS "Building Milvus CPU version")
endif ()

# Optional feature toggles, forwarded to the sources as compile definitions.
if (MILVUS_WITH_PROMETHEUS)
    add_compile_definitions("MILVUS_WITH_PROMETHEUS")
endif ()

message("ENABLE_CPU_PROFILING = ${ENABLE_CPU_PROFILING}")
if (ENABLE_CPU_PROFILING STREQUAL "ON")
    ADD_DEFINITIONS(-DENABLE_CPU_PROFILING)
endif()

# FIU fault-injection hooks (used by unit tests to exercise error paths).
if (MILVUS_WITH_FIU)
    add_compile_definitions("FIU_ENABLE")
endif ()

# Common compiler flags: -O3 for Release, -O0 -g otherwise.
# ELPP_THREAD_SAFE makes easylogging++ thread safe; -fopenmp is assumed to
# be needed by the compute kernels (TODO confirm against src/).
if (CMAKE_BUILD_TYPE STREQUAL "Release")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fPIC -DELPP_THREAD_SAFE -fopenmp")
    if (MILVUS_GPU_VERSION)
        set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O3")
    endif ()
else ()
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -fPIC -DELPP_THREAD_SAFE -fopenmp")
    if (MILVUS_GPU_VERSION)
        set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O0 -g")
    endif ()
endif ()

config_summary()

add_subdirectory(src)

# Unit tests are built with easylogging++ logging compiled out; coverage
# instrumentation is only added when explicitly requested.
if (BUILD_UNIT_TEST STREQUAL "ON")
    if (BUILD_COVERAGE STREQUAL "ON")
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
    endif ()
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DELPP_DISABLE_LOGS")
    add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/unittest)
endif ()

add_custom_target(Clean-All COMMAND ${CMAKE_BUILD_TOOL} clean)

# Default the database path to the install prefix when the user set none.
if ("${MILVUS_DB_PATH}" STREQUAL "")
    set(MILVUS_DB_PATH "${CMAKE_INSTALL_PREFIX}")
endif ()

# Render conf/server_config.yaml from the template; @ONLY restricts
# substitution to @VAR@ markers (GPU_ENABLE and the variables above).
if (MILVUS_GPU_VERSION)
    set(GPU_ENABLE "true")
    configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.template
            ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml
            @ONLY)
else ()
    set(GPU_ENABLE "false")
    configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.template
            ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml
            @ONLY)
endif ()

# Install helper scripts (kept executable) and the generated server config.
install(DIRECTORY scripts/
        DESTINATION scripts
        FILE_PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
        GROUP_EXECUTE GROUP_READ
        WORLD_EXECUTE WORLD_READ
        FILES_MATCHING PATTERN "*.sh")
install(DIRECTORY scripts/migration
        DESTINATION scripts
        FILE_PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
        GROUP_EXECUTE GROUP_READ
        WORLD_EXECUTE WORLD_READ)
install(FILES
        conf/server_config.yaml
        DESTINATION
        conf)
# Tooling for the lint/format targets below: a Python interpreter to run the
# build-support scripts and (optionally) clang-format / clang-tidy.
find_package(Python COMPONENTS Interpreter Development)
find_package(ClangTools)

set(BUILD_SUPPORT_DIR "${CMAKE_SOURCE_DIR}/build-support")

#
# "make lint" target
#
if (NOT MILVUS_VERBOSE_LINT)
    # pass --quiet to the helper scripts unless verbose lint output is wanted
    set(MILVUS_LINT_QUIET "--quiet")
endif ()

if (NOT LINT_EXCLUSIONS_FILE)
    # source files matching a glob from a line in this file
    # will be excluded from linting (cpplint, clang-tidy, clang-format)
    set(LINT_EXCLUSIONS_FILE ${BUILD_SUPPORT_DIR}/lint_exclusions.txt)
endif ()

find_program(CPPLINT_BIN NAMES cpplint cpplint.py HINTS ${BUILD_SUPPORT_DIR})
message(STATUS "Found cpplint executable at ${CPPLINT_BIN}")

#
# "make lint" targets
#
# Runs build-support/run_cpplint.py over the whole source tree.
add_custom_target(lint
        ${PYTHON_EXECUTABLE}
        ${BUILD_SUPPORT_DIR}/run_cpplint.py
        --cpplint_binary
        ${CPPLINT_BIN}
        --exclude_globs
        ${LINT_EXCLUSIONS_FILE}
        --source_dir
        ${CMAKE_CURRENT_SOURCE_DIR}
        ${MILVUS_LINT_QUIET})

#
# "make clang-format" and "make check-clang-format" targets
#
if (${CLANG_FORMAT_FOUND})
    # runs clang format and updates files in place.
    add_custom_target(clang-format
            ${PYTHON_EXECUTABLE}
            ${BUILD_SUPPORT_DIR}/run_clang_format.py
            --clang_format_binary
            ${CLANG_FORMAT_BIN}
            --exclude_globs
            ${LINT_EXCLUSIONS_FILE}
            --source_dir
            ${CMAKE_CURRENT_SOURCE_DIR}/src
            --fix
            ${MILVUS_LINT_QUIET})

    # runs clang format and exits with a non-zero exit code if any files need to be reformatted
    add_custom_target(check-clang-format
            ${PYTHON_EXECUTABLE}
            ${BUILD_SUPPORT_DIR}/run_clang_format.py
            --clang_format_binary
            ${CLANG_FORMAT_BIN}
            --exclude_globs
            ${LINT_EXCLUSIONS_FILE}
            --source_dir
            ${CMAKE_CURRENT_SOURCE_DIR}/src
            ${MILVUS_LINT_QUIET})
endif ()

#
# "make clang-tidy" and "make check-clang-tidy" targets
#
# Both need the compile_commands.json produced by this build tree.
if (${CLANG_TIDY_FOUND})
    # runs clang-tidy and attempts to fix any warning automatically
    add_custom_target(clang-tidy
            ${PYTHON_EXECUTABLE}
            ${BUILD_SUPPORT_DIR}/run_clang_tidy.py
            --clang_tidy_binary
            ${CLANG_TIDY_BIN}
            --exclude_globs
            ${LINT_EXCLUSIONS_FILE}
            --compile_commands
            ${CMAKE_BINARY_DIR}/compile_commands.json
            --source_dir
            ${CMAKE_CURRENT_SOURCE_DIR}/src
            --fix
            ${MILVUS_LINT_QUIET})

    # runs clang-tidy and exits with a non-zero exit code if any errors are found.
    add_custom_target(check-clang-tidy
            ${PYTHON_EXECUTABLE}
            ${BUILD_SUPPORT_DIR}/run_clang_tidy.py
            --clang_tidy_binary
            ${CLANG_TIDY_BIN}
            --exclude_globs
            ${LINT_EXCLUSIONS_FILE}
            --compile_commands
            ${CMAKE_BINARY_DIR}/compile_commands.json
            --source_dir
            ${CMAKE_CURRENT_SOURCE_DIR}/src
            ${MILVUS_LINT_QUIET})
endif ()

View File

@ -1,38 +0,0 @@
<code_scheme name="milvus" version="173">
<Objective-C>
<option name="INDENT_NAMESPACE_MEMBERS" value="0" />
<option name="INDENT_VISIBILITY_KEYWORDS" value="1" />
<option name="KEEP_STRUCTURES_IN_ONE_LINE" value="true" />
<option name="KEEP_CASE_EXPRESSIONS_IN_ONE_LINE" value="true" />
<option name="FUNCTION_NON_TOP_AFTER_RETURN_TYPE_WRAP" value="0" />
<option name="FUNCTION_TOP_AFTER_RETURN_TYPE_WRAP" value="2" />
<option name="FUNCTION_PARAMETERS_WRAP" value="5" />
<option name="FUNCTION_CALL_ARGUMENTS_WRAP" value="5" />
<option name="TEMPLATE_CALL_ARGUMENTS_WRAP" value="5" />
<option name="TEMPLATE_CALL_ARGUMENTS_ALIGN_MULTILINE" value="true" />
<option name="CLASS_CONSTRUCTOR_INIT_LIST_WRAP" value="5" />
<option name="ALIGN_INIT_LIST_IN_COLUMNS" value="false" />
<option name="SPACE_BEFORE_PROTOCOLS_BRACKETS" value="false" />
<option name="SPACE_BEFORE_POINTER_IN_DECLARATION" value="false" />
<option name="SPACE_AFTER_POINTER_IN_DECLARATION" value="true" />
<option name="SPACE_BEFORE_REFERENCE_IN_DECLARATION" value="false" />
<option name="SPACE_AFTER_REFERENCE_IN_DECLARATION" value="true" />
<option name="KEEP_BLANK_LINES_BEFORE_END" value="1" />
</Objective-C>
<codeStyleSettings language="ObjectiveC">
<option name="KEEP_BLANK_LINES_IN_DECLARATIONS" value="1" />
<option name="KEEP_BLANK_LINES_IN_CODE" value="1" />
<option name="KEEP_BLANK_LINES_BEFORE_RBRACE" value="1" />
<option name="BLANK_LINES_AROUND_CLASS" value="0" />
<option name="BLANK_LINES_AROUND_METHOD_IN_INTERFACE" value="0" />
<option name="BLANK_LINES_AFTER_CLASS_HEADER" value="1" />
<option name="SPACE_AFTER_TYPE_CAST" value="false" />
<option name="BINARY_OPERATION_SIGN_ON_NEXT_LINE" value="true" />
<option name="KEEP_SIMPLE_BLOCKS_IN_ONE_LINE" value="false" />
<option name="FOR_STATEMENT_WRAP" value="1" />
<option name="ASSIGNMENT_WRAP" value="1" />
<indentOptions>
<option name="CONTINUATION_INDENT_SIZE" value="4" />
</indentOptions>
</codeStyleSettings>
</code_scheme>

File diff suppressed because it is too large Load Diff

View File

@ -1,11 +0,0 @@
*cmake-build-debug*
*cmake-build-release*
*cmake_build*
*src/index/thirdparty*
*thirdparty*
*easylogging++*
*SqliteMetaImpl.cpp
*src/grpc*
*thirdparty*
*milvus/include*
*unittest/server/test_web.cpp

View File

@ -1,110 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import multiprocessing as mp
import os
from fnmatch import fnmatch
from subprocess import Popen
def chunk(seq, n):
    """Split *seq* into consecutive chunks of *n* elements each.

    The final chunk may hold fewer than *n* elements, but a non-empty
    input never produces an empty chunk.  Works on any iterable.
    """
    out = []
    bucket = []
    for item in seq:
        # flush a full bucket before adding the next element
        if len(bucket) == n:
            out.append(bucket)
            bucket = []
        bucket.append(item)
    if bucket:
        out.append(bucket)
    return out
def dechunk(chunks):
    """Flatten a list of chunks back into a single flat list.

    Inverse of ``chunk`` (up to chunk boundaries).  The loop variable is
    named ``piece`` rather than ``chunk`` so it no longer shadows the
    module-level ``chunk()`` function.
    """
    seq = []
    for piece in chunks:
        seq.extend(piece)
    return seq
def run_parallel(cmds, **kwargs):
    """Launch every command in *cmds* via ``subprocess.Popen`` and wait.

    Commands are started in batches of ``multiprocessing.cpu_count() * 2``
    concurrent processes; *kwargs* are forwarded to each ``Popen`` call.

    Returns a list of ``(returncode, stdout, stderr)`` tuples, one per
    command, in the original order.
    """
    finished = []
    batch_size = mp.cpu_count() * 2
    for batch in chunk(cmds, batch_size):
        running = [Popen(cmd, **kwargs) for cmd in batch]
        # communicate() waits for each process and drains its pipes
        for proc in running:
            out, err = proc.communicate()
            finished.append((proc.returncode, out, err))
    return finished
# File extensions that count as lintable C/C++ sources.
_source_extensions = '''
.h
.cc
.cpp
'''.split()


def get_sources(source_dir, exclude_globs=()):
    """Recursively collect lintable source files under *source_dir*.

    A file is returned (as an absolute path) when its extension is listed
    in ``_source_extensions`` and its absolute path matches none of the
    patterns in *exclude_globs* (``fnmatch``-style globs).

    The default for ``exclude_globs`` is an immutable tuple instead of the
    original mutable ``[]`` default (a Python anti-pattern), which is
    backward compatible for all callers.
    """
    sources = []
    for directory, _subdirs, basenames in os.walk(source_dir):
        for basename in basenames:
            path = os.path.join(directory, basename)
            # filter out non-source files
            if os.path.splitext(path)[1] not in _source_extensions:
                continue
            path = os.path.abspath(path)
            # filter out files that match the exclusion globs
            if any(fnmatch(path, glob) for glob in exclude_globs):
                continue
            sources.append(path)
    return sources
def stdout_pathcolonline(completed_process, filenames):
    """Distill the files a lint tool actually complained about.

    *completed_process* is a ``(returncode, stdout, stderr)`` tuple whose
    stdout may contain lines of the form ``<path>:<line>: complaint``.
    Returns ``(reported, stdout)`` where ``reported`` is the set of
    matched ``"<path>:"`` prefixes (note: the trailing ':' is retained,
    matching the original behavior) and ``stdout`` is the raw bytes.
    Each file is reported at most once.
    """
    _returncode, stdout, _stderr = completed_process
    pending = {name.encode('utf-8') + b':' for name in filenames}
    reported = set()
    for line in stdout.splitlines():
        matched = None
        for prefix in pending:
            if line.startswith(prefix):
                reported.add(prefix.decode('utf-8'))
                matched = prefix
                break
        if matched is not None:
            # stop re-scanning for a file that was already reported
            pending.remove(matched)
    return reported, stdout

View File

@ -1,142 +0,0 @@
#!/usr/bin/env python2
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import lintutils
from subprocess import PIPE
import argparse
import difflib
import multiprocessing as mp
import sys
from functools import partial
# examine the output of clang-format and if changes are
# present assemble a (unified)patch of the difference
def _check_one_file(completed_processes, filename):
with open(filename, "rb") as reader:
original = reader.read()
returncode, stdout, stderr = completed_processes[filename]
formatted = stdout
if formatted != original:
# Run the equivalent of diff -u
diff = list(difflib.unified_diff(
original.decode('utf8').splitlines(True),
formatted.decode('utf8').splitlines(True),
fromfile=filename,
tofile="{} (after clang format)".format(
filename)))
else:
diff = None
return filename, diff
if __name__ == "__main__":
    # CLI driver: either rewrites files in place (--fix) or prints a unified
    # diff of what clang-format would change and exits non-zero.
    parser = argparse.ArgumentParser(
        description="Runs clang-format on all of the source "
                    "files. If --fix is specified enforce format by "
                    "modifying in place, otherwise compare the output "
                    "with the existing file and output any necessary "
                    "changes as a patch in unified diff format")
    parser.add_argument("--clang_format_binary",
                        required=True,
                        help="Path to the clang-format binary")
    parser.add_argument("--exclude_globs",
                        help="Filename containing globs for files "
                             "that should be excluded from the checks")
    parser.add_argument("--source_dir",
                        required=True,
                        help="Root directory of the source code")
    parser.add_argument("--fix", default=False,
                        action="store_true",
                        help="If specified, will re-format the source "
                             "code instead of comparing the re-formatted "
                             "output, defaults to %(default)s")
    parser.add_argument("--quiet", default=False,
                        action="store_true",
                        help="If specified, only print errors")
    arguments = parser.parse_args()

    # one glob per line in the exclusions file
    exclude_globs = []
    if arguments.exclude_globs:
        for line in open(arguments.exclude_globs):
            exclude_globs.append(line.strip())

    formatted_filenames = []
    for path in lintutils.get_sources(arguments.source_dir, exclude_globs):
        formatted_filenames.append(str(path))

    if arguments.fix:
        if not arguments.quiet:
            print("\n".join(map(lambda x: "Formatting {}".format(x),
                                formatted_filenames)))

        # Break clang-format invocations into chunks: each invocation formats
        # 16 files. Wait for all processes to complete
        results = lintutils.run_parallel([
            [arguments.clang_format_binary, "-i"] + some
            for some in lintutils.chunk(formatted_filenames, 16)
        ])
        for returncode, stdout, stderr in results:
            # if any clang-format reported a parse error, bubble it
            if returncode != 0:
                sys.exit(returncode)

    else:
        # run an instance of clang-format for each source file in parallel,
        # then wait for all processes to complete
        results = lintutils.run_parallel([
            [arguments.clang_format_binary, filename]
            for filename in formatted_filenames
        ], stdout=PIPE, stderr=PIPE)
        for returncode, stdout, stderr in results:
            # if any clang-format reported a parse error, bubble it
            if returncode != 0:
                sys.exit(returncode)

        error = False
        checker = partial(_check_one_file, {
            filename: result
            for filename, result in zip(formatted_filenames, results)
        })
        pool = mp.Pool()
        try:
            # check the output from each invocation of clang-format in parallel
            for filename, diff in pool.imap(checker, formatted_filenames):
                if not arguments.quiet:
                    print("Checking {}".format(filename))
                if diff:
                    print("{} had clang-format style issues".format(filename))
                    # Print out the diff to stderr
                    error = True
                    # pad with a newline
                    print(file=sys.stderr)
                    diff_out = []
                    for diff_str in diff:
                        diff_out.append(diff_str.encode('raw_unicode_escape'))
                    # NOTE(review): writes bytes to sys.stderr — valid under
                    # the python2 shebang this script declares; confirm before
                    # running under python3
                    sys.stderr.writelines(diff_out)
        except Exception:
            error = True
            raise
        finally:
            pool.terminate()
            pool.join()
        sys.exit(1 if error else 0)

View File

@ -1,126 +0,0 @@
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse
import multiprocessing as mp
import lintutils
from subprocess import PIPE
import sys
from functools import partial
def _get_chunk_key(filenames):
# lists are not hashable so key on the first filename in a chunk
return filenames[0]
# clang-tidy prints complaints as '/path:line_number: complaint', so the
# stdout of a finished invocation can be scanned for the files to fix.
def _check_some_files(completed_processes, filenames):
    """Map one finished clang-tidy invocation back to the files it flagged."""
    finished = completed_processes[_get_chunk_key(filenames)]
    return lintutils.stdout_pathcolonline(finished, filenames)
def _check_all(cmd, filenames):
    """Run *cmd* (clang-tidy plus fixed arguments) over *filenames* in
    chunks of 16 files per invocation; print every suggested fix and exit
    the process with status 1 if any file drew a complaint."""
    # each clang-tidy instance will process 16 files
    chunks = lintutils.chunk(filenames, 16)
    cmds = [cmd + some for some in chunks]
    results = lintutils.run_parallel(cmds, stderr=PIPE, stdout=PIPE)
    error = False
    # record completed processes (keyed by the first filename in the input
    # chunk) for lookup in _check_some_files
    completed_processes = {
        _get_chunk_key(some): result
        for some, result in zip(chunks, results)
    }
    checker = partial(_check_some_files, completed_processes)
    pool = mp.Pool()
    try:
        # check output of completed clang-tidy invocations in parallel
        for problem_files, stdout in pool.imap(checker, chunks):
            if problem_files:
                msg = "clang-tidy suggested fixes for {}"
                print("\n".join(map(msg.format, problem_files)))
                print(stdout)
                error = True
    except Exception:
        # remember the failure so the finally block still tears the pool down
        error = True
        raise
    finally:
        pool.terminate()
        pool.join()
    if error:
        sys.exit(1)
if __name__ == "__main__":
    # CLI driver: with --fix, clang-tidy rewrites sources (parse errors
    # bubble out via the exit code); otherwise _check_all reports problems.
    parser = argparse.ArgumentParser(
        description="Runs clang-tidy on all ")
    parser.add_argument("--clang_tidy_binary",
                        required=True,
                        help="Path to the clang-tidy binary")
    parser.add_argument("--exclude_globs",
                        help="Filename containing globs for files "
                             "that should be excluded from the checks")
    parser.add_argument("--compile_commands",
                        required=True,
                        help="compile_commands.json to pass clang-tidy")
    parser.add_argument("--source_dir",
                        required=True,
                        help="Root directory of the source code")
    parser.add_argument("--fix", default=False,
                        action="store_true",
                        help="If specified, will attempt to fix the "
                             "source code instead of recommending fixes, "
                             "defaults to %(default)s")
    parser.add_argument("--quiet", default=False,
                        action="store_true",
                        help="If specified, only print errors")
    arguments = parser.parse_args()

    # one glob per line in the exclusions file
    exclude_globs = []
    if arguments.exclude_globs:
        for line in open(arguments.exclude_globs):
            exclude_globs.append(line.strip())

    linted_filenames = []
    for path in lintutils.get_sources(arguments.source_dir, exclude_globs):
        linted_filenames.append(path)

    if not arguments.quiet:
        msg = 'Tidying {}' if arguments.fix else 'Checking {}'
        print("\n".join(map(msg.format, linted_filenames)))

    cmd = [
        arguments.clang_tidy_binary,
        '-p',
        arguments.compile_commands
    ]
    if arguments.fix:
        cmd.append('-fix')
        # 16 files per clang-tidy invocation, run in parallel
        results = lintutils.run_parallel(
            [cmd + some for some in lintutils.chunk(linted_filenames, 16)])
        for returncode, stdout, stderr in results:
            if returncode != 0:
                sys.exit(returncode)

    else:
        _check_all(cmd, linted_filenames)

View File

@ -1,132 +0,0 @@
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import lintutils
from subprocess import PIPE, STDOUT
import argparse
import multiprocessing as mp
import sys
import platform
from functools import partial
# NOTE(wesm):
#
# * readability/casting is disabled as it aggressively warns about functions
# with names like "int32", so "int32(x)", where int32 is a function name,
# warns with
# cpplint categories disabled for this code base (each entry is passed to
# cpplint's --filter option with a leading '-').
_filters = '''
-whitespace/comments
-readability/casting
-readability/todo
-readability/alt_tokens
-build/header_guard
-build/c++11
-runtime/references
-build/include_order
'''.split()


def _get_chunk_key(filenames):
    # lists are not hashable so key on the first filename in a chunk
    return filenames[0]


def _check_some_files(completed_processes, filenames):
    # cpplint outputs complaints in '/path:line_number: complaint' format,
    # so we can scan its output to get a list of files to fix
    result = completed_processes[_get_chunk_key(filenames)]
    return lintutils.stdout_pathcolonline(result, filenames)
if __name__ == "__main__":
    # CLI driver: run cpplint over the source tree in parallel chunks and
    # exit non-zero when any file draws a complaint.
    parser = argparse.ArgumentParser(
        description="Runs cpplint on all of the source files.")
    parser.add_argument("--cpplint_binary",
                        required=True,
                        help="Path to the cpplint binary")
    parser.add_argument("--exclude_globs",
                        help="Filename containing globs for files "
                             "that should be excluded from the checks")
    parser.add_argument("--source_dir",
                        required=True,
                        help="Root directory of the source code")
    parser.add_argument("--quiet", default=False,
                        action="store_true",
                        help="If specified, only print errors")
    arguments = parser.parse_args()

    # one glob per line in the exclusions file
    exclude_globs = []
    if arguments.exclude_globs:
        for line in open(arguments.exclude_globs):
            exclude_globs.append(line.strip())

    linted_filenames = []
    for path in lintutils.get_sources(arguments.source_dir, exclude_globs):
        linted_filenames.append(str(path))

    cmd = [
        arguments.cpplint_binary,
        '--verbose=2',
        '--linelength=120',
        '--filter=' + ','.join(_filters)
    ]
    if (arguments.cpplint_binary.endswith('.py') and
            platform.system() == 'Windows'):
        # Windows doesn't support executable scripts; execute with
        # sys.executable
        cmd.insert(0, sys.executable)
    if arguments.quiet:
        cmd.append('--quiet')
    else:
        print("\n".join(map(lambda x: "Linting {}".format(x),
                            linted_filenames)))

    # lint files in chunks: each invocation of cpplint will process 16 files
    chunks = lintutils.chunk(linted_filenames, 16)
    cmds = [cmd + some for some in chunks]
    results = lintutils.run_parallel(cmds, stdout=PIPE, stderr=STDOUT)

    error = False
    # record completed processes (keyed by the first filename in the input
    # chunk) for lookup in _check_some_files
    completed_processes = {
        _get_chunk_key(filenames): result
        for filenames, result in zip(chunks, results)
    }
    checker = partial(_check_some_files, completed_processes)
    pool = mp.Pool()
    try:
        # scan the outputs of various cpplint invocations in parallel to
        # distill a list of problematic files
        for problem_files, stdout in pool.imap(checker, chunks):
            if problem_files:
                if isinstance(stdout, bytes):
                    stdout = stdout.decode('utf8')
                print(stdout, file=sys.stderr)
                error = True
    except Exception:
        # remember the failure so the finally block still tears the pool down
        error = True
        raise
    finally:
        pool.terminate()
        pool.join()
    sys.exit(1 if error else 0)

View File

@ -1,150 +0,0 @@
#!/bin/bash

# Build driver for Milvus: parse the flags below, configure a CMake build
# tree under ${BUILD_OUTPUT_DIR}, then either run the lint targets (-l) or
# compile and install the server.

BUILD_OUTPUT_DIR="cmake_build"
BUILD_TYPE="Debug"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX=$(pwd)/milvus
MAKE_CLEAN="OFF"
BUILD_COVERAGE="OFF"
DB_PATH="/tmp/milvus"
PROFILING="OFF"
RUN_CPPLINT="OFF"
CUDA_COMPILER=/usr/local/cuda/bin/nvcc
GPU_VERSION="OFF" #defaults to CPU version
WITH_MKL="OFF"
WITH_PROMETHEUS="ON"
FIU_ENABLE="OFF"
BUILD_OPENBLAS="ON"

# NOTE(review): the option string declares "f:" but no f) case consumes it;
# kept for backward compatibility with callers that still pass -f <arg>.
while getopts "p:d:t:f:ulrcghzmei" arg; do
  case $arg in
  p)
    INSTALL_PREFIX=$OPTARG
    ;;
  d)
    DB_PATH=$OPTARG
    ;;
  t)
    BUILD_TYPE=$OPTARG # BUILD_TYPE
    ;;
  u)
    echo "Build and run unittest cases"
    BUILD_UNITTEST="ON"
    ;;
  l)
    RUN_CPPLINT="ON"
    ;;
  r)
    # wipe any previous build tree so CMake reconfigures from scratch
    if [[ -d ${BUILD_OUTPUT_DIR} ]]; then
      rm ./${BUILD_OUTPUT_DIR} -r
      MAKE_CLEAN="ON"
    fi
    ;;
  c)
    BUILD_COVERAGE="ON"
    ;;
  z)
    PROFILING="ON"
    ;;
  g)
    GPU_VERSION="ON"
    ;;
  m)
    WITH_MKL="ON"
    ;;
  e)
    WITH_PROMETHEUS="OFF"
    ;;
  i)
    FIU_ENABLE="ON"
    ;;
  h) # help
    echo "
parameter:
-p: install prefix(default: $(pwd)/milvus)
-d: db data path(default: /tmp/milvus)
-t: build type(default: Debug)
-u: building unit test options(default: OFF)
-l: run cpplint, clang-format and clang-tidy(default: OFF)
-r: remove previous build directory(default: OFF)
-c: code coverage(default: OFF)
-z: profiling(default: OFF)
-g: build GPU version(default: OFF)
-m: build with MKL(default: OFF)
-e: build without prometheus(default: OFF)
-i: build FIU_ENABLE(default: OFF)
-h: help
usage:
./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-c] [-z] [-g] [-m] [-e] [-h]
"
    exit 0
    ;;
  ?)
    echo "ERROR! unknown argument"
    exit 1
    ;;
  esac
done

# create the build tree if absent and configure from inside it
if [[ ! -d ${BUILD_OUTPUT_DIR} ]]; then
  mkdir ${BUILD_OUTPUT_DIR}
fi

cd ${BUILD_OUTPUT_DIR}

# remove make cache since build.sh -l use default variables
# force update the variables each time
make rebuild_cache >/dev/null 2>&1

# FIX: the original dropped the line continuation after
# -DCMAKE_INSTALL_PREFIX, leaving a raw newline inside the string (it only
# worked through accidental word splitting); the backslash is restored.
CMAKE_CMD="cmake \
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DOpenBLAS_SOURCE=AUTO \
-DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
-DMILVUS_DB_PATH=${DB_PATH} \
-DENABLE_CPU_PROFILING=${PROFILING} \
-DMILVUS_GPU_VERSION=${GPU_VERSION} \
-DFAISS_WITH_MKL=${WITH_MKL} \
-DMILVUS_WITH_PROMETHEUS=${WITH_PROMETHEUS} \
-DMILVUS_WITH_FIU=${FIU_ENABLE} \
../"
echo ${CMAKE_CMD}
${CMAKE_CMD}

if [[ ${MAKE_CLEAN} == "ON" ]]; then
  make clean
fi

if [[ ${RUN_CPPLINT} == "ON" ]]; then
  # cpplint check
  make lint
  if [ $? -ne 0 ]; then
    echo "ERROR! cpplint check failed"
    exit 1
  fi
  echo "cpplint check passed!"

  # clang-format check
  make check-clang-format
  if [ $? -ne 0 ]; then
    echo "ERROR! clang-format check failed"
    exit 1
  fi
  echo "clang-format check passed!"

  # # clang-tidy check
  # make check-clang-tidy
  # if [ $? -ne 0 ]; then
  #     echo "ERROR! clang-tidy check failed"
  #     exit 1
  # fi
  # echo "clang-tidy check passed!"
else
  # compile and build
  make -j 8 install || exit 1
fi

View File

@ -1,12 +0,0 @@
#!/bin/bash

# Install the CentOS 7 build toolchain for Milvus:
# - cmake 3.14.3 unpacked straight into /usr/local
# - devtoolset-7 (gcc/g++/gfortran) and llvm-toolset-7.0 (clang + extra tools)
# - assorted development packages (boost, mysql, lapack, openssl, ...)
sudo yum install -y epel-release centos-release-scl-rh && sudo yum install -y wget curl which && \
sudo wget -qO- "https://cmake.org/files/v3.14/cmake-3.14.3-Linux-x86_64.tar.gz" | sudo tar --strip-components=1 -xz -C /usr/local && \
sudo yum install -y ccache make automake git python3-pip libcurl-devel python3-devel boost-static mysql-devel \
devtoolset-7-gcc devtoolset-7-gcc-c++ devtoolset-7-gcc-gfortran llvm-toolset-7.0-clang llvm-toolset-7.0-clang-tools-extra lcov \
lapack-devel openssl-devel

# Enable the SCL toolchains for every login shell and expose the clang tools
# location to the lint scripts via CLANG_TOOLS_PATH.
echo "source scl_source enable devtoolset-7" | sudo tee -a /etc/profile.d/devtoolset-7.sh
echo "source scl_source enable llvm-toolset-7.0" | sudo tee -a /etc/profile.d/llvm-toolset-7.sh
echo "export CLANG_TOOLS_PATH=/opt/rh/llvm-toolset-7.0/root/usr/bin" | sudo tee -a /etc/profile.d/llvm-toolset-7.sh

View File

@ -1,204 +0,0 @@
# Define a function that check last file modification
#
# Walks `git log` history of ${working_dir} one commit at a time (via
# --skip=N) until it finds a commit that touched a file matching one of the
# grep patterns in ${cache_check_lists_file_path}; the matching commit hash
# is returned through the ${last_modified_commit_id} out-variable.  When the
# list file is absent, or contains no usable (non-'#') entries, the most
# recent commit is returned instead.
function(Check_Last_Modify cache_check_lists_file_path working_dir last_modified_commit_id)
    if(EXISTS "${working_dir}")
        if(EXISTS "${cache_check_lists_file_path}")
            set(GIT_LOG_SKIP_NUM 0)
            set(_MATCH_ALL ON CACHE BOOL "Match all")
            set(_LOOP_STATUS ON CACHE BOOL "Whether out of loop")
            file(STRINGS ${cache_check_lists_file_path} CACHE_IGNORE_TXT)
            while(_LOOP_STATUS)
                foreach(_IGNORE_ENTRY ${CACHE_IGNORE_TXT})
                    # '#'-prefixed lines in the list file are comments
                    if(NOT _IGNORE_ENTRY MATCHES "^[^#]+")
                        continue()
                    endif()

                    set(_MATCH_ALL OFF)
                    execute_process(COMMAND git log --no-merges -1 --skip=${GIT_LOG_SKIP_NUM} --name-status --pretty= WORKING_DIRECTORY ${working_dir} OUTPUT_VARIABLE CHANGE_FILES)
                    if(NOT CHANGE_FILES STREQUAL "")
                        string(REPLACE "\n" ";" _CHANGE_FILES ${CHANGE_FILES})
                        foreach(_FILE_ENTRY ${_CHANGE_FILES})
                            # the last whitespace-separated token of a
                            # --name-status line is the file path
                            string(REGEX MATCH "[^ \t]+$" _FILE_NAME ${_FILE_ENTRY})
                            execute_process(COMMAND sh -c "echo ${_FILE_NAME} | grep ${_IGNORE_ENTRY}" RESULT_VARIABLE return_code)
                            if (return_code EQUAL 0)
                                execute_process(COMMAND git log --no-merges -1 --skip=${GIT_LOG_SKIP_NUM} --pretty=%H WORKING_DIRECTORY ${working_dir} OUTPUT_VARIABLE LAST_MODIFIED_COMMIT_ID)
                                set (${last_modified_commit_id} ${LAST_MODIFIED_COMMIT_ID} PARENT_SCOPE)
                                set(_LOOP_STATUS OFF)
                            endif()
                        endforeach()
                    else()
                        # ran out of history; stop searching
                        set(_LOOP_STATUS OFF)
                    endif()
                endforeach()
                if(_MATCH_ALL)
                    # no usable patterns: report the most recent commit
                    execute_process(COMMAND git log --no-merges -1 --skip=${GIT_LOG_SKIP_NUM} --pretty=%H WORKING_DIRECTORY ${working_dir} OUTPUT_VARIABLE LAST_MODIFIED_COMMIT_ID)
                    set (${last_modified_commit_id} ${LAST_MODIFIED_COMMIT_ID} PARENT_SCOPE)
                    set(_LOOP_STATUS OFF)
                endif()
                math(EXPR GIT_LOG_SKIP_NUM "${GIT_LOG_SKIP_NUM} + 1")
            endwhile(_LOOP_STATUS)
        else()
            # no check-list file: just report the most recent commit
            execute_process(COMMAND git log --no-merges -1 --skip=${GIT_LOG_SKIP_NUM} --pretty=%H WORKING_DIRECTORY ${working_dir} OUTPUT_VARIABLE LAST_MODIFIED_COMMIT_ID)
            set (${last_modified_commit_id} ${LAST_MODIFIED_COMMIT_ID} PARENT_SCOPE)
        endif()
    else()
        message(FATAL_ERROR "The directory ${working_dir} does not exist")
    endif()
endfunction()
# Define a function that extracts a cached package
#
# Registers ${project_name} as an ExternalProject whose download/configure/
# build/install steps are all echo no-ops, plus an "extract" step (exposed
# as the ${project_name}-extract target) that unpacks ${package_file} in the
# top-level binary directory so it lands at ${install_path}.
function(ExternalProject_Use_Cache project_name package_file install_path)
    message(STATUS "Will use cached package file: ${package_file}")

    ExternalProject_Add(${project_name}
            DOWNLOAD_COMMAND ${CMAKE_COMMAND} -E echo
            "No download step needed (using cached package)"
            CONFIGURE_COMMAND ${CMAKE_COMMAND} -E echo
            "No configure step needed (using cached package)"
            BUILD_COMMAND ${CMAKE_COMMAND} -E echo
            "No build step needed (using cached package)"
            INSTALL_COMMAND ${CMAKE_COMMAND} -E echo
            "No install step needed (using cached package)"
            )

    # We want our tar files to contain the Install/<package> prefix (not for any
    # very special reason, only for consistency and so that we can identify them
    # in the extraction logs) which means that we must extract them in the
    # binary (top-level build) directory to have them installed in the right
    # place for subsequent ExternalProjects to pick them up. It seems that the
    # only way to control the working directory is with Add_Step!
    ExternalProject_Add_Step(${project_name} extract
            ALWAYS 1
            COMMAND
            ${CMAKE_COMMAND} -E echo
            "Extracting ${package_file} to ${install_path}"
            COMMAND
            ${CMAKE_COMMAND} -E tar xzf ${package_file} ${install_path}
            WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
            )

    ExternalProject_Add_StepTargets(${project_name} extract)
endfunction()
# Define a function that to create a new cached package
#
# Adds a "package" step (after install) to ${project_name} that tars up
# ${install_path} into ${package_file} and uploads it with curl to
# ${cache_path}, authenticating as ${cache_username}:${cache_password}.
# Any stale package file is removed first.
function(ExternalProject_Create_Cache project_name package_file install_path cache_username cache_password cache_path)
    if(EXISTS ${package_file})
        message(STATUS "Removing existing package file: ${package_file}")
        file(REMOVE ${package_file})
    endif()

    # ensure the directory that will hold the package exists
    string(REGEX REPLACE "(.+)/.+$" "\\1" package_dir ${package_file})
    if(NOT EXISTS ${package_dir})
        file(MAKE_DIRECTORY ${package_dir})
    endif()

    message(STATUS "Will create cached package file: ${package_file}")

    ExternalProject_Add_Step(${project_name} package
            DEPENDEES install
            BYPRODUCTS ${package_file}
            COMMAND ${CMAKE_COMMAND} -E echo "Updating cached package file: ${package_file}"
            COMMAND ${CMAKE_COMMAND} -E tar czvf ${package_file} ${install_path}
            COMMAND ${CMAKE_COMMAND} -E echo "Uploading package file ${package_file} to ${cache_path}"
            COMMAND curl -u${cache_username}:${cache_password} -T ${package_file} ${cache_path}
            )

    ExternalProject_Add_StepTargets(${project_name} package)
endfunction()
# Register imported targets for a prebuilt third-party library.
#
#   ADD_THIRDPARTY_LIB(<name>
#       [STATIC_LIB <path>] [SHARED_LIB <path>]
#       [DEPS <link targets...>] [INCLUDE_DIRECTORIES <dirs...>])
#
# Creates <name>_static and/or <name>_shared IMPORTED targets depending on
# which library paths are supplied; at least one of STATIC_LIB / SHARED_LIB
# is required, otherwise configuration fails with FATAL_ERROR.
function(ADD_THIRDPARTY_LIB LIB_NAME)
    set(options)
    set(one_value_args SHARED_LIB STATIC_LIB)
    set(multi_value_args DEPS INCLUDE_DIRECTORIES)
    cmake_parse_arguments(ARG
            "${options}"
            "${one_value_args}"
            "${multi_value_args}"
            ${ARGN})
    if(ARG_UNPARSED_ARGUMENTS)
        message(SEND_ERROR "Error: unrecognized arguments: ${ARG_UNPARSED_ARGUMENTS}")
    endif()

    if(ARG_STATIC_LIB AND ARG_SHARED_LIB)
        # Both flavors supplied: create <name>_static and <name>_shared.
        # (The original code re-checked "if(NOT ARG_STATIC_LIB)" with a
        # FATAL_ERROR here, which can never be true inside this branch;
        # that unreachable check was removed.)
        set(AUG_LIB_NAME "${LIB_NAME}_static")
        add_library(${AUG_LIB_NAME} STATIC IMPORTED)
        set_target_properties(${AUG_LIB_NAME}
                PROPERTIES IMPORTED_LOCATION "${ARG_STATIC_LIB}")
        if(ARG_DEPS)
            set_target_properties(${AUG_LIB_NAME}
                    PROPERTIES INTERFACE_LINK_LIBRARIES "${ARG_DEPS}")
        endif()
        message(STATUS "Added static library dependency ${AUG_LIB_NAME}: ${ARG_STATIC_LIB}")
        if(ARG_INCLUDE_DIRECTORIES)
            set_target_properties(${AUG_LIB_NAME}
                    PROPERTIES INTERFACE_INCLUDE_DIRECTORIES
                    "${ARG_INCLUDE_DIRECTORIES}")
        endif()

        set(AUG_LIB_NAME "${LIB_NAME}_shared")
        add_library(${AUG_LIB_NAME} SHARED IMPORTED)
        if(WIN32)
            # Mark the ".lib" location as part of a Windows DLL
            set_target_properties(${AUG_LIB_NAME}
                    PROPERTIES IMPORTED_IMPLIB "${ARG_SHARED_LIB}")
        else()
            set_target_properties(${AUG_LIB_NAME}
                    PROPERTIES IMPORTED_LOCATION "${ARG_SHARED_LIB}")
        endif()
        if(ARG_DEPS)
            set_target_properties(${AUG_LIB_NAME}
                    PROPERTIES INTERFACE_LINK_LIBRARIES "${ARG_DEPS}")
        endif()
        message(STATUS "Added shared library dependency ${AUG_LIB_NAME}: ${ARG_SHARED_LIB}")
        if(ARG_INCLUDE_DIRECTORIES)
            set_target_properties(${AUG_LIB_NAME}
                    PROPERTIES INTERFACE_INCLUDE_DIRECTORIES
                    "${ARG_INCLUDE_DIRECTORIES}")
        endif()
    elseif(ARG_STATIC_LIB)
        # Static-only variant.
        set(AUG_LIB_NAME "${LIB_NAME}_static")
        add_library(${AUG_LIB_NAME} STATIC IMPORTED)
        set_target_properties(${AUG_LIB_NAME}
                PROPERTIES IMPORTED_LOCATION "${ARG_STATIC_LIB}")
        if(ARG_DEPS)
            set_target_properties(${AUG_LIB_NAME}
                    PROPERTIES INTERFACE_LINK_LIBRARIES "${ARG_DEPS}")
        endif()
        message(STATUS "Added static library dependency ${AUG_LIB_NAME}: ${ARG_STATIC_LIB}")
        if(ARG_INCLUDE_DIRECTORIES)
            set_target_properties(${AUG_LIB_NAME}
                    PROPERTIES INTERFACE_INCLUDE_DIRECTORIES
                    "${ARG_INCLUDE_DIRECTORIES}")
        endif()
    elseif(ARG_SHARED_LIB)
        # Shared-only variant.
        set(AUG_LIB_NAME "${LIB_NAME}_shared")
        add_library(${AUG_LIB_NAME} SHARED IMPORTED)
        if(WIN32)
            # Mark the ".lib" location as part of a Windows DLL
            set_target_properties(${AUG_LIB_NAME}
                    PROPERTIES IMPORTED_IMPLIB "${ARG_SHARED_LIB}")
        else()
            set_target_properties(${AUG_LIB_NAME}
                    PROPERTIES IMPORTED_LOCATION "${ARG_SHARED_LIB}")
        endif()
        message(STATUS "Added shared library dependency ${AUG_LIB_NAME}: ${ARG_SHARED_LIB}")
        if(ARG_DEPS)
            set_target_properties(${AUG_LIB_NAME}
                    PROPERTIES INTERFACE_LINK_LIBRARIES "${ARG_DEPS}")
        endif()
        if(ARG_INCLUDE_DIRECTORIES)
            set_target_properties(${AUG_LIB_NAME}
                    PROPERTIES INTERFACE_INCLUDE_DIRECTORIES
                    "${ARG_INCLUDE_DIRECTORIES}")
        endif()
    else()
        message(FATAL_ERROR "No static or shared library provided for ${LIB_NAME}")
    endif()
endfunction()

View File

@ -1,173 +0,0 @@
# Begin a new option category: subsequent define_option*() calls are grouped
# under `name`, and config_summary() prints each category as its own section.
macro(set_option_category name)
    set(MILVUS_OPTION_CATEGORY ${name})
    list(APPEND "MILVUS_OPTION_CATEGORIES" ${name})
endmacro()
# Declare a boolean build option and record its metadata (description,
# default, type) under the current category so config_summary() can report it.
macro(define_option name description default)
    option(${name} ${description} ${default})
    list(APPEND "MILVUS_${MILVUS_OPTION_CATEGORY}_OPTION_NAMES" ${name})
    set("${name}_OPTION_DESCRIPTION" ${description})
    set("${name}_OPTION_DEFAULT" ${default})
    set("${name}_OPTION_TYPE" "bool")
endmacro()
# Join the elements of the list variable named by `lst` into one string
# separated by `glue`, storing the result in the variable named by `out`
# (set in the caller's scope). An empty list yields an empty string.
# NOTE: REMOVE_AT operates on the function-local copy of the list, so the
# caller's list variable is not modified.
function(list_join lst glue out)
    if ("${${lst}}" STREQUAL "")
        set(${out} "" PARENT_SCOPE)
        return()
    endif ()
    # Seed the result with the first element, then append "<glue><item>" for
    # each remaining element.
    list(GET ${lst} 0 joined)
    list(REMOVE_AT ${lst} 0)
    foreach (item ${${lst}})
        set(joined "${joined}${glue}${item}")
    endforeach ()
    set(${out} ${joined} PARENT_SCOPE)
endfunction()
# Declare a string cache option with optional enumerated values (passed as
# extra arguments). Records metadata for config_summary(); when enum values
# are supplied they are attached as the cache entry's STRINGS property so
# cmake-gui/ccmake offer a drop-down of valid choices.
macro(define_option_string name description default)
    set(${name} ${default} CACHE STRING ${description})
    list(APPEND "MILVUS_${MILVUS_OPTION_CATEGORY}_OPTION_NAMES" ${name})
    set("${name}_OPTION_DESCRIPTION" ${description})
    set("${name}_OPTION_DEFAULT" "\"${default}\"")
    set("${name}_OPTION_TYPE" "string")
    # ARGN holds the allowed values; join them as "a|b|c" for the summary.
    set("${name}_OPTION_ENUM" ${ARGN})
    list_join("${name}_OPTION_ENUM" "|" "${name}_OPTION_ENUM")
    if (NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
        set_property(CACHE ${name} PROPERTY STRINGS ${ARGN})
    endif ()
endmacro()
#----------------------------------------------------------------------
set_option_category("Milvus Build Option")
define_option(MILVUS_GPU_VERSION "Build GPU version" OFF)
#----------------------------------------------------------------------
set_option_category("Thirdparty")
# How third-party dependencies are acquired; override per-build with
# -DMILVUS_DEPENDENCY_SOURCE=AUTO|BUNDLED|SYSTEM.
set(MILVUS_DEPENDENCY_SOURCE_DEFAULT "BUNDLED")
define_option_string(MILVUS_DEPENDENCY_SOURCE
        "Method to use for acquiring MILVUS's build dependencies"
        "${MILVUS_DEPENDENCY_SOURCE_DEFAULT}"
        "AUTO"
        "BUNDLED"
        "SYSTEM")
define_option(MILVUS_USE_CCACHE "Use ccache when compiling (if available)" ON)
define_option(MILVUS_VERBOSE_THIRDPARTY_BUILD
        "Show output from ExternalProjects rather than just logging to files" ON)
define_option(MILVUS_WITH_EASYLOGGINGPP "Build with Easylogging++ library" ON)
define_option(MILVUS_WITH_PROMETHEUS "Build with PROMETHEUS library" ON)
define_option(MILVUS_WITH_SQLITE "Build with SQLite library" ON)
define_option(MILVUS_WITH_SQLITE_ORM "Build with SQLite ORM library" ON)
define_option(MILVUS_WITH_MYSQLPP "Build with MySQL++" ON)
define_option(MILVUS_WITH_YAMLCPP "Build with yaml-cpp library" ON)
# Profiling support libraries are only offered when CPU profiling is enabled.
if (ENABLE_CPU_PROFILING STREQUAL "ON")
    define_option(MILVUS_WITH_LIBUNWIND "Build with libunwind" ON)
    define_option(MILVUS_WITH_GPERFTOOLS "Build with gperftools" ON)
endif ()
define_option(MILVUS_WITH_GRPC "Build with GRPC" ON)
define_option(MILVUS_WITH_ZLIB "Build with zlib compression" ON)
define_option(MILVUS_WITH_OPENTRACING "Build with Opentracing" ON)
define_option(MILVUS_WITH_FIU "Build with fiu" OFF)
define_option(MILVUS_WITH_AWS "Build with aws" OFF)
define_option(MILVUS_WITH_OATPP "Build with oatpp" ON)
#----------------------------------------------------------------------
set_option_category("Test and benchmark")
# Clear any cached value so MILVUS_BUILD_TESTS always tracks BUILD_UNIT_TEST
# on every configure run instead of sticking to a stale cache entry.
unset(MILVUS_BUILD_TESTS CACHE)
if (BUILD_UNIT_TEST)
    define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" ON)
else ()
    define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" OFF)
endif (BUILD_UNIT_TEST)
#----------------------------------------------------------------------
# Print a formatted summary of the build configuration and of every option
# registered through define_option()/define_option_string(), grouped by the
# categories declared with set_option_category().
macro(config_summary)
    message(STATUS "---------------------------------------------------------------------")
    message(STATUS "MILVUS version: ${MILVUS_VERSION}")
    message(STATUS)
    message(STATUS "Build configuration summary:")
    message(STATUS " Generator: ${CMAKE_GENERATOR}")
    message(STATUS " Build type: ${CMAKE_BUILD_TYPE}")
    message(STATUS " Source directory: ${CMAKE_CURRENT_SOURCE_DIR}")
    if (${CMAKE_EXPORT_COMPILE_COMMANDS})
        message(
                STATUS " Compile commands: ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json")
    endif ()
    foreach (category ${MILVUS_OPTION_CATEGORIES})
        message(STATUS)
        message(STATUS "${category} options:")
        set(option_names ${MILVUS_${category}_OPTION_NAMES})
        # First pass: find the widest printed value so the columns line up.
        set(max_value_length 0)
        foreach (name ${option_names})
            string(LENGTH "\"${${name}}\"" value_length)
            if (${max_value_length} LESS ${value_length})
                set(max_value_length ${value_length})
            endif ()
        endforeach ()
        # Second pass: one line per option — description, value, annotations.
        foreach (name ${option_names})
            # String options are printed quoted so empty values stay visible.
            if ("${${name}_OPTION_TYPE}" STREQUAL "string")
                set(value "\"${${name}}\"")
            else ()
                set(value "${${name}}")
            endif ()
            set(default ${${name}_OPTION_DEFAULT})
            set(description ${${name}_OPTION_DESCRIPTION})
            string(LENGTH ${description} description_length)
            # Pad short descriptions to a fixed width; very long descriptions
            # instead push the value onto a continuation line.
            if (${description_length} LESS 70)
                string(
                        SUBSTRING
                        "                                                                     "
                        ${description_length} -1 description_padding)
            else ()
                set(description_padding "
                ")
            endif ()
            set(comment "[${name}]")
            # Flag options still at their default and show allowed enum values.
            if ("${value}" STREQUAL "${default}")
                set(comment "[default] ${comment}")
            endif ()
            if (NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
                set(comment "${comment} [${${name}_OPTION_ENUM}]")
            endif ()
            string(
                    SUBSTRING "${value}                                                       "
                    0 ${max_value_length} value)
            message(STATUS " ${description} ${description_padding} ${value} ${comment}")
        endforeach ()
    endforeach ()
endmacro()

View File

@ -1,109 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tries to find the clang-tidy and clang-format modules
#
# Usage of this module as follows:
#
# find_package(ClangTools)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
# ClangToolsBin_HOME -
# When set, this path is inspected instead of standard library binary locations
# to find clang-tidy and clang-format
#
# This module defines
# CLANG_TIDY_BIN, The path to the clang tidy binary
# CLANG_TIDY_FOUND, Whether clang tidy was found
# CLANG_FORMAT_BIN, The path to the clang format binary
#  CLANG_FORMAT_FOUND, Whether clang format was found
# Locate clang-tidy, preferring the newest versioned binary name; only the
# explicitly listed paths (ClangTools_PATH, $CLANG_TOOLS_PATH, /usr/local/bin,
# /usr/bin) are searched.
find_program(CLANG_TIDY_BIN
        NAMES clang-tidy-7.0 clang-tidy-6.0 clang-tidy-5.0 clang-tidy-4.0
              clang-tidy-3.9 clang-tidy-3.8 clang-tidy-3.7 clang-tidy-3.6
              clang-tidy
        PATHS ${ClangTools_PATH} $ENV{CLANG_TOOLS_PATH} /usr/local/bin /usr/bin
        NO_DEFAULT_PATH)
# find_program() stores "<VAR>-NOTFOUND" in the cache when nothing matched.
if (NOT ("${CLANG_TIDY_BIN}" STREQUAL "CLANG_TIDY_BIN-NOTFOUND"))
    set(CLANG_TIDY_FOUND 1)
    message("clang-tidy found at ${CLANG_TIDY_BIN}")
else ()
    set(CLANG_TIDY_FOUND 0)
    message("clang-tidy not found")
endif ()
# If the caller pinned CLANG_FORMAT_VERSION, accept only that exact version;
# otherwise fall through to the newest versioned binary available.
if (CLANG_FORMAT_VERSION)
    find_program(CLANG_FORMAT_BIN
            NAMES clang-format-${CLANG_FORMAT_VERSION}
            PATHS
            ${ClangTools_PATH}
            $ENV{CLANG_TOOLS_PATH}
            /usr/local/bin /usr/bin
            NO_DEFAULT_PATH
            )
    # If not found yet, search alternative locations
    if (("${CLANG_FORMAT_BIN}" STREQUAL "CLANG_FORMAT_BIN-NOTFOUND") AND APPLE)
        # Homebrew ships older LLVM versions in /usr/local/opt/llvm@version/
        # Split "major.minor" so the keg name can be derived below.
        STRING(REGEX REPLACE "^([0-9]+)\\.[0-9]+" "\\1" CLANG_FORMAT_MAJOR_VERSION "${CLANG_FORMAT_VERSION}")
        STRING(REGEX REPLACE "^[0-9]+\\.([0-9]+)" "\\1" CLANG_FORMAT_MINOR_VERSION "${CLANG_FORMAT_VERSION}")
        # Homebrew names "x.0" releases llvm@<major>; others llvm@<major>.<minor>.
        if ("${CLANG_FORMAT_MINOR_VERSION}" STREQUAL "0")
            find_program(CLANG_FORMAT_BIN
                    NAMES clang-format
                    PATHS /usr/local/opt/llvm@${CLANG_FORMAT_MAJOR_VERSION}/bin
                    NO_DEFAULT_PATH
                    )
        else()
            find_program(CLANG_FORMAT_BIN
                    NAMES clang-format
                    PATHS /usr/local/opt/llvm@${CLANG_FORMAT_VERSION}/bin
                    NO_DEFAULT_PATH
                    )
        endif()
    endif()
else()
    # No version pinned: probe versioned names newest-first, then the bare name.
    find_program(CLANG_FORMAT_BIN
            NAMES
            clang-format-7.0
            clang-format-6.0
            clang-format-5.0
            clang-format-4.0
            clang-format-3.9
            clang-format-3.8
            clang-format-3.7
            clang-format-3.6
            clang-format
            PATHS ${ClangTools_PATH} $ENV{CLANG_TOOLS_PATH} /usr/local/bin /usr/bin
            NO_DEFAULT_PATH
            )
endif()
# Report the outcome; CLANG_FORMAT_FOUND is consumed by the build's lint targets.
if ( "${CLANG_FORMAT_BIN}" STREQUAL "CLANG_FORMAT_BIN-NOTFOUND" )
    set(CLANG_FORMAT_FOUND 0)
    message("clang-format not found")
else()
    set(CLANG_FORMAT_FOUND 1)
    message("clang-format found at ${CLANG_FORMAT_BIN}")
endif()

File diff suppressed because it is too large Load Diff

View File

@ -1,189 +0,0 @@
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
version: 0.5
#----------------------+------------------------------------------------------------+------------+-----------------+
# Cluster Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# enable               | If running with Mishards, set true, otherwise false.       | Boolean    | false           |
#----------------------+------------------------------------------------------------+------------+-----------------+
# role | Milvus deployment role: rw / ro | role | rw |
#----------------------+------------------------------------------------------------+------------+-----------------+
cluster:
enable: false
role: rw
#----------------------+------------------------------------------------------------+------------+-----------------+
# General Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# timezone             | Use UTC-x or UTC+x to specify a time zone.                 | Timezone   | UTC+8           |
#----------------------+------------------------------------------------------------+------------+-----------------+
# meta_uri | URI for metadata storage, using SQLite (for single server | URL | sqlite://:@:/ |
# | Milvus) or MySQL (for distributed cluster Milvus). | | |
# | Format: dialect://username:password@host:port/database | | |
# | Keep 'dialect://:@:/', 'dialect' can be either 'sqlite' or | | |
# | 'mysql', replace other texts with real values. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
general:
timezone: UTC+8
meta_uri: sqlite://:@:/
#----------------------+------------------------------------------------------------+------------+-----------------+
# Network Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# bind.address | IP address that Milvus server monitors. | IP | 0.0.0.0 |
#----------------------+------------------------------------------------------------+------------+-----------------+
# bind.port | Port that Milvus server monitors. Port range (1024, 65535) | Integer | 19530 |
#----------------------+------------------------------------------------------------+------------+-----------------+
# http.enable | Enable web server or not. | Boolean | true |
#----------------------+------------------------------------------------------------+------------+-----------------+
# http.port | Port that Milvus web server monitors. | Integer | 19121 |
# | Port range (1024, 65535) | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
network:
bind.address: 0.0.0.0
bind.port: 19530
http.enable: true
http.port: 19121
#----------------------+------------------------------------------------------------+------------+-----------------+
# Storage Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# path | Path used to save meta data, vector data and index data. | Path | /var/lib/milvus |
#----------------------+------------------------------------------------------------+------------+-----------------+
# auto_flush_interval | The interval, in seconds, at which Milvus automatically | Integer | 1 (s) |
# | flushes data to disk. | | |
# | 0 means disable the regular flush. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
storage:
path: /var/lib/milvus
auto_flush_interval: 1
#----------------------+------------------------------------------------------------+------------+-----------------+
# WAL Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# enable | Whether to enable write-ahead logging (WAL) in Milvus. | Boolean | true |
# | If WAL is enabled, Milvus writes all data changes to log | | |
# | files in advance before implementing data changes. WAL | | |
# | ensures the atomicity and durability for Milvus operations.| | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# recovery_error_ignore| Whether to ignore logs with errors that happens during WAL | Boolean | false |
# | recovery. If true, when Milvus restarts for recovery and | | |
# | there are errors in WAL log files, log files with errors | | |
# | are ignored. If false, Milvus does not restart when there | | |
# | are errors in WAL log files. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# buffer_size | Sum total of the read buffer and the write buffer in MBs. | Integer | 256 (MB) |
# | buffer_size must be in range [64, 4096] (MB). | | |
# | If the value you specified is out of range, Milvus | | |
# | automatically uses the boundary value closest to the | | |
# | specified value. It is recommended you set buffer_size to | | |
# | a value greater than the inserted data size of a single | | |
# | insert operation for better performance. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# path | Location of WAL log files. | String | |
#----------------------+------------------------------------------------------------+------------+-----------------+
wal:
enable: true
recovery_error_ignore: false
buffer_size: 256MB
path: /var/lib/milvus/wal
#----------------------+------------------------------------------------------------+------------+-----------------+
# Cache Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# cache_size | The size of CPU memory used for caching data for faster | Integer | 4 (GB) |
#                      | query. The sum of 'cache_size' and                         |            |                 |
# | 'insert_buffer_size' must be less than system memory size. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# insert_buffer_size | Buffer size used for data insertion. | Integer | 1 (GB) |
#                      | The sum of 'insert_buffer_size' and 'cache_size'           |            |                 |
# | must be less than system memory size. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# preload_collection | A comma-separated list of collection names that need to | StringList | |
# | be pre-loaded when Milvus server starts up. | | |
# | '*' means preload all existing tables (single-quote or | | |
# | double-quote required). | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
cache:
cache_size: 4GB
insert_buffer_size: 1GB
preload_collection:
#----------------------+------------------------------------------------------------+------------+-----------------+
# GPU Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# enable | Enable GPU resources or not. | Boolean | false |
#----------------------+------------------------------------------------------------+------------+-----------------+
# cache_size | The size of GPU memory per card used for cache. | Integer | 1 (GB) |
#----------------------+------------------------------------------------------------+------------+-----------------+
# gpu_search_threshold | A Milvus performance tuning parameter. This value will be | Integer | 1000 |
# | compared with 'nq' to decide if the search computation will| | |
# | be executed on GPUs only. | | |
# | If nq >= gpu_search_threshold, the search computation will | | |
# | be executed on GPUs only; | | |
# | if nq < gpu_search_threshold, the search computation will | | |
# | be executed on CPUs only. | | |
# | The SQ8H index is special, if nq < gpu_search_threshold, | | |
# | the search will be executed on both CPUs and GPUs. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# search_resources | The list of GPU devices used for search computation. | DeviceList | gpu0 |
# | Must be in format gpux. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# build_index_resources| The list of GPU devices used for index building. | DeviceList | gpu0 |
# | Must be in format gpux. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
gpu:
enable: false
cache_size: 1GB
gpu_search_threshold: 1000
search_devices:
- gpu0
build_index_devices:
- gpu0
#----------------------+------------------------------------------------------------+------------+-----------------+
# Logs Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# level | Log level in Milvus. Must be one of debug, info, warning, | String | debug |
# | error, fatal | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# trace.enable | Whether to enable trace level logging in Milvus. | Boolean | true |
#----------------------+------------------------------------------------------------+------------+-----------------+
# path | Absolute path to the folder holding the log files. | String | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# max_log_file_size | The maximum size of each log file, size range [512, 4096] | Integer | 1024 (MB) |
#----------------------+------------------------------------------------------------+------------+-----------------+
# log_rotate_num | The maximum number of log files that Milvus keeps for each | Integer | 0 |
# | logging level, num range [0, 1024], 0 means unlimited. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
logs:
level: debug
trace.enable: true
path: /var/lib/milvus/logs
max_log_file_size: 1024MB
log_rotate_num: 0
#----------------------+------------------------------------------------------------+------------+-----------------+
# Metric Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# enable | Enable monitoring function or not. | Boolean | false |
#----------------------+------------------------------------------------------------+------------+-----------------+
# address              | Pushgateway address                                        | IP         | 127.0.0.1       |
#----------------------+------------------------------------------------------------+------------+-----------------+
# port | Pushgateway port, port range (1024, 65535) | Integer | 9091 |
#----------------------+------------------------------------------------------------+------------+-----------------+
metric:
enable: false
address: 127.0.0.1
port: 9091

View File

@ -1,190 +0,0 @@
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
version: 0.5
#----------------------+------------------------------------------------------------+------------+-----------------+
# Cluster Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# enable | If running with Mishards, set true, otherwise false. | Boolean | false |
#----------------------+------------------------------------------------------------+------------+-----------------+
# role | Milvus deployment role: rw / ro | Role | rw |
#----------------------+------------------------------------------------------------+------------+-----------------+
cluster:
enable: false
role: rw
#----------------------+------------------------------------------------------------+------------+-----------------+
# General Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# timezone | Use UTC-x or UTC+x to specify a time zone. | Timezone | UTC+8 |
#----------------------+------------------------------------------------------------+------------+-----------------+
# meta_uri | URI for metadata storage, using SQLite (for single server | URI | sqlite://:@:/ |
# | Milvus) or MySQL (for distributed cluster Milvus). | | |
# | Format: dialect://username:password@host:port/database | | |
# | Keep 'dialect://:@:/', 'dialect' can be either 'sqlite' or | | |
# | 'mysql', replace other texts with real values. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
general:
timezone: UTC+8
meta_uri: sqlite://:@:/
#----------------------+------------------------------------------------------------+------------+-----------------+
# Network Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# bind.address | IP address that Milvus server monitors. | IP | 0.0.0.0 |
#----------------------+------------------------------------------------------------+------------+-----------------+
# bind.port | Port that Milvus server monitors. Port range (1024, 65535) | Integer | 19530 |
#----------------------+------------------------------------------------------------+------------+-----------------+
# http.enable | Enable HTTP server or not. | Boolean | true |
#----------------------+------------------------------------------------------------+------------+-----------------+
# http.port | Port that Milvus HTTP server monitors. | Integer | 19121 |
# | Port range (1024, 65535) | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
network:
bind.address: 0.0.0.0
bind.port: 19530
http.enable: true
http.port: 19121
#----------------------+------------------------------------------------------------+------------+-----------------+
# Storage Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# path | Path used to save meta data, vector data and index data. | Path | /var/lib/milvus |
#----------------------+------------------------------------------------------------+------------+-----------------+
# auto_flush_interval | The interval, in seconds, at which Milvus automatically | Integer | 1 (s) |
# | flushes data to disk. | | |
# | 0 means disable the regular flush. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
storage:
path: @MILVUS_DB_PATH@
auto_flush_interval: 1
#----------------------+------------------------------------------------------------+------------+-----------------+
# WAL Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# enable | Whether to enable write-ahead logging (WAL) in Milvus. | Boolean | true |
# | If WAL is enabled, Milvus writes all data changes to log | | |
# | files in advance before implementing data changes. WAL | | |
# | ensures the atomicity and durability for Milvus operations.| | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# recovery_error_ignore| Whether to ignore logs with errors that happens during WAL | Boolean | false |
# | recovery. If true, when Milvus restarts for recovery and | | |
# | there are errors in WAL log files, log files with errors | | |
# | are ignored. If false, Milvus does not restart when there | | |
# | are errors in WAL log files. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# buffer_size | Sum total of the read buffer and the write buffer in Bytes.| String | 256MB |
# | buffer_size must be in range [64MB, 4096MB]. | | |
# | If the value you specified is out of range, Milvus | | |
# | automatically uses the boundary value closest to the | | |
# | specified value. It is recommended you set buffer_size to | | |
# | a value greater than the inserted data size of a single | | |
# | insert operation for better performance. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# path | Location of WAL log files. | String | |
#----------------------+------------------------------------------------------------+------------+-----------------+
wal:
enable: true
recovery_error_ignore: false
buffer_size: 256MB
path: @MILVUS_DB_PATH@/wal
#----------------------+------------------------------------------------------------+------------+-----------------+
# Cache Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# cache_size | The size of CPU memory used for caching data for faster | String | 4GB |
# | query. The sum of 'cache_size' and 'insert_buffer_size' | | |
# | must be less than system memory size. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# insert_buffer_size | Buffer size used for data insertion. | String | 1GB |
# | The sum of 'insert_buffer_size' and 'cache_size' | | |
# | must be less than system memory size. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# preload_collection | A comma-separated list of collection names that need to | StringList | |
# | be pre-loaded when Milvus server starts up. | | |
# | '*' means preload all existing tables (single-quote or | | |
# | double-quote required). | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
cache:
cache_size: 4GB
insert_buffer_size: 1GB
preload_collection:
#----------------------+------------------------------------------------------------+------------+-----------------+
# GPU Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# enable | Use GPU devices or not. | Boolean | false |
#----------------------+------------------------------------------------------------+------------+-----------------+
# cache_size | The size of GPU memory per card used for cache. | String | 1GB |
#----------------------+------------------------------------------------------------+------------+-----------------+
# gpu_search_threshold | A Milvus performance tuning parameter. This value will be | Integer | 1000 |
# | compared with 'nq' to decide if the search computation will| | |
# | be executed on GPUs only. | | |
# | If nq >= gpu_search_threshold, the search computation will | | |
# | be executed on GPUs only; | | |
# | if nq < gpu_search_threshold, the search computation will | | |
# | be executed on CPUs only. | | |
# | The SQ8H index is special, if nq < gpu_search_threshold, | | |
# | the search will be executed on both CPUs and GPUs. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# search_devices | The list of GPU devices used for search computation. | DeviceList | gpu0 |
# | Must be in format gpux. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# build_index_devices | The list of GPU devices used for index building. | DeviceList | gpu0 |
# | Must be in format gpux. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
gpu:
enable: @GPU_ENABLE@
cache_size: 1GB
gpu_search_threshold: 1000
search_devices:
- gpu0
build_index_devices:
- gpu0
#----------------------+------------------------------------------------------------+------------+-----------------+
# Logs Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# level | Log level in Milvus. Must be one of debug, info, warning, | String | debug |
# | error, fatal | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# trace.enable | Whether to enable trace level logging in Milvus. | Boolean | true |
#----------------------+------------------------------------------------------------+------------+-----------------+
# path | Absolute path to the folder holding the log files. | String | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# max_log_file_size | The maximum size of each log file, size range | String | 1024MB |
# | [512MB, 4096MB]. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
# log_rotate_num | The maximum number of log files that Milvus keeps for each | Integer | 0 |
# | logging level, num range [0, 1024], 0 means unlimited. | | |
#----------------------+------------------------------------------------------------+------------+-----------------+
logs:
level: debug
trace.enable: true
path: @MILVUS_DB_PATH@/logs
max_log_file_size: 1024MB
log_rotate_num: 0
#----------------------+------------------------------------------------------------+------------+-----------------+
# Metric Config | Description | Type | Default |
#----------------------+------------------------------------------------------------+------------+-----------------+
# enable | Enable monitoring function or not. | Boolean | false |
#----------------------+------------------------------------------------------------+------------+-----------------+
# address              | Pushgateway address                                        | IP         | 127.0.0.1       |
#----------------------+------------------------------------------------------------+------------+-----------------+
# port | Pushgateway port, port range (1024, 65535) | Integer | 9091 |
#----------------------+------------------------------------------------------------+------------+-----------------+
metric:
enable: false
address: 127.0.0.1
port: 9091

View File

@ -1,26 +0,0 @@
{
"host": "127.0.0.1",
"port": "5666",
"tracer_library": "/path/to/shared_tracing_library",
"tracer_configuration": {
"service_name": "milvus_server",
"sampler": {
"type": "const",
"param": "1"
},
"disabled": false,
"reporter": {
"localAgentHostPort": "127.0.0.1:6831"
},
"headers": {
"jaegerDebugHeader": "jaeger_debug_header",
"jaegerBaggageHeader": "jarger_baggage_header",
"TraceContextHeaderName": "trace_context_header_name",
"traceBaggageHeaderPrefix": "trace_baggage_header_prefix"
},
"baggage_restrictions": {
"denyBaggageOnInitializationFailure": false,
"hostPort": ""
}
}
}

View File

@ -1,132 +0,0 @@
#!/bin/bash

# coverage.sh — run the Milvus unit tests and generate an lcov/genhtml code
# coverage report. The test_db suite needs a reachable MySQL server.

# Make the freshly built Milvus shared libraries visible to the test binaries.
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/milvus/lib

# MySQL connection defaults; overridable via -u/-p/-t below.
MYSQL_USER_NAME=root
MYSQL_PASSWORD=123456
MYSQL_HOST='127.0.0.1'
MYSQL_PORT='3306'

# Parse command-line options.
while getopts "u:p:t:h" arg
do
case $arg in
u)
MYSQL_USER_NAME=$OPTARG
;;
p)
MYSQL_PASSWORD=$OPTARG
;;
t)
MYSQL_HOST=$OPTARG
;;
h) # help
echo "

parameter:
-u: mysql account
-p: mysql password
-t: mysql host
-h: help

usage:
./coverage.sh -u \${MYSQL_USER} -p \${MYSQL_PASSWORD} -t \${MYSQL_HOST} [-h]
"
exit 0
;;
?)
echo "ERROR! unknown argument"
exit 1
;;
esac
done
# Tool commands and working paths for the coverage run.
LCOV_CMD="lcov"
LCOV_GEN_CMD="genhtml"

FILE_INFO_BASE="base.info"
FILE_INFO_MILVUS="server.info"
FILE_INFO_OUTPUT="output.info"
FILE_INFO_OUTPUT_NEW="output_new.info"
DIR_LCOV_OUTPUT="lcov_out"

DIR_GCNO="cmake_build"
DIR_UNITTEST="milvus/unittest"

# Delete stale coverage artifacts from a previous run.
# Fix: the old commands were missing the '$' (e.g. `rm -f FILE_INFO_BASE`),
# so they removed files literally named "FILE_INFO_BASE" and left the real
# *.info files behind; `lcov_out` was also hard-coded instead of using the
# DIR_LCOV_OUTPUT variable defined above.
rm -f "${FILE_INFO_BASE}" "${FILE_INFO_MILVUS}" "${FILE_INFO_OUTPUT}" "${FILE_INFO_OUTPUT_NEW}"
rm -rf "${DIR_LCOV_OUTPUT}"
# Use a unique, timestamped database name so concurrent runs cannot collide.
MYSQL_DB_NAME=milvus_`date +%s%N`

# mysql_exc <sql> — run one SQL statement against the configured server;
# failures are reported but do not abort the script.
function mysql_exc()
{
cmd=$1
mysql -h${MYSQL_HOST} -u${MYSQL_USER_NAME} -p${MYSQL_PASSWORD} -e "${cmd}"
if [ $? -ne 0 ]; then
echo "mysql $cmd run failed"
fi
}

# Provision the throwaway test database.
mysql_exc "CREATE DATABASE IF NOT EXISTS ${MYSQL_DB_NAME};"
mysql_exc "GRANT ALL PRIVILEGES ON ${MYSQL_DB_NAME}.* TO '${MYSQL_USER_NAME}'@'%';"
mysql_exc "FLUSH PRIVILEGES;"
mysql_exc "USE ${MYSQL_DB_NAME};"

# get baseline: record zero-hit data so untested files still appear in the report
${LCOV_CMD} -c -i -d ${DIR_GCNO} -o "${FILE_INFO_BASE}"
if [ $? -ne 0 ]; then
echo "gen baseline coverage run failed"
exit -1
fi

# Run every unit test binary; test_db additionally needs a MySQL URI argument.
for test in `ls ${DIR_UNITTEST}`; do
echo $test
case ${test} in
test_db)
# set run args for test_db
args="mysql://${MYSQL_USER_NAME}:${MYSQL_PASSWORD}@${MYSQL_HOST}:${MYSQL_PORT}/${MYSQL_DB_NAME}"
;;
*test_*)
args=""
;;
esac
# run unittest
./${DIR_UNITTEST}/${test} "${args}"
if [ $? -ne 0 ]; then
echo ${args}
echo ${DIR_UNITTEST}/${test} "run failed"
exit -1
fi
done
# Drop the scratch database now that the tests are done.
mysql_exc "DROP DATABASE IF EXISTS ${MYSQL_DB_NAME};"

# gen code coverage from the .gcda files the tests produced
${LCOV_CMD} -d ${DIR_GCNO} -o "${FILE_INFO_MILVUS}" -c
# merge coverage with the zero-hit baseline
${LCOV_CMD} -a ${FILE_INFO_BASE} -a ${FILE_INFO_MILVUS} -o "${FILE_INFO_OUTPUT}"
# remove third party from tracefiles
${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
"/usr/*" \
"*/boost/*" \
"*/cmake_build/*_ep-prefix/*" \
"*/src/index/cmake_build*" \
"*/src/index/thirdparty*" \
"*/src/grpc*" \
"*/src/metrics/MetricBase.h" \
"*/src/server/Server.cpp" \
"*/src/server/DBWrapper.cpp" \
"*/src/server/grpc_impl/GrpcServer.cpp" \
"*/thirdparty/*"
if [ $? -ne 0 ]; then
echo "generate ${FILE_INFO_OUTPUT_NEW} failed"
exit -2
fi
# gen html report
${LCOV_GEN_CMD} "${FILE_INFO_OUTPUT_NEW}" --output-directory ${DIR_LCOV_OUTPUT}/

View File

@ -1,28 +0,0 @@
## Data Migration

#### 0.3.x
Legacy data is not migrate-able for later versions.

#### 0.4.x
Legacy data can be reused directly by 0.5.x.

Legacy data can be migrated to 0.6.x.

#### 0.5.x
Legacy data can be migrated to 0.6.x.

#### 0.6.x
How to migrate legacy 0.4.x/0.5.x data:

For sqlite meta:
```shell
$ sqlite3 [path_to]/meta.sqlite < sqlite_4_to_6.sql
```

For mysql meta:
```shell
$ mysql -h127.0.0.1 -uroot -p123456 -Dmilvus < mysql_4_to_6.sql
```

View File

@ -1,4 +0,0 @@
-- MySQL meta migration 0.4.x/0.5.x -> 0.6.x: add partition-related columns.
alter table Tables add column owner_table VARCHAR(255) DEFAULT '' NOT NULL;
alter table Tables add column partition_tag VARCHAR(255) DEFAULT '' NOT NULL;
alter table Tables add column version VARCHAR(64) DEFAULT '0.6.0' NOT NULL;
-- Stamp every pre-existing row with the new schema version.
update Tables set version='0.6.0';

View File

@ -1,3 +0,0 @@
-- MySQL meta rollback 0.6.x -> 0.4.x/0.5.x: drop the partition columns again.
alter table Tables drop column owner_table;
alter table Tables drop column partition_tag;
alter table Tables drop column version;

View File

@ -1,4 +0,0 @@
-- SQLite meta migration 0.4.x/0.5.x -> 0.6.x: add partition-related columns.
alter table Tables add column 'owner_table' TEXT DEFAULT '' NOT NULL;
alter table Tables add column 'partition_tag' TEXT DEFAULT '' NOT NULL;
alter table Tables add column 'version' TEXT DEFAULT '0.6.0' NOT NULL;
-- Stamp every pre-existing row with the new schema version.
update Tables set version='0.6.0';

View File

@ -1,7 +0,0 @@
-- SQLite meta rollback 0.6.x -> 0.4.x: rebuild the Tables table without the
-- 0.6 columns via copy -> drop -> rename (SQLite-style column removal).
CREATE TABLE 'TempTables' ( 'id' INTEGER PRIMARY KEY NOT NULL , 'table_id' TEXT UNIQUE NOT NULL , 'state' INTEGER NOT NULL , 'dimension' INTEGER NOT NULL , 'created_on' INTEGER NOT NULL , 'flag' INTEGER DEFAULT 0 NOT NULL , 'index_file_size' INTEGER NOT NULL , 'engine_type' INTEGER NOT NULL , 'nlist' INTEGER NOT NULL , 'metric_type' INTEGER NOT NULL);
INSERT INTO TempTables SELECT id, table_id, state, dimension, created_on, flag, index_file_size, engine_type, nlist, metric_type FROM Tables;
DROP TABLE Tables;
ALTER TABLE TempTables RENAME TO Tables;

View File

@ -1,4 +0,0 @@
#!/bin/bash
# Start the Milvus server using the config file shipped alongside the binary
# (paths are relative to this script's directory).
../bin/milvus_server -c ../conf/server_config.yaml

View File

@ -1,16 +0,0 @@
#!/bin/bash

# Gracefully stop a running Milvus server by sending SIGUSR2, then report
# whether the signal was actually delivered.
#
# Fix: the previous kill_progress() never echoed anything, so STATUS was
# always empty, the "false" comparison never matched, and the script claimed
# a successful shutdown even when no milvus_server process existed.
function kill_progress()
{
pid=$(pgrep $1)
if [ -z "${pid}" ]; then
# no such process — nothing to stop
echo "false"
return
fi
kill -s SIGUSR2 ${pid}
if [ $? -ne 0 ]; then
echo "false"
return
fi
# give the server time to shut down after receiving the signal
sleep 2
echo "true"
}

STATUS=$(kill_progress "milvus_server" )

if [[ ${STATUS} == "false" ]];then
echo "Milvus server closed abnormally!"
else
echo "Milvus server closed successfully!"
fi

View File

@ -1,343 +0,0 @@
#-------------------------------------------------------------------------------
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
#-------------------------------------------------------------------------------
# Engine-wide include paths (project sources + generated gRPC stubs).
include_directories(${MILVUS_SOURCE_DIR})
include_directories(${MILVUS_ENGINE_SRC})
include_directories(${MILVUS_THIRDPARTY_SRC})
include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-status)
include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-milvus)

# FOUND_OPENBLAS starts as "unknown"; it is compared against "false" at
# install time below (presumably refined by the index subdirectory — confirm).
set(FOUND_OPENBLAS "unknown")
add_subdirectory(index)
if (FAISS_WITH_MKL)
add_compile_definitions("WITH_MKL")
endif ()

# Re-export and consume the include dirs collected by the index subproject.
set(INDEX_INCLUDE_DIRS ${INDEX_INCLUDE_DIRS} PARENT_SCOPE)
foreach (dir ${INDEX_INCLUDE_DIRS})
include_directories(${dir})
endforeach ()

# Gather engine sources, one variable per module directory.
aux_source_directory(${MILVUS_ENGINE_SRC}/cache cache_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/config config_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/config/handler config_handler_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/metrics metrics_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/metrics/prometheus metrics_prometheus_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db db_main_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/engine db_engine_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/insert db_insert_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/meta db_meta_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/merge db_merge_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/wal db_wal_files)

# Pre-generated protobuf/gRPC sources (listed explicitly, not globbed).
set(grpc_service_files
${MILVUS_ENGINE_SRC}/grpc/gen-milvus/milvus.grpc.pb.cc
${MILVUS_ENGINE_SRC}/grpc/gen-milvus/milvus.pb.cc
${MILVUS_ENGINE_SRC}/grpc/gen-status/status.grpc.pb.cc
${MILVUS_ENGINE_SRC}/grpc/gen-status/status.pb.cc
)

aux_source_directory(${MILVUS_ENGINE_SRC}/query query_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/context context_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/search search_files)

# Scheduler sources, grouped per sub-module then aggregated.
aux_source_directory(${MILVUS_ENGINE_SRC}/scheduler scheduler_main_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/scheduler/action scheduler_action_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/scheduler/event scheduler_event_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/scheduler/job scheduler_job_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/scheduler/selector scheduler_selector_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/scheduler/resource scheduler_resource_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/scheduler/task scheduler_task_files)
set(scheduler_files
${scheduler_main_files}
${scheduler_action_files}
${scheduler_event_files}
${scheduler_job_files}
${scheduler_selector_files}
${scheduler_resource_files}
${scheduler_task_files}
)

# Vendored single-file third-party sources compiled directly into the engine.
aux_source_directory(${MILVUS_THIRDPARTY_SRC}/easyloggingpp thirdparty_easyloggingpp_files)
aux_source_directory(${MILVUS_THIRDPARTY_SRC}/nlohmann thirdparty_nlohmann_files)
aux_source_directory(${MILVUS_THIRDPARTY_SRC}/dablooms thirdparty_dablooms_files)
set(thirdparty_files
${thirdparty_easyloggingpp_files}
${thirdparty_nlohmann_files}
${thirdparty_dablooms_files}
)
# Server-side request delivery sources.
aux_source_directory(${MILVUS_ENGINE_SRC}/server server_service_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/init server_init_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/delivery/request delivery_request_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/delivery/hybrid_request delivery_hybrid_request_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/delivery/strategy delivery_strategy_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/delivery delivery_files)
# Aggregate server sources.
# Fix: ${server_init_files} was listed twice in this list; the redundant
# duplicate entry has been removed.
set(server_files
${server_init_files}
${server_service_files}
${delivery_request_files}
${delivery_hybrid_request_files}
${delivery_strategy_files}
${delivery_files}
)
# gRPC server sources.
aux_source_directory(${MILVUS_ENGINE_SRC}/server/grpc_impl grpc_impl_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/grpc_impl/interceptor grpc_interceptor_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/context server_context_files)
# NOTE(review): ${grpc_request_files} is never defined in this file, so it
# expands to nothing — confirm it can be dropped.
set(grpc_server_files
${grpc_request_files}
${grpc_impl_files}
${grpc_interceptor_files}
)
# RESTful web server sources.
# Fix: renamed the misspelled local variable web_conponent_files ->
# web_component_files (it is defined and consumed only within this list).
aux_source_directory(${MILVUS_ENGINE_SRC}/server/web_impl/handler web_handler_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/web_impl/component web_component_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/web_impl/controller web_controller_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/web_impl/dto web_dto_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/web_impl/utils web_utils_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/web_impl web_impl_files)
set(web_server_files
${web_handler_files}
${web_component_files}
${web_controller_files}
${web_dto_files}
${web_utils_files}
${web_impl_files}
)
# Storage backends (local disk only; S3 support is currently disabled).
aux_source_directory(${MILVUS_ENGINE_SRC}/storage storage_main_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/storage/disk storage_disk_files)
#aux_source_directory(${MILVUS_ENGINE_SRC}/storage/s3 storage_s3_files)
set(storage_files
${storage_main_files}
${storage_disk_files}
# ${storage_s3_files}
)

aux_source_directory(${MILVUS_ENGINE_SRC}/utils utils_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/index/archive wrapper_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/tracing tracing_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/codecs codecs_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/codecs/default codecs_default_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/segment segment_files)

# Everything compiled into the milvus_engine static library.
set(engine_files
${CMAKE_CURRENT_SOURCE_DIR}/main.cpp
${cache_files}
${db_main_files}
${db_engine_files}
${db_insert_files}
${db_meta_files}
${db_merge_files}
${db_wal_files}
${metrics_files}
${storage_files}
${thirdparty_files}
${utils_files}
${wrapper_files}
${codecs_files}
${codecs_default_files}
${segment_files}
)
if (MILVUS_WITH_PROMETHEUS)
set(engine_files ${engine_files}
${metrics_prometheus_files})
endif ()

# Library groups referenced when linking the targets below.
set(grpc_lib
grpcpp_channelz
grpc++
grpc
grpc_protobuf
grpc_protoc
)

set(prometheus_lib
prometheus-cpp-push
prometheus-cpp-pull
prometheus-cpp-core
curl
)

set(boost_lib
libboost_system.a
libboost_filesystem.a
libboost_serialization.a
)

set(s3_client_lib
aws-cpp-sdk-s3
aws-cpp-sdk-core
)

set(third_party_libs
sqlite
${grpc_lib}
yaml-cpp
mysqlpp
zlib
fiu
${boost_lib}
)
# GPU build: add CUDA headers/libs and the GPU wrapper sources.
if (MILVUS_GPU_VERSION)
include_directories(${CUDA_INCLUDE_DIRS})
link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
set(cuda_lib
${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
cudart
cublas
)
set(third_party_libs ${third_party_libs}
${cuda_lib}
)
aux_source_directory(${MILVUS_ENGINE_SRC}/wrapper/gpu wrapper_gpu_files)
set(engine_files ${engine_files}
${wrapper_gpu_files}
)
endif ()

# cannot be enabled together with ENABLE_CPU_PROFILING
if (ENABLE_MEM_PROFILING)
set(third_party_libs ${third_party_libs}
tcmalloc
)
endif ()

if (ENABLE_CPU_PROFILING)
set(third_party_libs ${third_party_libs}
gperftools
libunwind
)
endif ()

if (MILVUS_WITH_PROMETHEUS)
set(third_party_libs ${third_party_libs}
${prometheus_lib}
)
endif ()

if (MILVUS_WITH_AWS)
set(third_party_libs ${third_party_libs}
${s3_client_lib}
curl
crypto
)
endif ()

set(engine_libs
pthread
libgomp.a
libgfortran.a
dl
)

# libquadmath is excluded on aarch64 (not linked for that architecture).
if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
set(engine_libs
${engine_libs}
libquadmath.a
)
endif ()

# Core engine static library.
add_library(milvus_engine STATIC ${engine_files})
add_dependencies(milvus_engine fiu)
target_link_libraries(milvus_engine
knowhere
${third_party_libs}
${engine_libs}
)

# Metrics library (optionally including the prometheus exporters).
if (MILVUS_WITH_PROMETHEUS)
add_library(metrics STATIC ${metrics_files} ${metrics_prometheus_files})
else ()
add_library(metrics STATIC ${metrics_files})
endif ()
add_dependencies(metrics fiu)

set(metrics_lib
yaml-cpp
)

if (MILVUS_WITH_PROMETHEUS)
set(metrics_lib ${metrics_lib}
${prometheus_lib}
)
endif ()
target_link_libraries(metrics ${metrics_lib})

# OpenTracing support library.
add_library(tracing STATIC ${tracing_files} ${thirdparty_files})
add_dependencies(tracing fiu)
set(tracing_lib
opentracing
opentracing_mocktracer
${grpc_lib}
pthread
z
)
target_link_libraries(tracing ${tracing_lib})

set(server_libs
milvus_engine
metrics
tracing
oatpp
)

# The milvus_server executable.
add_executable(milvus_server
${config_files}
${config_handler_files}
${metrics_files}
${query_files}
${search_files}
${context_files}
${scheduler_files}
${server_files}
${grpc_server_files}
${grpc_service_files}
${web_server_files}
${server_context_files}
${utils_files}
${tracing_files}
)
target_link_libraries(milvus_server
${server_libs}
)

# Install the server binary plus the shared libraries it needs at runtime.
install(TARGETS milvus_server DESTINATION bin)
install(FILES
${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}
${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}.3
${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}.3.2.4
${CMAKE_BINARY_DIR}/fiu_ep-prefix/src/fiu_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}fiu${CMAKE_SHARED_LIBRARY_SUFFIX}
${CMAKE_BINARY_DIR}/fiu_ep-prefix/src/fiu_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}fiu${CMAKE_SHARED_LIBRARY_SUFFIX}.0
${CMAKE_BINARY_DIR}/fiu_ep-prefix/src/fiu_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}fiu${CMAKE_SHARED_LIBRARY_SUFFIX}.1.00
DESTINATION lib)
# Also ship the locally built OpenBLAS when FOUND_OPENBLAS is "false"
# (presumably meaning no system copy was found — confirm in src/index).
if (FOUND_OPENBLAS STREQUAL "false")
install(FILES
${CMAKE_BINARY_DIR}/src/index/openblas_ep-prefix/src/openblas_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}openblas${CMAKE_SHARED_LIBRARY_SUFFIX}
${CMAKE_BINARY_DIR}/src/index/openblas_ep-prefix/src/openblas_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}openblas${CMAKE_SHARED_LIBRARY_SUFFIX}.0
${CMAKE_BINARY_DIR}/src/index/openblas_ep-prefix/src/openblas_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}openblas${CMAKE_SHARED_LIBRARY_SUFFIX}.0.3
DESTINATION lib)
endif()

104
core/src/cache/Cache.h vendored
View File

@ -1,104 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#pragma once
#include "LRU.h"
#include "utils/Log.h"
#include <atomic>
#include <mutex>
#include <set>
#include <string>
namespace milvus {
namespace cache {
// Thread-safe LRU cache keyed by string. Tracks memory usage against a byte
// capacity and evicts least-recently-used items once the budget is exceeded.
template <typename ItemObj>
class Cache {
 public:
    // mem_capacity, units:GB
    // cache_max_count: maximum number of entries the underlying LRU may hold.
    // header: tag prepended to log lines, e.g. "[CACHE CPU]".
    Cache(int64_t capacity_gb, int64_t cache_max_count, const std::string& header = "");
    ~Cache() = default;

    // Current memory usage, unit: BYTE.
    int64_t
    usage() const {
        return usage_;
    }

    // unit: BYTE
    int64_t
    capacity() const {
        return capacity_;
    }

    // unit: BYTE
    void
    set_capacity(int64_t capacity);

    // Fraction of capacity that eviction frees down to (0.0 - 1.0).
    double
    freemem_percent() const {
        return freemem_percent_;
    }

    void
    set_freemem_percent(double percent) {
        freemem_percent_ = percent;
    }

    // Number of cached items.
    size_t
    size() const;

    bool
    exists(const std::string& key);

    // Returns the cached item, or nullptr on a miss.
    ItemObj
    get(const std::string& key);

    void
    insert(const std::string& key, const ItemObj& item);

    void
    erase(const std::string& key);

    // Free memory so an item of `size` bytes can fit; false if it never can.
    bool
    reserve(const int64_t size);

    void
    print();

    void
    clear();

 private:
    // *_internal variants assume mutex_ is already held by the caller.
    void
    insert_internal(const std::string& key, const ItemObj& item);

    void
    erase_internal(const std::string& key);

    void
    free_memory_internal(const int64_t target_size);

 private:
    std::string header_;        // log prefix
    int64_t usage_;             // bytes currently cached
    int64_t capacity_;          // byte budget
    double freemem_percent_;    // eviction target fraction
    LRU<std::string, ItemObj> lru_;
    mutable std::mutex mutex_;  // guards all mutable state above
};
} // namespace cache
} // namespace milvus
#include "cache/Cache.inl"

View File

@ -1,191 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
namespace milvus {
namespace cache {
constexpr double DEFAULT_THRESHOLD_PERCENT = 0.7;
// Construct an empty cache: `capacity` is the byte budget, `cache_max_count`
// caps the number of LRU entries, `header` prefixes log messages.
template <typename ItemObj>
Cache<ItemObj>::Cache(int64_t capacity, int64_t cache_max_count, const std::string& header)
    : header_(header),
      usage_(0),
      capacity_(capacity),
      freemem_percent_(DEFAULT_THRESHOLD_PERCENT),
      lru_(cache_max_count) {
}
// Replace the byte budget and immediately evict down to the new limit.
// Non-positive values are ignored, leaving the current budget untouched.
template <typename ItemObj>
void
Cache<ItemObj>::set_capacity(int64_t capacity) {
    if (capacity <= 0) {
        return;  // reject nonsense values
    }
    std::lock_guard<std::mutex> guard(mutex_);
    capacity_ = capacity;
    free_memory_internal(capacity);
}
// Number of items currently cached.
template <typename ItemObj>
size_t
Cache<ItemObj>::size() const {
    std::lock_guard<std::mutex> lock(mutex_);
    return lru_.size();
}
// True when `key` is present in the cache.
template <typename ItemObj>
bool
Cache<ItemObj>::exists(const std::string& key) {
    std::lock_guard<std::mutex> lock(mutex_);
    return lru_.exists(key);
}
// Fetch the item stored under `key`; yields nullptr on a cache miss.
template <typename ItemObj>
ItemObj
Cache<ItemObj>::get(const std::string& key) {
    std::lock_guard<std::mutex> guard(mutex_);
    return lru_.exists(key) ? lru_.get(key) : nullptr;
}
// Insert (or replace) an item under `key`, evicting others if over budget.
template <typename ItemObj>
void
Cache<ItemObj>::insert(const std::string& key, const ItemObj& item) {
    std::lock_guard<std::mutex> lock(mutex_);
    insert_internal(key, item);
}
// Remove the item stored under `key`; no-op for unknown keys.
template <typename ItemObj>
void
Cache<ItemObj>::erase(const std::string& key) {
    std::lock_guard<std::mutex> lock(mutex_);
    erase_internal(key);
}
// Make room for an incoming item of `item_size` bytes. Returns false when the
// item can never fit (larger than total capacity); otherwise evicts enough
// entries that `item_size` bytes are available and returns true.
template <typename ItemObj>
bool
Cache<ItemObj>::reserve(const int64_t item_size) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (item_size > capacity_) {
        LOG_SERVER_ERROR_ << header_ << " item size " << (item_size >> 20) << "MB too big to insert into cache capacity"
                          << (capacity_ >> 20) << "MB";
        return false;
    }
    if (item_size > capacity_ - usage_) {
        // Evict until usage drops to (capacity - item_size).
        free_memory_internal(capacity_ - item_size);
    }
    return true;
}
// Drop every cached item and reset the usage counter.
template <typename ItemObj>
void
Cache<ItemObj>::clear() {
    std::lock_guard<std::mutex> lock(mutex_);
    lru_.clear();
    usage_ = 0;
    LOG_SERVER_DEBUG_ << header_ << " Clear cache !";
}
// Log item count, usage and capacity (sizes reported in MB via >> 20).
template <typename ItemObj>
void
Cache<ItemObj>::print() {
    std::lock_guard<std::mutex> lock(mutex_);
    size_t cache_count = lru_.size();
    // for (auto it = lru_.begin(); it != lru_.end(); ++it) {
    //     LOG_SERVER_DEBUG_ << it->first;
    // }
    LOG_SERVER_DEBUG_ << header_ << " [item count]: " << cache_count << ", [usage] " << (usage_ >> 20)
                      << "MB, [capacity] " << (capacity_ >> 20) << "MB";
}
// Core insert; caller must hold mutex_. Nullptr items are ignored. Updates
// byte accounting first, evicts when over budget, then stores into the LRU.
template <typename ItemObj>
void
Cache<ItemObj>::insert_internal(const std::string& key, const ItemObj& item) {
    if (item == nullptr) {
        return;
    }

    size_t item_size = item->Size();

    // if key already exist, subtract old item size
    if (lru_.exists(key)) {
        const ItemObj& old_item = lru_.get(key);
        usage_ -= old_item->Size();
    }

    // plus new item size
    usage_ += item_size;

    // if usage exceed capacity, free some items
    if (usage_ > capacity_) {
        LOG_SERVER_DEBUG_ << header_ << " Current usage " << (usage_ >> 20) << "MB is too high for capacity "
                          << (capacity_ >> 20) << "MB, start free memory";
        free_memory_internal(capacity_);
    }

    // insert new item
    lru_.put(key, item);

    LOG_SERVER_DEBUG_ << header_ << " Insert " << key << " size: " << (item_size >> 20) << "MB into cache";
    LOG_SERVER_DEBUG_ << header_ << " Count: " << lru_.size() << ", Usage: " << (usage_ >> 20) << "MB, Capacity: "
                      << (capacity_ >> 20) << "MB";
}
// Core erase; caller must hold mutex_. No-op for unknown keys; subtracts the
// item's size from the usage counter on success.
template <typename ItemObj>
void
Cache<ItemObj>::erase_internal(const std::string& key) {
    if (!lru_.exists(key)) {
        return;
    }

    const ItemObj& item = lru_.get(key);
    size_t item_size = item->Size();

    lru_.erase(key);

    usage_ -= item_size;
    LOG_SERVER_DEBUG_ << header_ << " Erase " << key << " size: " << (item_size >> 20) << "MB from cache";
    LOG_SERVER_DEBUG_ << header_ << " Count: " << lru_.size() << ", Usage: " << (usage_ >> 20) << "MB, Capacity: "
                      << (capacity_ >> 20) << "MB";
}
// Evict least-recently-used items (caller holds mutex_) until usage drops to
// min(capacity * freemem_percent, target_size). Victim keys are collected
// first and erased afterwards, so the LRU is not mutated while iterating.
template <typename ItemObj>
void
Cache<ItemObj>::free_memory_internal(const int64_t target_size) {
    int64_t threshold = std::min((int64_t)(capacity_ * freemem_percent_), target_size);
    int64_t delta_size = usage_ - threshold;
    if (delta_size <= 0) {
        delta_size = 1;  // ensure at least one item erased
    }

    std::set<std::string> key_array;
    int64_t released_size = 0;

    // Walk from the least-recently-used end until enough bytes are marked.
    auto it = lru_.rbegin();
    while (it != lru_.rend() && released_size < delta_size) {
        auto& key = it->first;
        auto& obj_ptr = it->second;

        key_array.emplace(key);
        released_size += obj_ptr->Size();
        ++it;
    }

    LOG_SERVER_DEBUG_ << header_ << " To be released memory size: " << (released_size >> 20) << "MB";

    for (auto& key : key_array) {
        erase_internal(key);
    }
}
} // namespace cache
} // namespace milvus

View File

@ -1,72 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#pragma once
#include "Cache.h"
#include "metrics/Metrics.h"
#include "utils/Log.h"
#include <memory>
#include <string>
namespace milvus {
namespace cache {
// Thin, null-safe facade over a Cache<ItemObj>. Subclasses create the
// concrete cache_ instance; every call degrades to a logged no-op when
// cache_ was never set. Also feeds cache access metrics (see CacheMgr.inl).
template <typename ItemObj>
class CacheMgr {
 public:
    virtual uint64_t
    ItemCount() const;

    virtual bool
    ItemExists(const std::string& key);

    // Returns nullptr on a miss or when no cache exists.
    virtual ItemObj
    GetItem(const std::string& key);

    virtual void
    InsertItem(const std::string& key, const ItemObj& data);

    virtual void
    EraseItem(const std::string& key);

    // Evict until `size` bytes fit; false when impossible or no cache exists.
    virtual bool
    Reserve(const int64_t size);

    virtual void
    PrintInfo();

    virtual void
    ClearCache();

    // unit: BYTE
    int64_t
    CacheUsage() const;

    // unit: BYTE
    int64_t
    CacheCapacity() const;

    void
    SetCapacity(int64_t capacity);

 protected:
    CacheMgr();

    virtual ~CacheMgr();

 protected:
    std::shared_ptr<Cache<ItemObj>> cache_;  // created by concrete subclasses
};
} // namespace cache
} // namespace milvus
#include "cache/CacheMgr.inl"

View File

@ -1,137 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
namespace milvus {
namespace cache {
// Null-safe delegation to the underlying Cache. Each method logs an error and
// returns a neutral value when a subclass never created cache_.

template <typename ItemObj>
CacheMgr<ItemObj>::CacheMgr() {
}

template <typename ItemObj>
CacheMgr<ItemObj>::~CacheMgr() {
}

// Number of cached items; 0 when no cache exists.
template <typename ItemObj>
uint64_t
CacheMgr<ItemObj>::ItemCount() const {
    if (cache_ == nullptr) {
        LOG_SERVER_ERROR_ << "Cache doesn't exist";
        return 0;
    }

    return (uint64_t)(cache_->size());
}

template <typename ItemObj>
bool
CacheMgr<ItemObj>::ItemExists(const std::string& key) {
    if (cache_ == nullptr) {
        LOG_SERVER_ERROR_ << "Cache doesn't exist";
        return false;
    }

    return cache_->exists(key);
}

// Look up an item; also bumps the cache-access metric counter.
template <typename ItemObj>
ItemObj
CacheMgr<ItemObj>::GetItem(const std::string& key) {
    if (cache_ == nullptr) {
        LOG_SERVER_ERROR_ << "Cache doesn't exist";
        return nullptr;
    }
    server::Metrics::GetInstance().CacheAccessTotalIncrement();
    return cache_->get(key);
}

// Insert an item; also bumps the cache-access metric counter.
template <typename ItemObj>
void
CacheMgr<ItemObj>::InsertItem(const std::string& key, const ItemObj& data) {
    if (cache_ == nullptr) {
        LOG_SERVER_ERROR_ << "Cache doesn't exist";
        return;
    }

    cache_->insert(key, data);
    server::Metrics::GetInstance().CacheAccessTotalIncrement();
}

// Remove an item; also bumps the cache-access metric counter.
template <typename ItemObj>
void
CacheMgr<ItemObj>::EraseItem(const std::string& key) {
    if (cache_ == nullptr) {
        LOG_SERVER_ERROR_ << "Cache doesn't exist";
        return;
    }

    cache_->erase(key);
    server::Metrics::GetInstance().CacheAccessTotalIncrement();
}

// Ensure `size` bytes can fit; false when impossible or no cache exists.
template <typename ItemObj>
bool
CacheMgr<ItemObj>::Reserve(const int64_t size) {
    if (cache_ == nullptr) {
        LOG_SERVER_ERROR_ << "Cache doesn't exist";
        return false;
    }

    return cache_->reserve(size);
}

template <typename ItemObj>
void
CacheMgr<ItemObj>::PrintInfo() {
    if (cache_ == nullptr) {
        LOG_SERVER_ERROR_ << "Cache doesn't exist";
        return;
    }

    cache_->print();
}

template <typename ItemObj>
void
CacheMgr<ItemObj>::ClearCache() {
    if (cache_ == nullptr) {
        LOG_SERVER_ERROR_ << "Cache doesn't exist";
        return;
    }

    cache_->clear();
}

// Current usage in bytes; 0 when no cache exists.
template <typename ItemObj>
int64_t
CacheMgr<ItemObj>::CacheUsage() const {
    if (cache_ == nullptr) {
        LOG_SERVER_ERROR_ << "Cache doesn't exist";
        return 0;
    }

    return cache_->usage();
}

// Byte capacity; 0 when no cache exists.
template <typename ItemObj>
int64_t
CacheMgr<ItemObj>::CacheCapacity() const {
    if (cache_ == nullptr) {
        LOG_SERVER_ERROR_ << "Cache doesn't exist";
        return 0;
    }

    return cache_->capacity();
}

template <typename ItemObj>
void
CacheMgr<ItemObj>::SetCapacity(int64_t capacity) {
    if (cache_ == nullptr) {
        LOG_SERVER_ERROR_ << "Cache doesn't exist";
        return;
    }
    cache_->set_capacity(capacity);
}
} // namespace cache
} // namespace milvus

View File

@ -1,66 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#include "cache/CpuCacheMgr.h"
#include <utility>
#include <fiu-local.h>
#include "config/Config.h"
#include "utils/Log.h"
namespace milvus {
namespace cache {
namespace {
// constexpr int64_t unit = 1024 * 1024 * 1024;
constexpr int64_t unit = 1;
} // namespace
CpuCacheMgr::CpuCacheMgr() {
    // All config values have been checked in Config::ValidateConfig()
    server::Config& config = server::Config::GetInstance();

    int64_t cpu_cache_cap;
    config.GetCacheConfigCpuCacheCapacity(cpu_cache_cap);
    // NOTE(review): `unit` is currently 1 (the GB multiplier is commented out
    // in the anonymous namespace above), so the configured value is used as
    // raw bytes here — confirm the intended unit.
    int64_t cap = cpu_cache_cap * unit;
    LOG_SERVER_DEBUG_ << "cpu cache.size: " << cap;
    LOG_SERVER_INFO_ << "cpu cache.size: " << cap;
    // 1UL << 32 effectively removes the item-count limit.
    cache_ = std::make_shared<Cache<DataObjPtr>>(cap, 1UL << 32, "[CACHE CPU]");

    float cpu_cache_threshold;
    config.GetCacheConfigCpuCacheThreshold(cpu_cache_threshold);
    cache_->set_freemem_percent(cpu_cache_threshold);

    // Register for runtime capacity changes from the config framework.
    SetIdentity("CpuCacheMgr");
    AddCpuCacheCapacityListener();
}
// Process-wide singleton accessor (function-local static, thread-safe
// initialization since C++11).
CpuCacheMgr*
CpuCacheMgr::GetInstance() {
    static CpuCacheMgr s_mgr;
    return &s_mgr;
}
// Look up a cached index object; returns nullptr when the key is absent.
DataObjPtr
CpuCacheMgr::GetIndex(const std::string& key) {
    return GetItem(key);
}
// Config callback: apply a new CPU cache capacity (scaled by `unit`).
void
CpuCacheMgr::OnCpuCacheCapacityChanged(int64_t value) {
    SetCapacity(value * unit);
}
} // namespace cache
} // namespace milvus

View File

@ -1,42 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#pragma once
#include <memory>
#include <string>
#include "cache/CacheMgr.h"
#include "cache/DataObj.h"
#include "config/handler/CacheConfigHandler.h"
namespace milvus {
namespace cache {
// CPU-side cache manager singleton; implements CacheConfigHandler so the
// capacity can be adjusted at runtime via config callbacks.
class CpuCacheMgr : public CacheMgr<DataObjPtr>, public server::CacheConfigHandler {
 private:
    CpuCacheMgr();

 public:
    // TODO(myh): use smart pointer instead
    static CpuCacheMgr*
    GetInstance();

    // Returns the cached object for `key`, or nullptr on a miss.
    DataObjPtr
    GetIndex(const std::string& key);

 protected:
    // Invoked by the config framework when the CPU cache capacity changes.
    void
    OnCpuCacheCapacityChanged(int64_t value) override;
};
} // namespace cache
} // namespace milvus

View File

@ -1,28 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#pragma once
#include <memory>
namespace milvus {
namespace cache {

// Interface for anything storable in the cache: items only need to report
// their memory footprint so the cache can do byte accounting.
class DataObj {
 public:
    // Fix: polymorphic base classes need a virtual destructor so deleting a
    // derived object through a DataObj pointer is well-defined.
    virtual ~DataObj() = default;

    // Memory footprint of the object, unit: BYTE.
    virtual int64_t
    Size() = 0;
};

using DataObjPtr = std::shared_ptr<DataObj>;

}  // namespace cache
}  // namespace milvus

View File

@ -1,94 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#include "cache/GpuCacheMgr.h"
#include "config/Config.h"
#include "utils/Log.h"
#include <fiu-local.h>
#include <sstream>
#include <utility>
namespace milvus {
namespace cache {
#ifdef MILVUS_GPU_VERSION
std::mutex GpuCacheMgr::global_mutex_;
std::unordered_map<int64_t, GpuCacheMgrPtr> GpuCacheMgr::instance_;
namespace {
// Bytes per gigabyte: config capacities/thresholds are specified in GB.
constexpr int64_t G_BYTE = 1024 * 1024 * 1024;
}  // namespace
// Build the cache for one GPU device, sizing it from the server config.
// All config values have been checked in Config::ValidateConfig().
GpuCacheMgr::GpuCacheMgr(int64_t gpu_id) : gpu_id_(gpu_id) {
    server::Config& config = server::Config::GetInstance();

    int64_t gpu_cache_cap;
    config.GetGpuResourceConfigCacheCapacity(gpu_cache_cap);
    int64_t cap = gpu_cache_cap * G_BYTE;
    std::string header = "[CACHE GPU" + std::to_string(gpu_id) + "]";
    // Use an explicitly 64-bit constant: the original `1UL << 32` is
    // undefined behavior on platforms where unsigned long is 32 bits.
    cache_ = std::make_shared<Cache<DataObjPtr>>(cap, int64_t{1} << 32, header);

    float gpu_mem_threshold;
    config.GetGpuResourceConfigCacheThreshold(gpu_mem_threshold);
    cache_->set_freemem_percent(gpu_mem_threshold);

    SetIdentity("GpuCacheMgr");
    AddGpuEnableListener();
    AddGpuCacheCapacityListener();
}
// Unregister the config-change callback installed by the listener setup in
// the constructor, so the config system does not call into a destroyed object.
GpuCacheMgr::~GpuCacheMgr() {
server::Config& config = server::Config::GetInstance();
config.CancelCallBack(server::CONFIG_GPU_RESOURCE, server::CONFIG_GPU_RESOURCE_ENABLE, identity_);
}
// Look up a cached index object by key. Miss semantics are those of
// CacheMgr::GetItem (presumably a null DataObjPtr -- confirm in CacheMgr).
DataObjPtr
GpuCacheMgr::GetIndex(const std::string& key) {
    return GetItem(key);
}
// Insert an item into the GPU cache. Silently drops the insert when GPU
// usage has been disabled via config (gpu_enable_).
void
GpuCacheMgr::InsertItem(const std::string& key, const milvus::cache::DataObjPtr& data) {
    if (!gpu_enable_) {
        return;
    }
    CacheMgr<DataObjPtr>::InsertItem(key, data);
}
// Try to make room for `size` bytes; pure pass-through to the base class.
bool
GpuCacheMgr::Reserve(const int64_t size) {
return CacheMgr<DataObjPtr>::Reserve(size);
}
// Return the per-GPU singleton, creating it on first use.
//
// NOTE: the previous double-checked pattern read instance_ before taking
// global_mutex_; a concurrent find/insert on std::unordered_map is a data
// race (undefined behavior). Always hold the lock while touching the map.
GpuCacheMgrPtr
GpuCacheMgr::GetInstance(int64_t gpu_id) {
    std::lock_guard<std::mutex> lock(global_mutex_);
    auto it = instance_.find(gpu_id);
    if (it == instance_.end()) {
        it = instance_.emplace(gpu_id, std::make_shared<GpuCacheMgr>(gpu_id)).first;
    }
    return it->second;
}
void
GpuCacheMgr::OnGpuCacheCapacityChanged(int64_t capacity) {
for (auto& iter : instance_) {
iter.second->SetCapacity(capacity * G_BYTE);
}
}
#endif
} // namespace cache
} // namespace milvus

View File

@ -1,62 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <utility>
#include "cache/CacheMgr.h"
#include "cache/DataObj.h"
#include "config/handler/GpuResourceConfigHandler.h"
namespace milvus {
namespace cache {
#ifdef MILVUS_GPU_VERSION
class GpuCacheMgr;
using GpuCacheMgrPtr = std::shared_ptr<GpuCacheMgr>;
using MutexPtr = std::shared_ptr<std::mutex>;
// Cache manager for a single GPU device. One instance exists per GPU id,
// obtained through GetInstance(); instances react to runtime configuration
// changes via the GpuResourceConfigHandler callbacks.
class GpuCacheMgr : public CacheMgr<DataObjPtr>, public server::GpuResourceConfigHandler {
public:
explicit GpuCacheMgr(int64_t gpu_id);
~GpuCacheMgr();
// Fetch a cached index object by key (miss semantics come from
// CacheMgr::GetItem -- see CacheMgr).
DataObjPtr
GetIndex(const std::string& key);
// Insert is a no-op when GPU usage is disabled in the config.
void
InsertItem(const std::string& key, const DataObjPtr& data);
// Try to make room for `size` bytes; forwards to CacheMgr.
bool
Reserve(const int64_t size);
// Lazily-created singleton, one per GPU id.
static GpuCacheMgrPtr
GetInstance(int64_t gpu_id);
protected:
// Config callback: new capacity is expressed in GB.
void
OnGpuCacheCapacityChanged(int64_t capacity) override;
private:
bool gpu_enable_ = true;  // mirrors the gpu-enable config switch
int64_t gpu_id_;
static std::mutex global_mutex_;  // guards instance_
static std::unordered_map<int64_t, GpuCacheMgrPtr> instance_;
// NOTE(review): may duplicate/shadow an identity_ member in
// GpuResourceConfigHandler (SetIdentity is called in the ctor) --
// verify which one the destructor's CancelCallBack actually uses.
std::string identity_;
};
#endif
} // namespace cache
} // namespace milvus

116
core/src/cache/LRU.h vendored
View File

@ -1,116 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#pragma once
#include <cstddef>
#include <list>
#include <stdexcept>
#include <unordered_map>
#include <utility>
namespace milvus {
namespace cache {
// Fixed-capacity least-recently-used map.
//
// put() inserts or overwrites a key and marks it most-recently-used; when
// the map exceeds max_size the least-recently-used entry is evicted. get()
// also refreshes recency and throws std::range_error for a missing key.
// Iteration (begin/rbegin) walks entries from most- to least-recently-used.
//
// Not thread-safe; callers must synchronize externally.
template <typename key_t, typename value_t>
class LRU {
 public:
    typedef typename std::pair<key_t, value_t> key_value_pair_t;
    typedef typename std::list<key_value_pair_t>::iterator list_iterator_t;
    typedef typename std::list<key_value_pair_t>::reverse_iterator reverse_list_iterator_t;

    explicit LRU(size_t max_size) : max_size_(max_size) {
    }

    // Insert (or overwrite) key -> value and mark it most-recently-used.
    // Evicts the LRU entry when capacity is exceeded.
    void
    put(const key_t& key, const value_t& value) {
        auto it = cache_items_map_.find(key);
        if (it != cache_items_map_.end()) {
            // Key already present: overwrite in place and relink the node to
            // the front. splice avoids the node reallocation the previous
            // erase+push_front implementation paid on every overwrite.
            it->second->second = value;
            cache_items_list_.splice(cache_items_list_.begin(), cache_items_list_, it->second);
            return;
        }
        cache_items_list_.push_front(key_value_pair_t(key, value));
        cache_items_map_[key] = cache_items_list_.begin();
        if (cache_items_map_.size() > max_size_) {
            auto last = cache_items_list_.end();
            --last;
            cache_items_map_.erase(last->first);
            cache_items_list_.pop_back();
        }
    }

    // Return a reference to the value for `key`, marking it most-recently-
    // used. Throws std::range_error when the key is absent.
    const value_t&
    get(const key_t& key) {
        auto it = cache_items_map_.find(key);
        if (it == cache_items_map_.end()) {
            throw std::range_error("There is no such key in cache");
        }
        // splice only relinks the node, so it->second stays valid.
        cache_items_list_.splice(cache_items_list_.begin(), cache_items_list_, it->second);
        return it->second->second;
    }

    // Remove `key` if present; no-op otherwise.
    void
    erase(const key_t& key) {
        auto it = cache_items_map_.find(key);
        if (it != cache_items_map_.end()) {
            cache_items_list_.erase(it->second);
            cache_items_map_.erase(it);
        }
    }

    bool
    exists(const key_t& key) const {
        return cache_items_map_.find(key) != cache_items_map_.end();
    }

    size_t
    size() const {
        return cache_items_map_.size();
    }

    // Iterators walk from most- to least-recently-used.
    // (The dead iter_ member the old begin() stored into has been removed.)
    list_iterator_t
    begin() {
        return cache_items_list_.begin();
    }

    list_iterator_t
    end() {
        return cache_items_list_.end();
    }

    reverse_list_iterator_t
    rbegin() {
        return cache_items_list_.rbegin();
    }

    reverse_list_iterator_t
    rend() {
        return cache_items_list_.rend();
    }

    void
    clear() {
        cache_items_list_.clear();
        cache_items_map_.clear();
    }

 private:
    std::list<key_value_pair_t> cache_items_list_;                // recency order, MRU at front
    std::unordered_map<key_t, list_iterator_t> cache_items_map_;  // key -> list node
    size_t max_size_;
};
} // namespace cache
} // namespace milvus

View File

@ -1,44 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include <memory>
#include <vector>
#include "segment/Attrs.h"
#include "storage/FSHandler.h"
namespace milvus {
namespace codec {
// Serialization interface for a segment's attribute (scalar field) data.
class AttrsFormat {
public:
// Load all attributes of a segment from storage into attrs_read.
virtual void
read(const storage::FSHandlerPtr& fs_ptr, segment::AttrsPtr& attrs_read) = 0;
// Persist the given attributes to storage.
virtual void
write(const storage::FSHandlerPtr& fs_ptr, const segment::AttrsPtr& attr) = 0;
// Load only the entity uid column.
virtual void
read_uids(const storage::FSHandlerPtr& fs_ptr, std::vector<int64_t>& uids) = 0;
};
using AttrsFormatPtr = std::shared_ptr<AttrsFormat>;
} // namespace codec
} // namespace milvus

View File

@ -1,33 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
namespace milvus {
namespace codec {
// Placeholder for the attribute-index serialization interface. The intended
// API is sketched in the commented-out block below but not yet implemented.
class AttrsIndexFormat {
// public:
// virtual AttrsIndex
// read() = 0;
//
// virtual void
// write(AttrsIndex attrs_index) = 0;
};
} // namespace codec
} // namespace milvus

View File

@ -1,63 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include "AttrsFormat.h"
#include "AttrsIndexFormat.h"
#include "DeletedDocsFormat.h"
#include "IdBloomFilterFormat.h"
#include "IdIndexFormat.h"
#include "VectorIndexFormat.h"
#include "VectorsFormat.h"
namespace milvus {
namespace codec {
// Abstract factory bundling the per-format codecs used to (de)serialize a
// segment: raw vectors, attributes, vector indexes, deleted-doc lists and
// id bloom filters. Concrete codec versions implement these accessors.
class Codec {
public:
virtual VectorsFormatPtr
GetVectorsFormat() = 0;
virtual AttrsFormatPtr
GetAttrsFormat() = 0;
virtual VectorIndexFormatPtr
GetVectorIndexFormat() = 0;
virtual DeletedDocsFormatPtr
GetDeletedDocsFormat() = 0;
virtual IdBloomFilterFormatPtr
GetIdBloomFilterFormat() = 0;
// TODO(zhiru)
/*
virtual AttrsFormat
GetAttrsFormat() = 0;
virtual AttrsIndexFormat
GetAttrsIndexFormat() = 0;
virtual IdIndexFormat
GetIdIndexFormat() = 0;
*/
};
} // namespace codec
} // namespace milvus

Some files were not shown because too many files have changed in this diff Show More