Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

v2.3.4 #82

Merged
merged 1 commit into from
Jan 16, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ jobs:
CCACHE_COMPRESS: 1
CCACHE_COMPRESSLEVEL: 5
CCACHE_MAXSIZE: 2G
timeout-minutes: 360
timeout-minutes: 900
steps:
- name: Checkout
uses: actions/checkout@v3
Expand Down
2 changes: 1 addition & 1 deletion milvus_binary/env.sh
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@

MILVUS_REPO="https://github.com/milvus-io/milvus.git"
MILVUS_VERSION="v2.3.3"
MILVUS_VERSION="v2.3.4"
BUILD_PROXY=
BUILD_FORCE=NO
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
From 3ef7229943e2a879d845467d31b616aabe1bcf53 Mon Sep 17 00:00:00 2001
From: Ji Bin <[email protected]>
Date: Mon, 13 Nov 2023 23:20:16 +0800
Subject: [PATCH 1/2] link with CoreServices for macos

Signed-off-by: Ji Bin <[email protected]>
---
internal/core/src/storage/azure-blob-storage/CMakeLists.txt | 5 +++++
1 file changed, 5 insertions(+)

diff --git a/internal/core/src/storage/azure-blob-storage/CMakeLists.txt b/internal/core/src/storage/azure-blob-storage/CMakeLists.txt
index 7f27d5838..bde7f4457 100644
--- a/internal/core/src/storage/azure-blob-storage/CMakeLists.txt
+++ b/internal/core/src/storage/azure-blob-storage/CMakeLists.txt
@@ -25,6 +25,11 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter -Wno-return-type -
add_library(blob-chunk-manager SHARED AzureBlobChunkManager.cpp)
target_link_libraries(blob-chunk-manager PUBLIC Azure::azure-identity Azure::azure-storage-blobs)

+if (APPLE)
+ find_library(CORESERVICES_LIBRARY CoreServices)
+ target_link_libraries(blob-chunk-manager PRIVATE ${CORESERVICES_LIBRARY})
+endif (APPLE)
+
install(TARGETS blob-chunk-manager DESTINATION "${CMAKE_INSTALL_LIBDIR}")

if ( BUILD_UNIT_TEST STREQUAL "ON" )
--
2.43.0

25 changes: 25 additions & 0 deletions milvus_binary/patches/milvus-v2.3.4/0002-fix-for-gettid.patch
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
From e5556b236823272485da41e3fb092957e4d7387a Mon Sep 17 00:00:00 2001
From: Ji Bin <[email protected]>
Date: Sun, 5 Nov 2023 18:49:29 +0800
Subject: [PATCH 2/2] fix for gettid

Signed-off-by: Ji Bin <[email protected]>
---
internal/core/thirdparty/knowhere/CMakeLists.txt | 1 +
1 file changed, 1 insertion(+)

diff --git a/internal/core/thirdparty/knowhere/CMakeLists.txt b/internal/core/thirdparty/knowhere/CMakeLists.txt
index 80781717c..c46b50783 100644
--- a/internal/core/thirdparty/knowhere/CMakeLists.txt
+++ b/internal/core/thirdparty/knowhere/CMakeLists.txt
@@ -41,6 +41,7 @@ FetchContent_Declare(
GIT_TAG ${KNOWHERE_VERSION}
SOURCE_DIR ${CMAKE_CURRENT_BINARY_DIR}/knowhere-src
BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/knowhere-build
+ PATCH_COMMAND patch -p1 < ${CMAKE_SOURCE_DIR}/../../../patches/knowhere-v2.2.2.patch
DOWNLOAD_DIR ${THIRDPARTY_DOWNLOAD_PATH} )

FetchContent_GetProperties( knowhere )
--
2.43.0

2 changes: 1 addition & 1 deletion src/milvus/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
import json
import hashlib

__version__ = '2.3.3'
__version__ = '2.3.4'

LOGGERS = {}

Expand Down
52 changes: 44 additions & 8 deletions src/milvus/data/config.yaml.template
Original file line number Diff line number Diff line change
Expand Up @@ -201,9 +201,31 @@ proxy:
maxTaskNum: 1024 # max task number of proxy task queue
accessLog:
enable: false
filename: milvus_access_log.log # Log filename, leave empty to disable file log.
localPath: {{ system_log_path }}
# maxSize: 64 # max log file size of a single log file to trigger rotate.
# Log filename, set as "" to use stdout.
# filename: ""
# define formatters for access log by XXX:{format: XXX, method:[XXX,XXX]}
formatters:
# "base" formatter could not set methods
# all method will use "base" formatter default
base:
# will not print access log if set as ""
format: "[$time_now] [ACCESS] <$user_name: $user_addr> $method_name [status: $method_status] [code: $error_code] [sdk: $sdk_version] [msg: $error_msg] [traceID: $trace_id] [timeCost: $time_cost]"
query:
format: "[$time_now] [ACCESS] <$user_name: $user_addr> $method_name [status: $method_status] [code: $error_code] [sdk: $sdk_version] [msg: $error_msg] [traceID: $trace_id] [timeCost: $time_cost] [database: $database_name] [collection: $collection_name] [partitions: $partition_name] [expr: $method_expr]"
# set formatter owners by method name(method was all milvus external interface)
# all method will use base formatter default
# one method only could use one formatter
# if a method formatter is set multiple times, a random formatter will be used.
methods: ["Query", "Search", "Delete"]
# localPath: /tmp/milvus_accesslog // log file rootpath
# maxSize: 64 # max log file size(MB) of a single log file, mean close when time <= 0.
# rotatedTime: 0 # max time range of a single log file, mean close when time <= 0;
# maxBackups: 8 # num of reserved backups. will rotate and create a new backup when access log file trigger maxSize or rotatedTime.
# cacheSize: 10240 # write cache of accesslog in Byte

# minioEnable: false # upload backups to milvus minio when minioEnable is true.
# remotePath: "access_log/" # file path when uploading backups to minio
# remoteMaxTime: 0 # max time range(in Hour) of backups in minio, 0 means close time retention.
http:
enabled: true # Whether to enable the http server
debug_mode: false # Whether to enable http server debug mode
Expand Down Expand Up @@ -264,11 +286,12 @@ queryNode:
# Use more threads to make better use of SSD throughput in disk index.
# This parameter is only useful when enable-disk = true.
# And this value should be a number greater than 1 and less than 32.
chunkRows: 1024 # The number of vectors in a chunk.
growing: # growing a vector index for growing segment to accelerate search
chunkRows: 128 # The number of vectors in a chunk.
interimIndex: # build a temporary vector index for growing segment or binlog to accelerate search
enableIndex: true
nlist: 128 # growing segment index nlist
nprobe: 16 # nprobe to search growing segment, based on your accuracy requirement, must smaller than nlist
nlist: 128 # segment index nlist
nprobe: 16 # nprobe to search segment, based on your accuracy requirement, must be smaller than nlist
memExpansionRate: 1.15 # the ratio of building interim index memory usage to raw data
loadMemoryUsageFactor: 1 # The multiply factor of calculating the memory usage while loading segments
enableDisk: false # enable querynode load disk index, and search on disk index
maxDiskUsagePercentage: 95
Expand Down Expand Up @@ -402,6 +425,12 @@ dataNode:
maxQueueLength: 16 # Maximum length of task queue in flowgraph
maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
maxParallelSyncTaskNum: 6 # Maximum number of sync tasks executed in parallel in each flush manager
skipMode:
# when there are only timetick msg in flowgraph for a while (longer than coldTime),
# flowGraph will turn on skip mode to skip most timeticks to reduce cost, especially there are a lot of channels
enable: true
skipNum: 4
coldTime: 60
segment:
insertBufSize: 16777216 # Max buffer size to flush for a single segment.
deleteBufBytes: 67108864 # Max buffer size to flush del for a single channel
Expand All @@ -427,6 +456,10 @@ dataNode:
# if this parameter <= 0, will set it as the maximum number of CPUs that can be executing
# suggest to set it bigger on large collection numbers to avoid blocking
workPoolSize: -1
# specify the size of global work pool for channel checkpoint updating
# if this parameter <= 0, will set it as 1000
# suggest to set it bigger on large collection numbers to avoid blocking
updateChannelCheckpointMaxParallel: 1000

# Configures the system log output.
log:
Expand Down Expand Up @@ -662,12 +695,15 @@ quotaAndLimits:

trace:
# trace exporter type, default is stdout,
# optional values: ['stdout', 'jaeger']
# optional values: ['stdout', 'jaeger', 'otlp']
exporter: stdout
# fraction of traceID based sampler,
# optional values: [0, 1]
# Fractions >= 1 will always sample. Fractions < 0 are treated as zero.
sampleFraction: 0
otlp:
endpoint: # "127.0.0.1:4318"
secure: true
jaeger:
url: # "http://127.0.0.1:14268/api/traces"
# when exporter is jaeger should set the jaeger's URL
Expand Down
Loading