diff --git a/.github/mergify.yml b/.github/mergify.yml index 42b74125d..3044530a4 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -4,10 +4,12 @@ pull_request_rules: - or: - base=master - base~=2\.\d - - "status-success=Run Python Tests (3.8)" + - "status-success=Run Python Tests (3.8, windows-latest)" + - "status-success=Run Python Tests (3.12, windows-latest)" + - "status-success=Run Python Tests (3.8, ubuntu-latest)" + - "status-success=Run Python Tests (3.12, ubuntu-latest)" - "status-success=Run Check Proto (3.8)" - "status-success=Code lint check (3.8)" - - "status-success=Run Python Tests (3.12)" - "status-success=Run Check Proto (3.12)" - "status-success=Code lint check (3.12)" actions: diff --git a/.github/workflows/check_milvus_proto.yml b/.github/workflows/check_milvus_proto.yml index b44c8d3fc..2c6239f50 100644 --- a/.github/workflows/check_milvus_proto.yml +++ b/.github/workflows/check_milvus_proto.yml @@ -26,10 +26,11 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install -e . + pip install -e ".[dev]" - name: Try generate proto run: | git submodule update --init + make gen_proto make check_proto_product diff --git a/.github/workflows/code_checker.yml b/.github/workflows/code_checker.yml index aaeee55ac..8a15b70d4 100644 --- a/.github/workflows/code_checker.yml +++ b/.github/workflows/code_checker.yml @@ -19,12 +19,12 @@ jobs: uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: check pyproject.toml install + - name: Check pyproject.toml install run: | pip install -e . - name: Install requirements run: | - pip install -r requirements.txt + pip install -e ".[dev]" - name: Run pylint shell: bash run: | diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 01aaba7ba..73cc36cb3 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -8,10 +8,11 @@ on: jobs: build: name: Run Python Tests - runs-on: ubuntu-latest strategy: matrix: python-version: [3.8, 3.12] + os: [ubuntu-latest, windows-latest] + runs-on: ${{ matrix.os }} steps: - name: Checkout code @@ -28,7 +29,7 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install -e ".[test]" + pip install -e ".[dev]" - name: Test with pytest run: | diff --git a/Makefile b/Makefile index 8fcea583c..642fa4043 100644 --- a/Makefile +++ b/Makefile @@ -2,12 +2,12 @@ unittest: PYTHONPATH=`pwd` python3 -m pytest tests --cov=pymilvus -v lint: - PYTHONPATH=`pwd` black pymilvus --check - PYTHONPATH=`pwd` ruff check pymilvus + PYTHONPATH=`pwd` python3 -m black pymilvus --check + PYTHONPATH=`pwd` python3 -m ruff check pymilvus format: - PYTHONPATH=`pwd` black pymilvus - PYTHONPATH=`pwd` ruff check pymilvus --fix + PYTHONPATH=`pwd` python3 -m black pymilvus + PYTHONPATH=`pwd` python3 -m ruff check pymilvus --fix codecov: PYTHONPATH=`pwd` pytest --cov=pymilvus --cov-report=xml tests -x -v -rxXs @@ -25,6 +25,7 @@ get_proto: git submodule update --init gen_proto: + pip install -e ".[dev]" cd pymilvus/grpc_gen && ./python_gen.sh check_proto_product: gen_proto diff --git a/README.md b/README.md index 1d99640cb..15bdd5519 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ The following collection shows Milvus versions and recommended PyMilvus versions | 2.1.\* | 2.1.3 | | 2.2.\* | 2.2.15 | | 2.3.\* | 2.3.7 | -| 2.4.\* | 2.4.0 | +| 2.4.\* | 2.4.9 | ## Installation @@ -37,12 +37,13 @@ You can install PyMilvus via `pip` or `pip3` for Python 3.8+: ```shell 
$ pip3 install pymilvus $ pip3 install pymilvus[model] # for milvus-model +$ pip3 install pymilvus[bulk_writer] # for bulk_writer ``` You can install a specific version of PyMilvus by: ```shell -$ pip3 install pymilvus==2.3.7 +$ pip3 install pymilvus==2.4.9 ``` You can upgrade PyMilvus to the latest version by: @@ -62,8 +63,6 @@ $ git submodule update --init Q2. How to generate python files from milvus-proto? -**Before generating python files, please install requirements in `requirements.txt`** - A2. ```shell $ make gen_proto @@ -94,10 +93,10 @@ Q6. How to run unittests? A6 ```shell -$ pip install ".[test]" +$ pip install ".[dev]" $ make unittest ``` -Q7. `zsh: no matches found: pymilvus[model]` in mac, how do I solve this? +Q7. `zsh: no matches found: pymilvus[model]`, how do I solve this? A7 ```shell diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 000000000..df635b4e6 --- /dev/null +++ b/examples/README.md @@ -0,0 +1 @@ +# Examples diff --git a/examples/milvus_client/alias.py b/examples/alias.py similarity index 100% rename from examples/milvus_client/alias.py rename to examples/alias.py diff --git a/examples/bm25.py b/examples/bm25.py new file mode 100644 index 000000000..b4ff796a7 --- /dev/null +++ b/examples/bm25.py @@ -0,0 +1,89 @@ +from pymilvus import ( + MilvusClient, + Function, + FunctionType, + DataType, +) + +fmt = "\n=== {:30} ===\n" +collection_name = "doc_in_doc_out" +milvus_client = MilvusClient("http://localhost:19530") + +has_collection = milvus_client.has_collection(collection_name, timeout=5) +if has_collection: + milvus_client.drop_collection(collection_name) + +schema = milvus_client.create_schema() +schema.add_field("id", DataType.INT64, is_primary=True, auto_id=False) +schema.add_field("document_content", DataType.VARCHAR, max_length=9000, enable_analyzer=True) +schema.add_field("sparse_vector", DataType.SPARSE_FLOAT_VECTOR) + +bm25_function = Function( + name="bm25_fn", + input_field_names=["document_content"], + output_field_names="sparse_vector", + function_type=FunctionType.BM25, +) +schema.add_function(bm25_function) + +index_params = milvus_client.prepare_index_params() +index_params.add_index( + field_name="sparse_vector", + index_name="sparse_inverted_index", + index_type="SPARSE_INVERTED_INDEX", + metric_type="BM25", + params={"bm25_k1": 1.2, "bm25_b": 0.75}, +) + +ret = milvus_client.create_collection(collection_name, schema=schema, index_params=index_params, consistency_level="Strong") +print(ret) + +print(fmt.format(" all collections ")) +print(milvus_client.list_collections()) + +print(fmt.format(f"schema of collection {collection_name}")) +print(milvus_client.describe_collection(collection_name)) + +rows = [ + {"id": 1, "document_content": "hello world"}, + {"id": 2, "document_content": "hello milvus"}, + {"id": 3, "document_content": "hello zilliz"}, +] + +print(fmt.format("Start inserting entities")) +insert_result = milvus_client.insert(collection_name, rows, progress_bar=True) +print(fmt.format("Inserting entities done")) +print(insert_result) + +texts_to_search = ["zilliz"] +search_params = { + "metric_type": "BM25", + "params": {} +} +print(fmt.format("Start search, retrieving several output fields")) +result = milvus_client.search(collection_name, texts_to_search, limit=3, output_fields=["document_content"], search_params=search_params) +for hits in result: + for hit in hits: + print(f"hit: {hit}") + +print(fmt.format("Start query by specifying primary keys")) +query_results = milvus_client.query(collection_name,
ids=[3]) +print(query_results[0]) + +upsert_ret = milvus_client.upsert(collection_name, {"id": 2 , "document_content": "hello milvus again"}) +print(upsert_ret) + +print(fmt.format("Start query by specifying filtering expression")) +query_results = milvus_client.query(collection_name, filter="document_content == 'hello milvus again'") +for ret in query_results: + print(ret) + +print(f"start to delete by specifying filter in collection {collection_name}") +delete_result = milvus_client.delete(collection_name, ids=[3]) +print(delete_result) + +print(fmt.format("Start query by specifying filtering expression")) +query_results = milvus_client.query(collection_name, filter="document_content == 'hello zilliz'") +print(f"Query results after deletion: {query_results}") + +milvus_client.drop_collection(collection_name) diff --git a/examples/bulk_import/example_bulkinsert_csv.py b/examples/bulk_import/example_bulkinsert_csv.py new file mode 100644 index 000000000..f8effcb1e --- /dev/null +++ b/examples/bulk_import/example_bulkinsert_csv.py @@ -0,0 +1,403 @@ +import random +import json +import csv +import time +import os + +from minio import Minio +from minio.error import S3Error + +from pymilvus import ( + connections, + FieldSchema, CollectionSchema, DataType, + Collection, + utility, + BulkInsertState, +) + + +LOCAL_FILES_PATH = "/tmp/milvus_bulkinsert" + +# Milvus service address +_HOST = '127.0.0.1' +_PORT = '19530' + +# Const names +_COLLECTION_NAME = 'demo_bulk_insert_csv' +_ID_FIELD_NAME = 'id_field' +_VECTOR_FIELD_NAME = 'float_vector_field' +_JSON_FIELD_NAME = "json_field" +_VARCHAR_FIELD_NAME = "varchar_field" +_DYNAMIC_FIELD_NAME = "$meta" # dynamic field, the internal name is "$meta", enable_dynamic_field=True + +# minio +DEFAULT_BUCKET_NAME = "a-bucket" +MINIO_ADDRESS = "0.0.0.0:9000" +MINIO_SECRET_KEY = "minioadmin" +MINIO_ACCESS_KEY = "minioadmin" + +# Vector field parameter +_DIM = 128 + +# to generate increment ID +id_start = 1 + +# Create a Milvus connection +def create_connection(): + retry = True + while retry: + try: + print(f"\nCreate connection...") + connections.connect(host=_HOST, port=_PORT) + retry = False + except Exception as e: + print("Cannot connect to Milvus. Error: " + str(e)) + print(f"Cannot connect to Milvus. Trying to connect Again. 
Sleeping for: 1") + time.sleep(1) + + print(f"\nList connections:") + print(connections.list_connections()) + +# Create a collection +def create_collection(has_partition_key: bool): + field1 = FieldSchema(name=_ID_FIELD_NAME, dtype=DataType.INT64, description="int64", is_primary=True, auto_id=False) + field2 = FieldSchema(name=_VECTOR_FIELD_NAME, dtype=DataType.FLOAT_VECTOR, description="float vector", dim=_DIM, + is_primary=False) + field3 = FieldSchema(name=_JSON_FIELD_NAME, dtype=DataType.JSON) + # if has partition key, we use this varchar field as partition key field + field4 = FieldSchema(name=_VARCHAR_FIELD_NAME, dtype=DataType.VARCHAR, max_length=256, is_partition_key=has_partition_key) + schema = CollectionSchema(fields=[field1, field2, field3, field4], enable_dynamic_field=True) + if has_partition_key: + collection = Collection(name=_COLLECTION_NAME, schema=schema, num_partitions=10) + else: + collection = Collection(name=_COLLECTION_NAME, schema=schema) + print("\nCollection created:", _COLLECTION_NAME) + return collection + +# Test existence of a collection +def has_collection(): + return utility.has_collection(_COLLECTION_NAME) + + +# Drop a collection in Milvus +def drop_collection(): + collection = Collection(_COLLECTION_NAME) + collection.drop() + print("\nDrop collection:", _COLLECTION_NAME) + + +# List all collections in Milvus +def list_collections(): + print("\nList collections:") + print(utility.list_collections()) + +# Create a partition +def create_partition(collection, partition_name): + collection.create_partition(partition_name=partition_name) + print("\nPartition created:", partition_name) + return collection.partition(partition_name) + +def gen_csv_rowbased(num, path, partition_name, sep=","): + global id_start + header = [_ID_FIELD_NAME, _JSON_FIELD_NAME, _VECTOR_FIELD_NAME, _VARCHAR_FIELD_NAME, _DYNAMIC_FIELD_NAME] + rows = [] + for i in range(num): + rows.append([ + id_start, # id field + json.dumps({"Number": id_start, "Name": "book_"+str(id_start)}), # json field + [round(random.random(), 6) for _ in range(_DIM)], # vector field + "{}_{}".format(partition_name, id_start) if partition_name is not None else "description_{}".format(id_start), # varchar field + json.dumps({"dynamic_field": id_start}), # no field matches this value, this value will be put into dynamic field + ]) + id_start = id_start + 1 + data = [header] + rows + with open(path, "w") as f: + writer = csv.writer(f, delimiter=sep) + for row in data: + writer.writerow(row) + + +def bulk_insert_rowbased(row_count_per_file, file_count, partition_name = None): + # make sure the files folder is created + os.makedirs(name=LOCAL_FILES_PATH, exist_ok=True) + + task_ids = [] + for i in range(file_count): + data_folder = os.path.join(LOCAL_FILES_PATH, "csv_{}".format(i)) + os.makedirs(name=data_folder, exist_ok=True) + file_path = os.path.join(data_folder, "csv_{}.csv".format(i)) + print("Generate csv file:", file_path) + sep ="\t" + gen_csv_rowbased(row_count_per_file, file_path, partition_name, sep) + + ok, remote_files = upload(data_folder=data_folder) + if ok: + print("Import csv file:", remote_files) + task_id = utility.do_bulk_insert(collection_name=_COLLECTION_NAME, + partition_name=partition_name, + files=remote_files, + sep=sep) + task_ids.append(task_id) + + return wait_tasks_competed(task_ids) + +# Wait all bulkinsert tasks to be a certain state +# return the states of all the tasks, including failed task +def wait_tasks_to_state(task_ids, state_code): + wait_ids = task_ids + states = [] + while 
True: + time.sleep(2) + temp_ids = [] + for id in wait_ids: + state = utility.get_bulk_insert_state(task_id=id) + if state.state == BulkInsertState.ImportFailed or state.state == BulkInsertState.ImportFailedAndCleaned: + print(state) + print("The task", state.task_id, "failed, reason:", state.failed_reason) + continue + + if state.state >= state_code: + states.append(state) + continue + + temp_ids.append(id) + + wait_ids = temp_ids + if len(wait_ids) == 0: + break + print("Wait {} tasks to be state: {}. Next round check".format(len(wait_ids), BulkInsertState.state_2_name.get(state_code, "unknown"))) + + return states + +# If the state of bulkinsert task is BulkInsertState.ImportCompleted, that means the data file has been parsed and data has been persisted, +# some segments have been created and waiting for index. +# ImportCompleted state doesn't mean the data is queryable, to query the data, you need to wait until the segment is +# indexed successfully and loaded into memory. +def wait_tasks_competed(task_ids): + print("=========================================================================================================") + states = wait_tasks_to_state(task_ids, BulkInsertState.ImportCompleted) + complete_count = 0 + for state in states: + if state.state == BulkInsertState.ImportCompleted: + complete_count = complete_count + 1 + # print(state) + # if you want to get the auto-generated primary keys, use state.ids to fetch + # print("Auto-generated ids:", state.ids) + + print("{} of {} tasks have successfully generated segments, able to be compacted and indexed as normal".format(complete_count, len(task_ids))) + print("=========================================================================================================\n") + return states + +# List all bulkinsert tasks, including pending tasks, working tasks and finished tasks. +# the parameter 'limit' is: how many latest tasks should be returned, if the limit<=0, all the tasks will be returned +def list_all_bulk_insert_tasks(collection_name=_COLLECTION_NAME, limit=0): + tasks = utility.list_bulk_insert_tasks(limit=limit, collection_name=collection_name) + print("=========================================================================================================") + print("List bulkinsert tasks with limit", limit) + pending = 0 + started = 0 + persisted = 0 + completed = 0 + failed = 0 + for task in tasks: + print(task) + if task.state == BulkInsertState.ImportPending: + pending = pending + 1 + elif task.state == BulkInsertState.ImportStarted: + started = started + 1 + elif task.state == BulkInsertState.ImportPersisted: + persisted = persisted + 1 + elif task.state == BulkInsertState.ImportCompleted: + completed = completed + 1 + elif task.state == BulkInsertState.ImportFailed: + failed = failed + 1 + print("There are {} bulkinsert tasks: {} pending, {} started, {} persisted, {} completed, {} failed" + .format(len(tasks), pending, started, persisted, completed, failed)) + print("=========================================================================================================\n") + +# Get collection row count. 
+def get_entity_num(collection): + print("=========================================================================================================") + print("The number of entity:", collection.num_entities) + +# Specify an index type +def create_index(collection): + print("Start Creating index IVF_FLAT") + index = { + "index_type": "IVF_FLAT", + "metric_type": "L2", + "params": {"nlist": 128}, + } + collection.create_index(_VECTOR_FIELD_NAME, index) + +# Load collection data into memory. If collection is not loaded, the search() and query() methods will return error. +def load_collection(collection): + collection.load() + +# Release collection data to free memory. +def release_collection(collection): + collection.release() + +# ANN search +def search(collection, search_vector, expr = None, consistency_level = "Eventually"): + search_param = { + "expr": expr, + "data": [search_vector], + "anns_field": _VECTOR_FIELD_NAME, + "param": {"metric_type": "L2", "params": {"nprobe": 10}}, + "limit": 5, + "output_fields": [_JSON_FIELD_NAME, _VARCHAR_FIELD_NAME, _DYNAMIC_FIELD_NAME], + "consistency_level": consistency_level, + } + print("search..." if expr is None else "hybrid search...") + results = collection.search(**search_param) + print("=========================================================================================================") + result = results[0] + for j, res in enumerate(result): + print(f"\ttop{j}: {res}") + print("\thits count:", len(result)) + print("=========================================================================================================\n") + +# Delete entities +def delete(collection, ids): + print("=========================================================================================================\n") + print("Delete these entities:", ids) + expr = _ID_FIELD_NAME + " in " + str(ids) + collection.delete(expr=expr) + print("=========================================================================================================\n") + +# Retrieve entities +def retrieve(collection, ids): + print("=========================================================================================================") + print("Retrieve these entities:", ids) + expr = _ID_FIELD_NAME + " in " + str(ids) + result = collection.query(expr=expr, output_fields=[_JSON_FIELD_NAME, _VARCHAR_FIELD_NAME, _VECTOR_FIELD_NAME, _DYNAMIC_FIELD_NAME]) + for item in result: + print(item) + print("=========================================================================================================\n") + return result + +# Upload data files to minio +def upload(data_folder: str, + bucket_name: str=DEFAULT_BUCKET_NAME)->(bool, list): + if not os.path.exists(data_folder): + print("Data path '{}' doesn't exist".format(data_folder)) + return False, [] + + remote_files = [] + try: + print("Prepare upload files") + minio_client = Minio(endpoint=MINIO_ADDRESS, access_key=MINIO_ACCESS_KEY, secret_key=MINIO_SECRET_KEY, secure=False) + found = minio_client.bucket_exists(bucket_name) + if not found: + print("MinIO bucket '{}' doesn't exist".format(bucket_name)) + return False, [] + + remote_data_path = "milvus_bulkinsert" + def upload_files(folder:str): + for parent, dirnames, filenames in os.walk(folder): + if parent is folder: + for filename in filenames: + ext = os.path.splitext(filename) + if len(ext) != 2 or (ext[1] != ".json" and ext[1] != ".npy" and ext[1] != ".csv"): + continue + local_full_path = os.path.join(parent, filename) + minio_file_path = os.path.join(remote_data_path, 
os.path.basename(folder), filename) + minio_client.fput_object(bucket_name, minio_file_path, local_full_path) + print("Upload file '{}' to '{}'".format(local_full_path, minio_file_path)) + remote_files.append(minio_file_path) + for dir in dirnames: + upload_files(os.path.join(parent, dir)) + + upload_files(data_folder) + + except S3Error as e: + print("Failed to connect to MinIO server {}, error: {}".format(MINIO_ADDRESS, e)) + return False, [] + + print("Successfully uploaded files: {}".format(remote_files)) + return True, remote_files + +def main(has_partition_key: bool): + # create a connection + create_connection() + + # drop collection if the collection exists + if has_collection(): + drop_collection() + + # create collection + collection = create_collection(has_partition_key) + + # specify an index type + create_index(collection) + + + # load data to memory + load_collection(collection) + + # show collections + list_collections() + + # do bulk_insert, wait all tasks finish persisting + row_count_per_file = 100 + if has_partition_key: + # automatically partitioning + bulk_insert_rowbased(row_count_per_file=row_count_per_file, file_count=2) + else: + # bulkinsert into the default partition + bulk_insert_rowbased(row_count_per_file=row_count_per_file, file_count=1) + + # create a partition, bulkinsert into the partition + a_partition = "part_1" + create_partition(collection, a_partition) + bulk_insert_rowbased(row_count_per_file=row_count_per_file, file_count=1, partition_name=a_partition) + + # list all tasks + list_all_bulk_insert_tasks() + + # get the number of entities + get_entity_num(collection) + + print("Waiting for index build to complete and refreshing segment list to load...") + utility.wait_for_index_building_complete(_COLLECTION_NAME) + collection.load(_refresh = True) + + # pick some entities + pick_ids = [50, row_count_per_file + 99] + id_vectors = retrieve(collection, pick_ids) + + # search the picked entities, they are in result at the top0 + for id_vector in id_vectors: + id = id_vector[_ID_FIELD_NAME] + vector = id_vector[_VECTOR_FIELD_NAME] + print("Search id:", id, ", compare this id to the top0 of search result, they are equal") + search(collection, vector) + + # delete the picked entities + delete(collection, pick_ids) + + # search the deleted entities, they are not in result anymore + for id_vector in id_vectors: + id = id_vector[_ID_FIELD_NAME] + vector = id_vector[_VECTOR_FIELD_NAME] + print("Search id:", id, ", compare this id to the top0 result, they are not equal since the id has been deleted") + # here we use Strong consistency level to do search, because we need to make sure the delete operation is applied + search(collection, vector, consistency_level="Strong") + + # search by filtering the varchar field + vector = [round(random.random(), 6) for _ in range(_DIM)] + search(collection, vector, expr="{} like \"description_33%\"".format(_VARCHAR_FIELD_NAME)) + + # release memory + release_collection(collection) + + # drop collection + drop_collection() + + +if __name__ == '__main__': + # change this value if you want to test bulkinsert with partition key + # Note: bulkinsert supports partition key from Milvus v2.2.12 + has_partition_key = False + main(has_partition_key) diff --git a/examples/example_bulkinsert_json.py b/examples/bulk_import/example_bulkinsert_json.py similarity index 100% rename from examples/example_bulkinsert_json.py rename to examples/bulk_import/example_bulkinsert_json.py diff --git a/examples/example_bulkinsert_numpy.py
b/examples/bulk_import/example_bulkinsert_numpy.py similarity index 100% rename from examples/example_bulkinsert_numpy.py rename to examples/bulk_import/example_bulkinsert_numpy.py diff --git a/examples/bulk_import/example_bulkinsert_withfunction.py b/examples/bulk_import/example_bulkinsert_withfunction.py new file mode 100644 index 000000000..36351d431 --- /dev/null +++ b/examples/bulk_import/example_bulkinsert_withfunction.py @@ -0,0 +1,360 @@ +import random +import json +import csv +import time +import os + +from pymilvus import ( + connections, + FieldSchema, CollectionSchema, DataType, + Collection, + utility, + BulkInsertState, + Function, FunctionType, +) + + +LOCAL_FILES_PATH = "/tmp/milvus_bulkinsert" + +# Milvus service address +_HOST = '127.0.0.1' +_PORT = '19530' + +# Const names +_COLLECTION_NAME = 'demo_bulk_insert_csv' +_ID_FIELD_NAME = 'id_field' +_VECTOR_FIELD_NAME = 'float_vector_field' +_JSON_FIELD_NAME = "json_field" +_VARCHAR_FIELD_NAME = "varchar_field" +_DYNAMIC_FIELD_NAME = "$meta" # dynamic field, the internal name is "$meta", enable_dynamic_field=True + + +# Vector field parameter +_DIM = 1536 + +# to generate increment ID +id_start = 1 + +# Create a Milvus connection +def create_connection(): + retry = True + while retry: + try: + print(f"\nCreate connection...") + connections.connect(host=_HOST, port=_PORT) + retry = False + except Exception as e: + print("Cannot connect to Milvus. Error: " + str(e)) + print(f"Cannot connect to Milvus. Trying to connect Again. Sleeping for: 1") + time.sleep(1) + + print(f"\nList connections:") + print(connections.list_connections()) + +# Create a collection +def create_collection(has_partition_key: bool): + field1 = FieldSchema(name=_ID_FIELD_NAME, dtype=DataType.INT64, description="int64", is_primary=True, auto_id=False) + field2 = FieldSchema(name=_VECTOR_FIELD_NAME, dtype=DataType.FLOAT_VECTOR, description="float vector", dim=_DIM, + is_primary=False) + field3 = FieldSchema(name=_JSON_FIELD_NAME, dtype=DataType.JSON) + # if has partition key, we use this varchar field as partition key field + field4 = FieldSchema(name=_VARCHAR_FIELD_NAME, dtype=DataType.VARCHAR, max_length=256, is_partition_key=has_partition_key) + schema = CollectionSchema(fields=[field1, field2, field3, field4], enable_dynamic_field=True) + text_embedding_function = Function( + name="openai", + function_type=FunctionType.TEXTEMBEDDING, + input_field_names=[_VARCHAR_FIELD_NAME], + output_field_names=_VECTOR_FIELD_NAME, + params={ + "provider": "openai", + "model_name": "text-embedding-3-small", + } + ) + schema.add_function(text_embedding_function) + if has_partition_key: + collection = Collection(name=_COLLECTION_NAME, schema=schema, num_partitions=10) + else: + collection = Collection(name=_COLLECTION_NAME, schema=schema) + print("\nCollection created:", _COLLECTION_NAME) + return collection + +# Test existence of a collection +def has_collection(): + return utility.has_collection(_COLLECTION_NAME) + +# Drop a collection in Milvus +def drop_collection(): + collection = Collection(_COLLECTION_NAME) + collection.drop() + print("\nDrop collection:", _COLLECTION_NAME) + + +# List all collections in Milvus +def list_collections(): + print("\nList collections:") + print(utility.list_collections()) + +# Create a partition +def create_partition(collection, partition_name): + collection.create_partition(partition_name=partition_name) + print("\nPartition created:", partition_name) + return collection.partition(partition_name) + +def gen_csv_rowbased(num, path, 
partition_name, sep=","): + global id_start + header = [_ID_FIELD_NAME, _JSON_FIELD_NAME, _VARCHAR_FIELD_NAME, _DYNAMIC_FIELD_NAME] + rows = [] + for i in range(num): + rows.append([ + id_start, # id field + json.dumps({"Number": id_start, "Name": "book_"+str(id_start)}), # json field + "{}_{}".format(partition_name, id_start) if partition_name is not None else "description_{}".format(id_start), # varchar field + json.dumps({"dynamic_field": id_start}), # no field matches this value, this value will be put into dynamic field + ]) + id_start = id_start + 1 + data = [header] + rows + with open(path, "w") as f: + writer = csv.writer(f, delimiter=sep) + for row in data: + writer.writerow(row) + + +def bulk_insert_rowbased(row_count_per_file, file_count, partition_name = None): + # make sure the files folder is created + os.makedirs(name=LOCAL_FILES_PATH, exist_ok=True) + + task_ids = [] + for i in range(file_count): + data_folder = os.path.join(LOCAL_FILES_PATH, "csv_{}".format(i)) + os.makedirs(name=data_folder, exist_ok=True) + file_path = os.path.join(data_folder, "csv_{}.csv".format(i)) + print("Generate csv file:", file_path) + sep ="\t" + gen_csv_rowbased(row_count_per_file, file_path, partition_name, sep) + + print("Import csv file:", file_path) + task_id = utility.do_bulk_insert(collection_name=_COLLECTION_NAME, + partition_name=partition_name, + files=[file_path], + sep=sep) + task_ids.append(task_id) + + return wait_tasks_competed(task_ids) + +# Wait all bulkinsert tasks to be a certain state +# return the states of all the tasks, including failed task +def wait_tasks_to_state(task_ids, state_code): + wait_ids = task_ids + states = [] + while True: + time.sleep(2) + temp_ids = [] + for id in wait_ids: + state = utility.get_bulk_insert_state(task_id=id) + if state.state == BulkInsertState.ImportFailed or state.state == BulkInsertState.ImportFailedAndCleaned: + print(state) + print("The task", state.task_id, "failed, reason:", state.failed_reason) + continue + + if state.state >= state_code: + states.append(state) + continue + + temp_ids.append(id) + + wait_ids = temp_ids + if len(wait_ids) == 0: + break + print("Wait {} tasks to be state: {}. Next round check".format(len(wait_ids), BulkInsertState.state_2_name.get(state_code, "unknown"))) + + return states + +# If the state of bulkinsert task is BulkInsertState.ImportCompleted, that means the data file has been parsed and data has been persisted, +# some segments have been created and waiting for index. +# ImportCompleted state doesn't mean the data is queryable, to query the data, you need to wait until the segment is +# indexed successfully and loaded into memory. +def wait_tasks_competed(task_ids): + print("=========================================================================================================") + states = wait_tasks_to_state(task_ids, BulkInsertState.ImportCompleted) + complete_count = 0 + for state in states: + if state.state == BulkInsertState.ImportCompleted: + complete_count = complete_count + 1 + # print(state) + # if you want to get the auto-generated primary keys, use state.ids to fetch + # print("Auto-generated ids:", state.ids) + + print("{} of {} tasks have successfully generated segments, able to be compacted and indexed as normal".format(complete_count, len(task_ids))) + print("=========================================================================================================\n") + return states + +# List all bulkinsert tasks, including pending tasks, working tasks and finished tasks. 
+# the parameter 'limit' is: how many latest tasks should be returned, if the limit<=0, all the tasks will be returned +def list_all_bulk_insert_tasks(collection_name=_COLLECTION_NAME, limit=0): + tasks = utility.list_bulk_insert_tasks(limit=limit, collection_name=collection_name) + print("=========================================================================================================") + print("List bulkinsert tasks with limit", limit) + pending = 0 + started = 0 + persisted = 0 + completed = 0 + failed = 0 + for task in tasks: + print(task) + if task.state == BulkInsertState.ImportPending: + pending = pending + 1 + elif task.state == BulkInsertState.ImportStarted: + started = started + 1 + elif task.state == BulkInsertState.ImportPersisted: + persisted = persisted + 1 + elif task.state == BulkInsertState.ImportCompleted: + completed = completed + 1 + elif task.state == BulkInsertState.ImportFailed: + failed = failed + 1 + print("There are {} bulkinsert tasks: {} pending, {} started, {} persisted, {} completed, {} failed" + .format(len(tasks), pending, started, persisted, completed, failed)) + print("=========================================================================================================\n") + +# Get collection row count. +def get_entity_num(collection): + print("=========================================================================================================") + print("The number of entity:", collection.num_entities) + +# Specify an index type +def create_index(collection): + print("Start Creating index IVF_FLAT") + index = { + "index_type": "IVF_FLAT", + "metric_type": "L2", + "params": {"nlist": 128}, + } + collection.create_index(_VECTOR_FIELD_NAME, index) + +# Load collection data into memory. If collection is not loaded, the search() and query() methods will return error. +def load_collection(collection): + collection.load() + +# Release collection data to free memory. +def release_collection(collection): + collection.release() + +# ANN search +def search(collection, search_vector, expr = None, consistency_level = "Eventually"): + search_param = { + "expr": expr, + "data": [search_vector], + "anns_field": _VECTOR_FIELD_NAME, + "param": {"metric_type": "L2", "params": {"nprobe": 10}}, + "limit": 5, + "output_fields": [_JSON_FIELD_NAME, _VARCHAR_FIELD_NAME, _DYNAMIC_FIELD_NAME], + "consistency_level": consistency_level, + } + print("search..." 
if expr is None else "hybrid search...") + results = collection.search(**search_param) + print("=========================================================================================================") + result = results[0] + for j, res in enumerate(result): + print(f"\ttop{j}: {res}") + print("\thits count:", len(result)) + print("=========================================================================================================\n") + +# Delete entities +def delete(collection, ids): + print("=========================================================================================================\n") + print("Delete these entities:", ids) + expr = _ID_FIELD_NAME + " in " + str(ids) + collection.delete(expr=expr) + print("=========================================================================================================\n") + +# Retrieve entities +def retrieve(collection, ids): + print("=========================================================================================================") + print("Retrieve these entities:", ids) + expr = _ID_FIELD_NAME + " in " + str(ids) + result = collection.query(expr=expr, output_fields=[_JSON_FIELD_NAME, _VARCHAR_FIELD_NAME, _VECTOR_FIELD_NAME, _DYNAMIC_FIELD_NAME]) + for item in result: + print(item) + print("=========================================================================================================\n") + return result + + +def main(has_partition_key: bool): + # create a connection + create_connection() + + # drop collection if the collection exists + if has_collection(): + drop_collection() + + # create collection + collection = create_collection(has_partition_key) + + # specify an index type + create_index(collection) + + print("Load data to memory") + # load data to memory + load_collection(collection) + print("Load data to memory completed") + # show collections + print("Show collections") + list_collections() + + # do bulk_insert, wait all tasks finish persisting + row_count_per_file = 10 + if has_partition_key: + # automatically partitioning + bulk_insert_rowbased(row_count_per_file=row_count_per_file, file_count=2) + else: + # bulkinsert into the default partition + bulk_insert_rowbased(row_count_per_file=row_count_per_file, file_count=1) + print("Bulk insert completed") + + # list all tasks + list_all_bulk_insert_tasks() + + # get the number of entities + get_entity_num(collection) + + print("Waiting for index build to complete and refreshing segment list to load...") + utility.wait_for_index_building_complete(_COLLECTION_NAME) + collection.load(_refresh = True) + + # pick some entities + pick_ids = [50, row_count_per_file + 99] + id_vectors = retrieve(collection, pick_ids) + + # search the picked entities, they are in result at the top0 + for id_vector in id_vectors: + id = id_vector[_ID_FIELD_NAME] + vector = id_vector[_VECTOR_FIELD_NAME] + print("Search id:", id, ", compare this id to the top0 of search result, they are equal") + search(collection, vector) + + # delete the picked entities + delete(collection, pick_ids) + + # search the deleted entities, they are not in result anymore + for id_vector in id_vectors: + id = id_vector[_ID_FIELD_NAME] + vector = id_vector[_VECTOR_FIELD_NAME] + print("Search id:", id, ", compare this id to the top0 result, they are not equal since the id has been deleted") + # here we use Strong consistency level to do search, because we need to make sure the delete operation is applied + search(collection, vector, consistency_level="Strong") + + # search by filtering the varchar field + vector =
[round(random.random(), 6) for _ in range(_DIM)] + ret = search(collection, vector) + print(ret) + # release memory + # release_collection(collection) + + # drop collection + # drop_collection() + + +if __name__ == '__main__': + # change this value if you want to test bulkinsert with partition key + # Note: bulkinsert supports partition key from Milvus v2.2.12 + has_partition_key = False + main(has_partition_key) diff --git a/examples/example_bulkwriter.py b/examples/bulk_import/example_bulkwriter.py similarity index 62% rename from examples/example_bulkwriter.py rename to examples/bulk_import/example_bulkwriter.py index 161fe8899..891acd227 100644 --- a/examples/example_bulkwriter.py +++ b/examples/bulk_import/example_bulkwriter.py @@ -17,8 +17,12 @@ import time import pandas as pd import numpy as np +import tensorflow as tf import logging + +from typing import List + logging.basicConfig(level=logging.INFO) from pymilvus import ( @@ -26,13 +30,16 @@ FieldSchema, CollectionSchema, DataType, Collection, utility, + BulkInsertState, +) + +from pymilvus.bulk_writer import ( LocalBulkWriter, RemoteBulkWriter, BulkFileType, + list_import_jobs, bulk_import, get_import_progress, - list_import_jobs, - BulkInsertState, ) # minio @@ -48,13 +55,55 @@ ALL_TYPES_COLLECTION_NAME = "all_types_for_bulkwriter" DIM = 512 -def gen_binary_vector(): +# optional input for binary vector: +# 1. list of int such as [1, 0, 1, 1, 0, 0, 1, 0] +# 2. numpy array of uint8 +def gen_binary_vector(to_numpy_arr): raw_vector = [random.randint(0, 1) for i in range(DIM)] - binary_vectors = np.packbits(raw_vector, axis=-1).tolist() - return binary_vectors - -def gen_float_vector(): - return [random.random() for _ in range(DIM)] + if to_numpy_arr: + return np.packbits(raw_vector, axis=-1) + return raw_vector + +# optional input for float vector: +# 1. list of float such as [0.56, 1.859, 6.55, 9.45] +# 2. numpy array of float32 +def gen_float_vector(to_numpy_arr): + raw_vector = [random.random() for _ in range(DIM)] + if to_numpy_arr: + return np.array(raw_vector, dtype="float32") + return raw_vector + +# optional input for bfloat16 vector: +# 1. list of float such as [0.56, 1.859, 6.55, 9.45] +# 2. numpy array of bfloat16 +def gen_bf16_vector(to_numpy_arr): + raw_vector = [random.random() for _ in range(DIM)] + if to_numpy_arr: + return tf.cast(raw_vector, dtype=tf.bfloat16).numpy() + return raw_vector + +# optional input for float16 vector: +# 1. list of float such as [0.56, 1.859, 6.55, 9.45] +# 2.
numpy array of float16 +def gen_fp16_vector(to_numpy_arr): + raw_vector = [random.random() for _ in range(DIM)] + if to_numpy_arr: + return np.array(raw_vector, dtype=np.float16) + return raw_vector + +# optional input for sparse vector: +# only accepts dict like {2: 13.23, 45: 0.54} or {"indices": [1, 2], "values": [0.1, 0.2]} +# note: no need to sort the keys +def gen_sparse_vector(pair_dict: bool): + raw_vector = {} + dim = random.randint(2, 20) + if pair_dict: + raw_vector["indices"] = [i for i in range(dim)] + raw_vector["values"] = [random.random() for _ in range(dim)] + else: + for i in range(dim): + raw_vector[i] = random.random() + return raw_vector def create_connection(): print(f"\nCreate connection...") @@ -78,7 +127,7 @@ def build_simple_collection(): print(f"Collection '{collection.name}' created") return collection.schema -def build_all_type_schema(bin_vec: bool, has_array: bool): +def build_all_type_schema(is_numpy: bool): print(f"\n===================== build all types schema ====================") fields = [ FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=False), @@ -91,12 +140,18 @@ def build_all_type_schema(bin_vec: bool, has_array: bool): FieldSchema(name="double", dtype=DataType.DOUBLE), FieldSchema(name="varchar", dtype=DataType.VARCHAR, max_length=512), FieldSchema(name="json", dtype=DataType.JSON), - FieldSchema(name="vector", dtype=DataType.BINARY_VECTOR, dim=DIM) if bin_vec else FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=DIM), + # from 2.4.0, milvus supports multiple vector fields in one collection + # FieldSchema(name="float_vector", dtype=DataType.FLOAT_VECTOR, dim=DIM), + FieldSchema(name="binary_vector", dtype=DataType.BINARY_VECTOR, dim=DIM), + FieldSchema(name="float16_vector", dtype=DataType.FLOAT16_VECTOR, dim=DIM), + FieldSchema(name="bfloat16_vector", dtype=DataType.BFLOAT16_VECTOR, dim=DIM), ] - if has_array: + # milvus doesn't support parsing array/sparse_vector from numpy file + if not is_numpy: fields.append(FieldSchema(name="array_str", dtype=DataType.ARRAY, max_capacity=100, element_type=DataType.VARCHAR, max_length=128)) fields.append(FieldSchema(name="array_int", dtype=DataType.ARRAY, max_capacity=100, element_type=DataType.INT64)) + fields.append(FieldSchema(name="sparse_vector", dtype=DataType.SPARSE_FLOAT_VECTOR)) schema = CollectionSchema(fields=fields, enable_dynamic_field=True) return schema @@ -115,7 +170,7 @@ def read_sample_data(file_path: str, writer: [LocalBulkWriter, RemoteBulkWriter] writer.append_row(row) -def local_writer(schema: CollectionSchema, file_type: BulkFileType): +def local_writer_simple(schema: CollectionSchema, file_type: BulkFileType): print(f"\n===================== local writer ({file_type.name}) ====================") with LocalBulkWriter( schema=schema, @@ -128,7 +183,7 @@ def local_writer(schema: CollectionSchema, file_type: BulkFileType): # append rows for i in range(100000): - local_writer.append_row({"path": f"path_{i}", "vector": gen_float_vector(), "label": f"label_{i}"}) + local_writer.append_row({"path": f"path_{i}", "vector": gen_float_vector(i%2==0), "label": f"label_{i}"}) print(f"{local_writer.total_row_count} rows appends") print(f"{local_writer.buffer_row_count} rows in buffer not flushed") @@ -138,7 +193,7 @@ def local_writer(schema: CollectionSchema, file_type: BulkFileType): print(f"Local writer done! 
output local files: {batch_files}") -def remote_writer(schema: CollectionSchema, file_type: BulkFileType): +def remote_writer_simple(schema: CollectionSchema, file_type: BulkFileType): print(f"\n===================== remote writer ({file_type.name}) ====================") with RemoteBulkWriter( schema=schema, @@ -157,7 +212,7 @@ def remote_writer(schema: CollectionSchema, file_type: BulkFileType): # append rows for i in range(10000): - remote_writer.append_row({"path": f"path_{i}", "vector": gen_float_vector(), "label": f"label_{i}"}) + remote_writer.append_row({"path": f"path_{i}", "vector": gen_float_vector(i%2==0), "label": f"label_{i}"}) print(f"{remote_writer.total_row_count} rows appends") print(f"{remote_writer.buffer_row_count} rows in buffer not flushed") @@ -171,7 +226,7 @@ def parallel_append(schema: CollectionSchema): def _append_row(writer: LocalBulkWriter, begin: int, end: int): try: for i in range(begin, end): - writer.append_row({"path": f"path_{i}", "vector": gen_float_vector(), "label": f"label_{i}"}) + writer.append_row({"path": f"path_{i}", "vector": gen_float_vector(False), "label": f"label_{i}"}) if i%100 == 0: print(f"{threading.current_thread().name} inserted {i-begin} items") except Exception as e: @@ -221,8 +276,8 @@ def _append_row(writer: LocalBulkWriter, begin: int, end: int): print("Data is correct") -def all_types_writer(bin_vec: bool, schema: CollectionSchema, file_type: BulkFileType)->list: - print(f"\n===================== all field types ({file_type.name}) binary_vector={bin_vec} ====================") +def all_types_writer(schema: CollectionSchema, file_type: BulkFileType)-> List[List[str]]: + print(f"\n===================== all field types ({file_type.name}) ====================") with RemoteBulkWriter( schema=schema, remote_path="bulk_data", @@ -248,32 +303,43 @@ def all_types_writer(bin_vec: bool, schema: CollectionSchema, file_type: BulkFil "double": i/7, "varchar": f"varchar_{i}", "json": {"dummy": i, "ok": f"name_{i}"}, - "vector": gen_binary_vector() if bin_vec else gen_float_vector(), + # "float_vector": gen_float_vector(False), + "binary_vector": gen_binary_vector(False), + "float16_vector": gen_fp16_vector(False), + "bfloat16_vector": gen_bf16_vector(False), f"dynamic_{i}": i, - # bulkinsert doesn't support import npy with array field, the below values will be stored into dynamic field + # bulkinsert doesn't support import npy with array field and sparse vector, + # if file_type is numpy, the below values will be stored into dynamic field "array_str": [f"str_{k}" for k in range(5)], "array_int": [k for k in range(10)], + "sparse_vector": gen_sparse_vector(False), } remote_writer.append_row(row) # append rows by numpy type for i in range(batch_count): + id = i+batch_count remote_writer.append_row({ - "id": np.int64(i+batch_count), + "id": np.int64(id), "bool": True if i % 3 == 0 else False, - "int8": np.int8(i%128), - "int16": np.int16(i%1000), - "int32": np.int32(i%100000), - "int64": np.int64(i), - "float": np.float32(i/3), - "double": np.float64(i/7), - "varchar": f"varchar_{i}", - "json": json.dumps({"dummy": i, "ok": f"name_{i}"}), - "vector": np.array(gen_binary_vector()).astype(np.dtype("int8")) if bin_vec else np.array(gen_float_vector()).astype(np.dtype("float32")), - f"dynamic_{i}": i, - # bulkinsert doesn't support import npy with array field, the below values will be stored into dynamic field + "int8": np.int8(id%128), + "int16": np.int16(id%1000), + "int32": np.int32(id%100000), + "int64": np.int64(id), + "float": np.float32(id/3), 
+ "double": np.float64(id/7), + "varchar": f"varchar_{id}", + "json": json.dumps({"dummy": id, "ok": f"name_{id}"}), + # "float_vector": gen_float_vector(True), + "binary_vector": gen_binary_vector(True), + "float16_vector": gen_fp16_vector(True), + "bfloat16_vector": gen_bf16_vector(True), + f"dynamic_{id}": id, + # bulkinsert doesn't support import npy with array field and sparse vector, + # if file_type is numpy, the below values will be stored into dynamic field "array_str": np.array([f"str_{k}" for k in range(5)], np.dtype("str")), "array_int": np.array([k for k in range(10)], np.dtype("int64")), + "sparse_vector": gen_sparse_vector(True), }) print(f"{remote_writer.total_row_count} rows appends") @@ -284,50 +350,76 @@ def all_types_writer(bin_vec: bool, schema: CollectionSchema, file_type: BulkFil return remote_writer.batch_files -def call_bulkinsert(schema: CollectionSchema, batch_files: list): - print(f"\n===================== call bulkinsert ====================") +def call_bulkinsert(schema: CollectionSchema, batch_files: List[List[str]]): if utility.has_collection(ALL_TYPES_COLLECTION_NAME): utility.drop_collection(ALL_TYPES_COLLECTION_NAME) collection = Collection(name=ALL_TYPES_COLLECTION_NAME, schema=schema) print(f"Collection '{collection.name}' created") - task_ids = [] - for files in batch_files: - task_id = utility.do_bulk_insert(collection_name=ALL_TYPES_COLLECTION_NAME, files=files) - task_ids.append(task_id) - print(f"Create a bulkinert task, task id: {task_id}") + url = f"http://{HOST}:{PORT}" + + print(f"\n===================== import files to milvus ====================") + resp = bulk_import( + url=url, + collection_name=ALL_TYPES_COLLECTION_NAME, + files=batch_files, + ) + print(resp.json()) + job_id = resp.json()['data']['jobId'] + print(f"Create a bulkinsert job, job id: {job_id}") - while len(task_ids) > 0: - print("Wait 1 second to check bulkinsert tasks state...") + while True: + print("Wait 1 second to check bulkinsert job state...") time.sleep(1) - for id in task_ids: - state = utility.get_bulk_insert_state(task_id=id) - if state.state == BulkInsertState.ImportFailed or state.state == BulkInsertState.ImportFailedAndCleaned: - print(f"The task {state.task_id} failed, reason: {state.failed_reason}") - task_ids.remove(id) - elif state.state == BulkInsertState.ImportCompleted: - print(f"The task {state.task_id} completed") - task_ids.remove(id) + + print(f"\n===================== get import job progress ====================") + resp = get_import_progress( + url=url, + job_id=job_id, + ) + + state = resp.json()['data']['state'] + progress = resp.json()['data']['progress'] + if state == "Importing": + print(f"The job {job_id} is importing... 
{progress}%") + continue + if state == "Failed": + reason = resp.json()['data']['reason'] + print(f"The job {job_id} failed, reason: {reason}") + break + if state == "Completed" and progress == 100: + print(f"The job {job_id} completed") + break print(f"Collection row number: {collection.num_entities}") -def retrieve_imported_data(bin_vec: bool): +def retrieve_imported_data(): collection = Collection(name=ALL_TYPES_COLLECTION_NAME) print("Create index...") - index_param = { - "index_type": "BIN_FLAT", - "params": {}, - "metric_type": "HAMMING" - } if bin_vec else { - "index_type": "FLAT", - "params": {}, - "metric_type": "L2" - } - collection.create_index(field_name="vector", index_params=index_param) - - ids = [100, 5000] + for field in collection.schema.fields: + if (field.dtype == DataType.FLOAT_VECTOR or field.dtype == DataType.FLOAT16_VECTOR + or field.dtype == DataType.BFLOAT16_VECTOR): + collection.create_index(field_name=field.name, index_params={ + "index_type": "FLAT", + "params": {}, + "metric_type": "L2" + }) + elif field.dtype == DataType.BINARY_VECTOR: + collection.create_index(field_name=field.name, index_params={ + "index_type": "BIN_FLAT", + "params": {}, + "metric_type": "HAMMING" + }) + elif field.dtype == DataType.SPARSE_FLOAT_VECTOR: + collection.create_index(field_name=field.name, index_params={ + "index_type": "SPARSE_INVERTED_INDEX", + "metric_type": "IP", + "params": {"drop_ratio_build": 0.2} + }) + + ids = [100, 15000] print(f"Load collection and query items {ids}") collection.load() expr = f"id in {ids}" @@ -338,10 +430,15 @@ def retrieve_imported_data(bin_vec: bool): print(item) def cloud_bulkinsert(): - url = "https://_your_cloud_server_url_" - api_key = "_api_key_for_the_url_" - cluster_id = "_your_cloud_instance_id_" - collection_name = "_collection_name_on_the_cloud_" + # The value of the URL is fixed. 
+ # For overseas regions, it is: https://api.cloud.zilliz.com + # For regions in China, it is: https://api.cloud.zilliz.com.cn + url = "https://api.cloud.zilliz.com" + api_key = "_api_key_for_cluster_org_" + cluster_id = "_your_cloud_cluster_id_" + collection_name = "_collection_name_on_the_cluster_id_" + # If partition_name is not specified, use "" + partition_name = "_partition_name_on_the_collection_" print(f"\n===================== import files to cloud vectordb ====================") object_url = "_your_object_storage_service_url_" @@ -349,12 +446,13 @@ def cloud_bulkinsert(): object_url_secret_key = "_your_object_storage_service_secret_key_" resp = bulk_import( url=url, - api_key=api_key, + collection_name=collection_name, + partition_name=partition_name, object_url=object_url, + cluster_id=cluster_id, + api_key=api_key, access_key=object_url_access_key, secret_key=object_url_secret_key, - cluster_id=cluster_id, - collection_name=collection_name, ) print(resp.json()) @@ -362,17 +460,17 @@ def cloud_bulkinsert(): job_id = resp.json()['data']['jobId'] resp = get_import_progress( url=url, - api_key=api_key, job_id=job_id, cluster_id=cluster_id, + api_key=api_key, ) print(resp.json()) print(f"\n===================== list import jobs ====================") resp = list_import_jobs( url=url, - api_key=api_key, cluster_id=cluster_id, + api_key=api_key, page_size=10, current_page=1, ) @@ -386,32 +484,26 @@ def cloud_bulkinsert(): BulkFileType.JSON, BulkFileType.NUMPY, BulkFileType.PARQUET, + BulkFileType.CSV, ] schema = build_simple_collection() for file_type in file_types: - local_writer(schema=schema, file_type=file_type) + local_writer_simple(schema=schema, file_type=file_type) for file_type in file_types: - remote_writer(schema=schema, file_type=file_type) + remote_writer_simple(schema=schema, file_type=file_type) parallel_append(schema) - # float vectors + all scalar types + # all vector types + all scalar types for file_type in file_types: - # Note: bulkinsert doesn't support import npy with array field - schema = build_all_type_schema(bin_vec=False, has_array=False if file_type==BulkFileType.NUMPY else True) - batch_files = all_types_writer(bin_vec=False, schema=schema, file_type=file_type) + # Note: bulkinsert doesn't support import npy with array field and sparse vector field + schema = build_all_type_schema(is_numpy= file_type==BulkFileType.NUMPY) + batch_files = all_types_writer(schema=schema, file_type=file_type) call_bulkinsert(schema, batch_files) - retrieve_imported_data(bin_vec=False) + retrieve_imported_data() - # binary vectors + all scalar types - for file_type in file_types: - # Note: bulkinsert doesn't support import npy with array field - schema = build_all_type_schema(bin_vec=True, has_array=False if file_type == BulkFileType.NUMPY else True) - batch_files = all_types_writer(bin_vec=True, schema=schema, file_type=file_type) - call_bulkinsert(schema, batch_files) - retrieve_imported_data(bin_vec=True) # # to call cloud bulkinsert api, you need to apply a cloud service from Zilliz Cloud(https://zilliz.com/cloud) # cloud_bulkinsert() diff --git a/examples/data/train_embeddings.csv b/examples/bulk_import/train_embeddings.csv similarity index 100% rename from examples/data/train_embeddings.csv rename to examples/bulk_import/train_embeddings.csv diff --git a/examples/cert/example_tls1.py b/examples/cert/example_tls1.py new file mode 100644 index 000000000..4a8f43749 --- /dev/null +++ b/examples/cert/example_tls1.py @@ -0,0 +1,124 @@ +import random + +from pymilvus import ( + 
MilvusClient, + FieldSchema, CollectionSchema, DataType, + utility +) + +# This example shows how to: +# 1. connect to Milvus server +# 2. create a collection +# 3. insert entities +# 4. create index +# 5. search + + +_HOST = '127.0.0.1' +_PORT = '19530' +_URI = f"https://{_HOST}:{_PORT}" + +# Const names +_COLLECTION_NAME = 'demo' +_ID_FIELD_NAME = 'id_field' +_VECTOR_FIELD_NAME = 'float_vector_field' + +# Vector parameters +_DIM = 128 +_INDEX_FILE_SIZE = 32 # max file size of stored index + +# Index parameters +_METRIC_TYPE = 'L2' +_INDEX_TYPE = 'IVF_FLAT' +_NLIST = 1024 +_NPROBE = 16 +_TOPK = 3 + + +def main(): + # create a connection + print(f"\nCreate connection...") + milvus_client = MilvusClient(uri=_URI, + secure=True, + server_pem_path="cert/server.pem", + server_name='localhost') + print(f"\nList connection:") + print(milvus_client._get_connection()) + + # drop collection if the collection exists + if milvus_client.has_collection(_COLLECTION_NAME): + milvus_client.drop_collection(_COLLECTION_NAME) + + # create collection + field1 = FieldSchema(name=_ID_FIELD_NAME, dtype=DataType.INT64, description="int64", is_primary=True) + field2 = FieldSchema(name=_VECTOR_FIELD_NAME, dtype=DataType.FLOAT_VECTOR, description="float vector", dim=_DIM, + is_primary=False) + schema = CollectionSchema(fields=[field1, field2], description="collection description") + milvus_client.create_collection(collection_name=_COLLECTION_NAME,schema=schema) + milvus_client.describe_collection(collection_name=_COLLECTION_NAME) + + print("\ncollection created:", _COLLECTION_NAME) + + # show collections + print("\nlist collections:") + print(milvus_client.list_collections()) + + # insert 10000 vectors with 128 dimension + data_dict = [] + for i in range(10000): + entity = { + "id_field": i+1, # Assuming id_field is the _COLLECTION_NAME of the field corresponding to the ID + "float_vector_field": [random.random() for _ in range(_DIM)] + } + data_dict.append(entity) + insert_result = milvus_client.insert(collection_name=_COLLECTION_NAME,data=data_dict) + + # get the number of entities + print(f"\nThe number of entity: {insert_result['insert_count']}") + + # create index + index_params = milvus_client.prepare_index_params() + + index_params.add_index( + field_name=_VECTOR_FIELD_NAME, + index_type=_INDEX_TYPE, + metric_type=_METRIC_TYPE, + params={"nlist": _NLIST} + ) + + milvus_client.create_index( + collection_name=_COLLECTION_NAME, + index_params=index_params + ) + print("\nCreated index") + + # load data to memory + milvus_client.load_collection(_COLLECTION_NAME) + vector = data_dict[1] + vectors = [vector["float_vector_field"]] + + # search + search_param = { + "anns_field": _VECTOR_FIELD_NAME, + "param": {"metric_type": _METRIC_TYPE, "params": {"nprobe": _NPROBE}}, + "expr": f"{_ID_FIELD_NAME} > 0"} + results = milvus_client.search(collection_name=_COLLECTION_NAME,data=vectors,limit= _TOPK,search_params=search_param) + for i, result in enumerate(results): + print("\nSearch result for {}th vector: ".format(i)) + for j, res in enumerate(result): + print("Top {}: {}".format(j, res)) + + # release memory + milvus_client.release_collection(_COLLECTION_NAME) + + # drop collection index + milvus_client.drop_index(_COLLECTION_NAME,index_name=_VECTOR_FIELD_NAME) + print("\nDrop index successfully") + + # drop collection + milvus_client.drop_collection(_COLLECTION_NAME) + print("\nDrop collection: {}".format(_COLLECTION_NAME)) + + +if __name__ == '__main__': + main() diff --git a/examples/cert/example_tls2.py
b/examples/cert/example_tls2.py new file mode 100644 index 000000000..572bf561b --- /dev/null +++ b/examples/cert/example_tls2.py @@ -0,0 +1,126 @@ +import random + +from pymilvus import ( + MilvusClient, + FieldSchema, CollectionSchema, DataType, + utility +) + +# This example shows how to: +# 1. connect to Milvus server +# 2. create a collection +# 3. insert entities +# 4. create index +# 5. search + + +_HOST = '127.0.0.1' +_PORT = '19530' +_URI = f"https://{_HOST}:{_PORT}" + +# Const names +_COLLECTION_NAME = 'demo' +_ID_FIELD_NAME = 'id_field' +_VECTOR_FIELD_NAME = 'float_vector_field' + +# Vector parameters +_DIM = 128 +_INDEX_FILE_SIZE = 32 # max file size of stored index + +# Index parameters +_METRIC_TYPE = 'L2' +_INDEX_TYPE = 'IVF_FLAT' +_NLIST = 1024 +_NPROBE = 16 +_TOPK = 3 + + +def main(): + # create a connection + print(f"\nCreate connection...") + milvus_client = MilvusClient(uri=_URI, + secure=True, + client_pem_path="cert/client.pem", + client_key_path="cert/client.key", + ca_pem_path="cert/ca.pem", + server_name='localhost') + print(f"\nList connection:") + print(milvus_client._get_connection()) + + # drop collection if the collection exists + if milvus_client.has_collection(_COLLECTION_NAME): + milvus_client.drop_collection(_COLLECTION_NAME) + + # create collection + field1 = FieldSchema(name=_ID_FIELD_NAME, dtype=DataType.INT64, description="int64", is_primary=True) + field2 = FieldSchema(name=_VECTOR_FIELD_NAME, dtype=DataType.FLOAT_VECTOR, description="float vector", dim=_DIM, + is_primary=False) + schema = CollectionSchema(fields=[field1, field2], description="collection description") + milvus_client.create_collection(collection_name=_COLLECTION_NAME,schema=schema) + milvus_client.describe_collection(collection_name=_COLLECTION_NAME) + + print("\ncollection created:", _COLLECTION_NAME) + + # show collections + print("\nlist collections:") + print(milvus_client.list_collections()) + + # insert 10000 vectors with 128 dimension + data_dict = [] + for i in range(10000): + entity = { + "id_field": i+1, # Assuming id_field is the _COLLECTION_NAME of the field corresponding to the ID + "float_vector_field": [random.random() for _ in range(_DIM)] + } + data_dict.append(entity) + insert_result = milvus_client.insert(collection_name=_COLLECTION_NAME,data=data_dict) + + # get the number of entities + print(f"\nThe number of entity: {insert_result['insert_count']}") + + # create index + index_params = milvus_client.prepare_index_params() + + index_params.add_index( + field_name=_VECTOR_FIELD_NAME, + index_type=_INDEX_TYPE, + metric_type=_METRIC_TYPE, + params={"nlist": _NLIST} + ) + + milvus_client.create_index( + collection_name=_COLLECTION_NAME, + index_params=index_params + ) + print("\nCreated index") + + # load data to memory + milvus_client.load_collection(_COLLECTION_NAME) + vector = data_dict[1] + vectors = [vector["float_vector_field"]] + + # search + search_param = { + "anns_field": _VECTOR_FIELD_NAME, + "param": {"metric_type": _METRIC_TYPE, "params": {"nprobe": _NPROBE}}, + "expr": f"{_ID_FIELD_NAME} > 0"} + results = milvus_client.search(collection_name=_COLLECTION_NAME,data=vectors,limit= _TOPK,search_params=search_param) + for i, result in enumerate(results): + print("\nSearch result for {}th vector: ".format(i)) + for j, res in enumerate(result): + print("Top {}: {}".format(j, res)) + + # release memory + milvus_client.release_collection(_COLLECTION_NAME) + + # drop collection index + milvus_client.drop_index(_COLLECTION_NAME,index_name=_VECTOR_FIELD_NAME) + 
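Taken together, the two new TLS examples differ only in how the MilvusClient connection is built: example_tls1.py uses one-way TLS, where the client verifies the server certificate, while this file uses mutual TLS, where the client also presents its own certificate and key. A minimal side-by-side sketch of the two connection styles, assuming the certificate files live in a local cert/ directory:

from pymilvus import MilvusClient

# One-way TLS (example_tls1.py): the client only verifies the server certificate.
one_way_client = MilvusClient(
    uri="https://127.0.0.1:19530",
    secure=True,
    server_pem_path="cert/server.pem",
    server_name="localhost",
)

# Mutual TLS (example_tls2.py): the client also presents its own certificate and key,
# and verifies the server against the CA certificate.
mutual_client = MilvusClient(
    uri="https://127.0.0.1:19530",
    secure=True,
    client_pem_path="cert/client.pem",
    client_key_path="cert/client.key",
    ca_pem_path="cert/ca.pem",
    server_name="localhost",
)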
print("\nDrop index sucessfully") + + # drop collection + milvus_client.drop_collection(_COLLECTION_NAME) + print("\nDrop collection: {}".format(_COLLECTION_NAME)) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/examples/compact.py b/examples/compact.py new file mode 100644 index 000000000..5aaa73f2d --- /dev/null +++ b/examples/compact.py @@ -0,0 +1,83 @@ +import time +import numpy as np +from pymilvus import ( + MilvusClient, +) + +fmt = "\n=== {:30} ===\n" +dim = 8 +collection_name = "hello_milvus" +milvus_client = MilvusClient("http://localhost:19530") + +has_collection = milvus_client.has_collection(collection_name, timeout=5) +if has_collection: + milvus_client.drop_collection(collection_name) +milvus_client.create_collection(collection_name, dim, consistency_level="Strong", metric_type="L2") + +rng = np.random.default_rng(seed=19530) +rows = [ + {"id": 1, "vector": rng.random((1, dim))[0], "a": 100}, + {"id": 2, "vector": rng.random((1, dim))[0], "b": 200}, + {"id": 3, "vector": rng.random((1, dim))[0], "c": 300}, + {"id": 4, "vector": rng.random((1, dim))[0], "d": 400}, + {"id": 5, "vector": rng.random((1, dim))[0], "e": 500}, + {"id": 6, "vector": rng.random((1, dim))[0], "f": 600}, +] + +print(fmt.format("Start inserting entities")) +insert_result = milvus_client.insert(collection_name, rows) +print(fmt.format("Inserting entities done")) +print(insert_result) + +upsert_ret = milvus_client.upsert(collection_name, {"id": 2 , "vector": rng.random((1, dim))[0], "g": 100}) +print(upsert_ret) + +print(fmt.format("Start flush")) +milvus_client.flush(collection_name) +print(fmt.format("flush done")) + +result = milvus_client.query(collection_name, "", output_fields = ["count(*)"]) +print(f"final entities in {collection_name} is {result[0]['count(*)']}") + +rows = [ + {"id": 7, "vector": rng.random((1, dim))[0], "g": 700}, + {"id": 8, "vector": rng.random((1, dim))[0], "h": 800}, + {"id": 9, "vector": rng.random((1, dim))[0], "i": 900}, + {"id": 10, "vector": rng.random((1, dim))[0], "j": 1000}, + {"id": 11, "vector": rng.random((1, dim))[0], "k": 1100}, + {"id": 12, "vector": rng.random((1, dim))[0], "l": 1200}, +] + +print(fmt.format("Start inserting entities")) +insert_result = milvus_client.insert(collection_name, rows) +print(fmt.format("Inserting entities done")) +print(insert_result) + +print(fmt.format("Start flush")) +milvus_client.flush(collection_name) +print(fmt.format("flush done")) + +result = milvus_client.query(collection_name, "", output_fields = ["count(*)"]) +print(f"final entities in {collection_name} is {result[0]['count(*)']}") + +print(fmt.format("Start compact")) +job_id = milvus_client.compact(collection_name) +print(f"job_id:{job_id}") + +cnt = 0 +state = milvus_client.get_compaction_state(job_id) +while (state != "Completed" and cnt < 10): + time.sleep(1.0) + state = milvus_client.get_compaction_state(job_id) + print(f"compaction state: {state}") + cnt += 1 + +if state == "Completed": + print(fmt.format("compact done")) +else: + print(fmt.format("compact timeout")) + +result = milvus_client.query(collection_name, "", output_fields = ["count(*)"]) +print(f"final entities in {collection_name} is {result[0]['count(*)']}") + +milvus_client.drop_collection(collection_name) diff --git a/examples/multithreading_hello_milvus.py b/examples/concurrency/multithreading_hello_milvus.py similarity index 100% rename from examples/multithreading_hello_milvus.py rename to examples/concurrency/multithreading_hello_milvus.py diff --git 
a/examples/milvus_client/customize_schema.py b/examples/customize_schema.py similarity index 100% rename from examples/milvus_client/customize_schema.py rename to examples/customize_schema.py diff --git a/examples/milvus_client/customize_schema_auto_id.py b/examples/customize_schema_auto_id.py similarity index 100% rename from examples/milvus_client/customize_schema_auto_id.py rename to examples/customize_schema_auto_id.py diff --git a/examples/bfloat16_example.py b/examples/datatypes/bfloat16_example.py similarity index 100% rename from examples/bfloat16_example.py rename to examples/datatypes/bfloat16_example.py diff --git a/examples/binary_example.py b/examples/datatypes/binary_example.py similarity index 100% rename from examples/binary_example.py rename to examples/datatypes/binary_example.py diff --git a/examples/dynamic_field.py b/examples/datatypes/dynamic_field.py similarity index 100% rename from examples/dynamic_field.py rename to examples/datatypes/dynamic_field.py diff --git a/examples/example_str.py b/examples/datatypes/example_str.py similarity index 100% rename from examples/example_str.py rename to examples/datatypes/example_str.py diff --git a/examples/float16_example.py b/examples/datatypes/float16_example.py similarity index 100% rename from examples/float16_example.py rename to examples/datatypes/float16_example.py diff --git a/examples/fuzzy_match.py b/examples/datatypes/fuzzy_match.py similarity index 100% rename from examples/fuzzy_match.py rename to examples/datatypes/fuzzy_match.py diff --git a/examples/datatypes/hello_bm25.py b/examples/datatypes/hello_bm25.py new file mode 100644 index 000000000..9a3739e96 --- /dev/null +++ b/examples/datatypes/hello_bm25.py @@ -0,0 +1,215 @@ +# hello_bm25.py demonstrates how to insert raw data only into Milvus and perform +# sparse vector based ANN search using BM25 algorithm. +# 1. connect to Milvus +# 2. create collection +# 3. insert data +# 4. create index +# 5. search, query, and filtering search on entities +# 6. delete entities by PK +# 7. drop collection +import time + +from pymilvus import ( + connections, + utility, + FieldSchema, CollectionSchema, Function, DataType, FunctionType, + Collection, +) + +fmt = "\n=== {:30} ===\n" +search_latency_fmt = "search latency = {:.4f}s" + +################################################################################# +# 1. connect to Milvus +# Add a new connection alias `default` for Milvus server in `localhost:19530` +print(fmt.format("start connecting to Milvus")) +connections.connect("default", host="localhost", port="19530") + +has = utility.has_collection("hello_bm25") +print(f"Does collection hello_bm25 exist in Milvus: {has}") + +################################################################################# +# 2. create collection +# We're going to create a collection with 2 explicit fields and a function. 
+# +-+------------+------------+------------------+------------------------------+ +# | | field name | field type | other attributes | field description | +# +-+------------+------------+------------------+------------------------------+ +# |1| "id" | INT64 | is_primary=True | "primary field" | +# | | | | auto_id=False | | +# +-+------------+------------+------------------+------------------------------+ +# |2| "document" | VarChar | | "raw text document" | +# +-+------------+------------+------------------+------------------------------+ +# +# Function 'bm25' is used to convert raw text document to a sparse vector representation +# and store it in the 'sparse' field. +# +-+------------+-------------------+-----------+------------------------------+ +# | | field name | field type | other attr| field description | +# +-+------------+-------------------+-----------+------------------------------+ +# |3| "sparse" |SPARSE_FLOAT_VECTOR| | | +# +-+------------+-------------------+-----------+------------------------------+ +# +fields = [ + FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True), + FieldSchema(name="sparse", dtype=DataType.SPARSE_FLOAT_VECTOR), + FieldSchema(name="document", dtype=DataType.VARCHAR, max_length=1000, enable_analyzer=True), +] + +bm25_function = Function( + name="bm25", + function_type=FunctionType.BM25, + input_field_names=["document"], + output_field_names="sparse", +) + +schema = CollectionSchema(fields, "hello_bm25 demo") +schema.add_function(bm25_function) + +print(fmt.format("Create collection `hello_bm25`")) +hello_bm25 = Collection("hello_bm25", schema, consistency_level="Strong") + +################################################################################ +# 3. insert data +# We are going to insert 3 rows of data into `hello_bm25` +# Data to be inserted must be organized in fields. +# +# The insert() method returns: +# - either automatically generated primary keys by Milvus if auto_id=True in the schema; +# - or the existing primary key field from the entities if auto_id=False in the schema. + +print(fmt.format("Start inserting entities")) + +num_entities = 6 + +entities = [ + [f"This is a test document {i + hello_bm25.num_entities}" for i in range(num_entities)], +] + +insert_result = hello_bm25.insert(entities) +ids = insert_result.primary_keys + +time.sleep(3) + +hello_bm25.flush() +print(f"Number of entities in Milvus: {hello_bm25.num_entities}") # check the num_entities + +################################################################################ +# 4. create index +# We are going to create an index for hello_bm25 collection, here we simply +# uses AUTOINDEX so Milvus can use the default parameters. +print(fmt.format("Start Creating index AUTOINDEX")) +index = { + "index_type": "AUTOINDEX", + "metric_type": "BM25", +} + +hello_bm25.create_index("sparse", index) + +################################################################################ +# 5. search, query, and scalar filtering search +# After data were inserted into Milvus and indexed, you can perform: +# - search texts relevance by BM25 using sparse vector ANN search +# - query based on scalar filtering(boolean, int, etc.) +# - scalar filtering search. +# + +# Before conducting a search or a query, you need to load the data in `hello_bm25` into memory. 
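Because the bm25 function above is the only producer of the sparse field, nothing is ever inserted into it directly: inserts supply only the raw document text, and searches pass raw query strings against the sparse field with the BM25 metric. A minimal sketch of that search pattern, assuming a collection col built with the schema and function above and already loaded:

# Hedged sketch: full text search passes raw strings, not precomputed sparse vectors.
query_texts = ["test document"]
bm25_params = {"metric_type": "BM25", "params": {}}
hits = col.search(query_texts, "sparse", bm25_params, limit=3, output_fields=["document"])
for hit in hits[0]:
    # each hit carries the stored raw text of the matched document
    print(hit.entity.get("document"))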
+print(fmt.format("Start loading")) +hello_bm25.load() + +# ----------------------------------------------------------------------------- +print(fmt.format("Start searching based on BM25 texts relevance using sparse vector ANN search")) +texts_to_search = entities[-1][-2:] +print(fmt.format(f"texts_to_search: {texts_to_search}")) +search_params = { + "metric_type": "BM25", + "params": {}, +} + +start_time = time.time() +result = hello_bm25.search(texts_to_search, "sparse", search_params, limit=3, output_fields=["document"], consistency_level="Strong") +end_time = time.time() + +for hits, text in zip(result, texts_to_search): + print(f"result of text: {text}") + for hit in hits: + print(f"\thit: {hit}, document field: {hit.entity.get('document')}") +print(search_latency_fmt.format(end_time - start_time)) + +# ----------------------------------------------------------------------------- +# query based on scalar filtering(boolean, int, etc.) +filter_id = ids[num_entities // 2 - 1] +print(fmt.format(f"Start querying with `id > {filter_id}`")) + +start_time = time.time() +result = hello_bm25.query(expr=f"id > {filter_id}", output_fields=["document"]) +end_time = time.time() + +print(f"query result:\n-{result[0]}") +print(search_latency_fmt.format(end_time - start_time)) + +# ----------------------------------------------------------------------------- +# pagination +r1 = hello_bm25.query(expr=f"id > {filter_id}", limit=3, output_fields=["document"]) +r2 = hello_bm25.query(expr=f"id > {filter_id}", offset=1, limit=2, output_fields=["document"]) +print(f"query pagination(limit=3):\n\t{r1}") +print(f"query pagination(offset=1, limit=2):\n\t{r2}") + + +# ----------------------------------------------------------------------------- +# scalar filtering search +print(fmt.format(f"Start filtered searching with `id > {filter_id}`")) + +start_time = time.time() +result = hello_bm25.search(texts_to_search, "sparse", search_params, limit=3, expr=f"id > {filter_id}", output_fields=["document"]) +end_time = time.time() + +for hits, text in zip(result, texts_to_search): + print(f"result of text: {text}") + for hit in hits: + print(f"\thit: {hit}, document field: {hit.entity.get('document')}") +print(search_latency_fmt.format(end_time - start_time)) + +############################################################################### +# 6. delete entities by PK +# You can delete entities by their PK values using boolean expressions. + +expr = f'id in [{ids[0]}, {ids[1]}]' +print(fmt.format(f"Start deleting with expr `{expr}`")) + +result = hello_bm25.query(expr=expr, output_fields=["document"]) +print(f"query before delete by expr=`{expr}` -> result: \n- {result[0]}\n- {result[1]}\n") + +hello_bm25.delete(expr) + +result = hello_bm25.query(expr=expr, output_fields=["document"]) +print(f"query after delete by expr=`{expr}` -> result: {result}\n") + +############################################################################### +# 7. upsert by PK +# You can upsert data to replace existing data. 
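The upsert below passes column-ordered data in the ORM style; the MilvusClient examples elsewhere in this change (compact.py, flush.py) pass row dicts instead. In both forms the operation is keyed on the primary key: an entity with a matching id is replaced, and an unseen id is inserted. A minimal sketch of the row-dict form against a quick-setup collection, with all names assumed:

import numpy as np
from pymilvus import MilvusClient

client = MilvusClient("http://localhost:19530")
client.create_collection("upsert_demo", 8, metric_type="L2")

# first write creates the entity with primary key 1
client.insert("upsert_demo", {"id": 1, "vector": np.random.rand(8).tolist()})
# upsert with the same primary key replaces that entity; a new id would be inserted instead
client.upsert("upsert_demo", {"id": 1, "vector": np.random.rand(8).tolist(), "tag": "updated"})

client.drop_collection("upsert_demo")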
+target_id = ids[2] +print(fmt.format(f"Start upsert operation for id {target_id}")) + +# Query before upsert +result_before = hello_bm25.query(expr=f"id == {target_id}", output_fields=["id", "document"]) +print(f"Query before upsert (id={target_id}):\n{result_before}") + +# Prepare data for upsert +upsert_data = [ + [target_id], + ["This is an upserted document for testing purposes."] +] + +# Perform upsert operation +hello_bm25.upsert(upsert_data) + +# Query after upsert +result_after = hello_bm25.query(expr=f"id == {target_id}", output_fields=["id", "document"]) +print(f"Query after upsert (id={target_id}):\n{result_after}") + + +############################################################################### +# 7. drop collection +# Finally, drop the hello_bm25 collection +print(fmt.format("Drop collection `hello_bm25`")) +utility.drop_collection("hello_bm25") diff --git a/examples/hello_milvus_array.py b/examples/datatypes/hello_milvus_array.py similarity index 100% rename from examples/hello_milvus_array.py rename to examples/datatypes/hello_milvus_array.py diff --git a/examples/hello_sparse.py b/examples/datatypes/hello_sparse.py similarity index 91% rename from examples/hello_sparse.py rename to examples/datatypes/hello_sparse.py index b6ac8f732..eab0c558e 100644 --- a/examples/hello_sparse.py +++ b/examples/datatypes/hello_sparse.py @@ -10,7 +10,7 @@ import time import numpy as np -from scipy.sparse import rand +import random from pymilvus import ( connections, utility, @@ -20,7 +20,9 @@ fmt = "=== {:30} ===" search_latency_fmt = "search latency = {:.4f}s" -num_entities, dim, density = 1000, 3000, 0.005 +num_entities, dim = 1000, 3000 +# non zero count of randomly generated sparse vectors +nnz = 30 def log(msg): print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + " " + msg) @@ -54,11 +56,16 @@ def log(msg): # insert log(fmt.format("Start creating entities to insert")) rng = np.random.default_rng(seed=19530) -# this step is so damn slow -matrix_csr = rand(num_entities, dim, density=density, format='csr') + +def generate_sparse_vector(dimension: int, non_zero_count: int) -> dict: + indices = random.sample(range(dimension), non_zero_count) + values = [random.random() for _ in range(non_zero_count)] + sparse_vector = {index: value for index, value in zip(indices, values)} + return sparse_vector + entities = [ rng.random(num_entities).tolist(), - matrix_csr, + [generate_sparse_vector(dim, nnz) for _ in range(num_entities)], ] log(fmt.format("Start inserting entities")) diff --git a/examples/example_tls1.py b/examples/example_tls1.py deleted file mode 100644 index 8879ac750..000000000 --- a/examples/example_tls1.py +++ /dev/null @@ -1,164 +0,0 @@ -import random - -from pymilvus import ( - connections, - FieldSchema, CollectionSchema, DataType, - Collection, - utility -) - -# This example shows how to: -# 1. connect to Milvus server -# 2. create a collection -# 3. insert entities -# 4. create index -# 5. 
search - - -_HOST = '127.0.0.1' -_PORT = '19530' - -# Const names -_COLLECTION_NAME = 'demo' -_ID_FIELD_NAME = 'id_field' -_VECTOR_FIELD_NAME = 'float_vector_field' - -# Vector parameters -_DIM = 128 -_INDEX_FILE_SIZE = 32 # max file size of stored index - -# Index parameters -_METRIC_TYPE = 'L2' -_INDEX_TYPE = 'IVF_FLAT' -_NLIST = 1024 -_NPROBE = 16 -_TOPK = 3 - - -# Create a Milvus connection -def create_connection(): - print(f"\nCreate connection...") - connections.connect(host=_HOST, port=_PORT, secure=True, server_pem_path="cert/server.pem", server_name="localhost") - print(f"\nList connections:") - print(connections.list_connections()) - - -# Create a collection named 'demo' -def create_collection(name, id_field, vector_field): - field1 = FieldSchema(name=id_field, dtype=DataType.INT64, description="int64", is_primary=True) - field2 = FieldSchema(name=vector_field, dtype=DataType.FLOAT_VECTOR, description="float vector", dim=_DIM, - is_primary=False) - schema = CollectionSchema(fields=[field1, field2], description="collection description") - collection = Collection(name=name, data=None, schema=schema) - print("\ncollection created:", name) - return collection - - -def has_collection(name): - return utility.has_collection(name) - - -# Drop a collection in Milvus -def drop_collection(name): - collection = Collection(name) - collection.drop() - print("\nDrop collection: {}".format(name)) - - -# List all collections in Milvus -def list_collections(): - print("\nlist collections:") - print(utility.list_collections()) - - -def insert(collection, num, dim): - data = [ - [i for i in range(num)], - [[random.random() for _ in range(dim)] for _ in range(num)], - ] - collection.insert(data) - return data[1] - - -def get_entity_num(collection): - print("\nThe number of entity:") - print(collection.num_entities) - - -def create_index(collection, filed_name): - index_param = { - "index_type": _INDEX_TYPE, - "params": {"nlist": _NLIST}, - "metric_type": _METRIC_TYPE} - collection.create_index(filed_name, index_param) - print("\nCreated index:\n{}".format(collection.index().params)) - - -def drop_index(collection): - collection.drop_index() - print("\nDrop index sucessfully") - - -def load_collection(collection): - collection.load() - - -def release_collection(collection): - collection.release() - - -def search(collection, vector_field, id_field, search_vectors): - search_param = { - "data": search_vectors, - "anns_field": vector_field, - "param": {"metric_type": _METRIC_TYPE, "params": {"nprobe": _NPROBE}}, - "limit": _TOPK, - "expr": "id_field > 0"} - results = collection.search(**search_param) - for i, result in enumerate(results): - print("\nSearch result for {}th vector: ".format(i)) - for j, res in enumerate(result): - print("Top {}: {}".format(j, res)) - - -def main(): - # create a connection - create_connection() - - # drop collection if the collection exists - if has_collection(_COLLECTION_NAME): - drop_collection(_COLLECTION_NAME) - - # create collection - collection = create_collection(_COLLECTION_NAME, _ID_FIELD_NAME, _VECTOR_FIELD_NAME) - - # show collections - list_collections() - - # insert 10000 vectors with 128 dimension - vectors = insert(collection, 10000, _DIM) - - # get the number of entities - get_entity_num(collection) - - # create index - create_index(collection, _VECTOR_FIELD_NAME) - - # load data to memory - load_collection(collection) - - # search - search(collection, _VECTOR_FIELD_NAME, _ID_FIELD_NAME, vectors[:3]) - - # release memory - release_collection(collection) - - 
# drop collection index - drop_index(collection) - - # drop collection - drop_collection(_COLLECTION_NAME) - - -if __name__ == '__main__': - main() diff --git a/examples/example_tls2.py b/examples/example_tls2.py deleted file mode 100644 index 211f503ab..000000000 --- a/examples/example_tls2.py +++ /dev/null @@ -1,166 +0,0 @@ -import random - -from pymilvus import ( - connections, - FieldSchema, CollectionSchema, DataType, - Collection, - utility -) - -# This example shows how to: -# 1. connect to Milvus server -# 2. create a collection -# 3. insert entities -# 4. create index -# 5. search - - -_HOST = '127.0.0.1' -_PORT = '19530' - -# Const names -_COLLECTION_NAME = 'demo' -_ID_FIELD_NAME = 'id_field' -_VECTOR_FIELD_NAME = 'float_vector_field' - -# Vector parameters -_DIM = 128 -_INDEX_FILE_SIZE = 32 # max file size of stored index - -# Index parameters -_METRIC_TYPE = 'L2' -_INDEX_TYPE = 'IVF_FLAT' -_NLIST = 1024 -_NPROBE = 16 -_TOPK = 3 - - -# Create a Milvus connection -def create_connection(): - print(f"\nCreate connection...") - connections.connect(host=_HOST, port=_PORT, secure=True, client_pem_path="cert/client.pem", - client_key_path="cert/client.key", - ca_pem_path="cert/ca.pem", server_name="localhost") - print(f"\nList connections:") - print(connections.list_connections()) - - -# Create a collection named 'demo' -def create_collection(name, id_field, vector_field): - field1 = FieldSchema(name=id_field, dtype=DataType.INT64, description="int64", is_primary=True) - field2 = FieldSchema(name=vector_field, dtype=DataType.FLOAT_VECTOR, description="float vector", dim=_DIM, - is_primary=False) - schema = CollectionSchema(fields=[field1, field2], description="collection description") - collection = Collection(name=name, data=None, schema=schema) - print("\ncollection created:", name) - return collection - - -def has_collection(name): - return utility.has_collection(name) - - -# Drop a collection in Milvus -def drop_collection(name): - collection = Collection(name) - collection.drop() - print("\nDrop collection: {}".format(name)) - - -# List all collections in Milvus -def list_collections(): - print("\nlist collections:") - print(utility.list_collections()) - - -def insert(collection, num, dim): - data = [ - [i for i in range(num)], - [[random.random() for _ in range(dim)] for _ in range(num)], - ] - collection.insert(data) - return data[1] - - -def get_entity_num(collection): - print("\nThe number of entity:") - print(collection.num_entities) - - -def create_index(collection, filed_name): - index_param = { - "index_type": _INDEX_TYPE, - "params": {"nlist": _NLIST}, - "metric_type": _METRIC_TYPE} - collection.create_index(filed_name, index_param) - print("\nCreated index:\n{}".format(collection.index().params)) - - -def drop_index(collection): - collection.drop_index() - print("\nDrop index sucessfully") - - -def load_collection(collection): - collection.load() - - -def release_collection(collection): - collection.release() - - -def search(collection, vector_field, id_field, search_vectors): - search_param = { - "data": search_vectors, - "anns_field": vector_field, - "param": {"metric_type": _METRIC_TYPE, "params": {"nprobe": _NPROBE}}, - "limit": _TOPK, - "expr": "id_field > 0"} - results = collection.search(**search_param) - for i, result in enumerate(results): - print("\nSearch result for {}th vector: ".format(i)) - for j, res in enumerate(result): - print("Top {}: {}".format(j, res)) - - -def main(): - # create a connection - create_connection() - - # drop collection if the 
collection exists - if has_collection(_COLLECTION_NAME): - drop_collection(_COLLECTION_NAME) - - # create collection - collection = create_collection(_COLLECTION_NAME, _ID_FIELD_NAME, _VECTOR_FIELD_NAME) - - # show collections - list_collections() - - # insert 10000 vectors with 128 dimension - vectors = insert(collection, 10000, _DIM) - - # get the number of entities - get_entity_num(collection) - - # create index - create_index(collection, _VECTOR_FIELD_NAME) - - # load data to memory - load_collection(collection) - - # search - search(collection, _VECTOR_FIELD_NAME, _ID_FIELD_NAME, vectors[:3]) - - # release memory - release_collection(collection) - - # drop collection index - drop_index(collection) - - # drop collection - drop_collection(_COLLECTION_NAME) - - -if __name__ == '__main__': - main() diff --git a/examples/flush.py b/examples/flush.py new file mode 100644 index 000000000..c192a6812 --- /dev/null +++ b/examples/flush.py @@ -0,0 +1,57 @@ +import time +import numpy as np +from pymilvus import ( + MilvusClient, +) + +fmt = "\n=== {:30} ===\n" +dim = 8 +collection_name = "hello_milvus" +milvus_client = MilvusClient("http://localhost:19530") + +has_collection = milvus_client.has_collection(collection_name, timeout=5) +if has_collection: + milvus_client.drop_collection(collection_name) +milvus_client.create_collection(collection_name, dim, consistency_level="Strong", metric_type="L2") + +rng = np.random.default_rng(seed=19530) +rows = [ + {"id": 1, "vector": rng.random((1, dim))[0], "a": 100}, + {"id": 2, "vector": rng.random((1, dim))[0], "b": 200}, + {"id": 3, "vector": rng.random((1, dim))[0], "c": 300}, + {"id": 4, "vector": rng.random((1, dim))[0], "d": 400}, + {"id": 5, "vector": rng.random((1, dim))[0], "e": 500}, + {"id": 6, "vector": rng.random((1, dim))[0], "f": 600}, +] + +print(fmt.format("Start inserting entities")) +insert_result = milvus_client.insert(collection_name, rows) +print(fmt.format("Inserting entities done")) +print(insert_result) + +upsert_ret = milvus_client.upsert(collection_name, {"id": 2 , "vector": rng.random((1, dim))[0], "g": 100}) +print(upsert_ret) + +print(fmt.format("Start flush")) +milvus_client.flush(collection_name) +print(fmt.format("flush done")) + + +result = milvus_client.query(collection_name, "", output_fields = ["count(*)"]) +print(f"final entities in {collection_name} is {result[0]['count(*)']}") + + +print(f"start to delete by specifying filter in collection {collection_name}") +delete_result = milvus_client.delete(collection_name, ids=[6]) +print(delete_result) + + +print(fmt.format("Start flush")) +milvus_client.flush(collection_name) +print(fmt.format("flush done")) + + +result = milvus_client.query(collection_name, "", output_fields = ["count(*)"]) +print(f"final entities in {collection_name} is {result[0]['count(*)']}") + +milvus_client.drop_collection(collection_name) diff --git a/examples/get_server_version.py b/examples/get_server_version.py new file mode 100644 index 000000000..16b8bc708 --- /dev/null +++ b/examples/get_server_version.py @@ -0,0 +1,8 @@ +from pymilvus import ( + MilvusClient, +) + +milvus_client = MilvusClient("http://localhost:19530") + +version = milvus_client.get_server_version() +print(f"server version: {version}") diff --git a/examples/example_gpu_brute_force.py b/examples/gpu_indx/example_gpu_brute_force.py similarity index 100% rename from examples/example_gpu_brute_force.py rename to examples/gpu_indx/example_gpu_brute_force.py diff --git a/examples/example_gpu_cagra.py 
b/examples/gpu_indx/example_gpu_cagra.py similarity index 100% rename from examples/example_gpu_cagra.py rename to examples/gpu_indx/example_gpu_cagra.py diff --git a/examples/hybrid_search.py b/examples/hybrid_search.py index 02e85343a..28ae0b309 100644 --- a/examples/hybrid_search.py +++ b/examples/hybrid_search.py @@ -1,9 +1,7 @@ import numpy as np from pymilvus import ( - connections, - utility, - FieldSchema, CollectionSchema, DataType, - Collection, + MilvusClient, + DataType, AnnSearchRequest, RRFRanker, WeightedRanker, ) @@ -11,23 +9,26 @@ search_latency_fmt = "search latency = {:.4f}s" num_entities, dim = 3000, 8 -print(fmt.format("start connecting to Milvus")) -connections.connect("default", host="localhost", port="19530") +collection_name = "hello_milvus" +milvus_client = MilvusClient("http://localhost:19530") -has = utility.has_collection("hello_milvus") -print(f"Does collection hello_milvus exist in Milvus: {has}") +has_collection = milvus_client.has_collection(collection_name, timeout=5) +if has_collection: + milvus_client.drop_collection(collection_name) -fields = [ - FieldSchema(name="pk", dtype=DataType.VARCHAR, is_primary=True, auto_id=False, max_length=100), - FieldSchema(name="random", dtype=DataType.DOUBLE), - FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=dim), - FieldSchema(name="embeddings2", dtype=DataType.FLOAT_VECTOR, dim=dim) -] +schema = milvus_client.create_schema(auto_id=False, description="hello_milvus is the simplest demo to introduce the APIs") +schema.add_field("pk", DataType.VARCHAR, is_primary=True, max_length=100) +schema.add_field("random", DataType.DOUBLE) +schema.add_field("embeddings", DataType.FLOAT_VECTOR, dim=dim) +schema.add_field("embeddings2", DataType.FLOAT_VECTOR, dim=dim) -schema = CollectionSchema(fields, "hello_milvus is the simplest demo to introduce the APIs") +index_params = milvus_client.prepare_index_params() +index_params.add_index(field_name = "embeddings", index_type = "IVF_FLAT", metric_type="L2", nlist=128) +index_params.add_index(field_name = "embeddings2",index_type = "IVF_FLAT", metric_type="L2", nlist=128) print(fmt.format("Create collection `hello_milvus`")) -hello_milvus = Collection("hello_milvus", schema, consistency_level="Strong", num_shards = 4) + +milvus_client.create_collection(collection_name, schema=schema, index_params=index_params, consistency_level="Strong") print(fmt.format("Start inserting entities")) rng = np.random.default_rng(seed=19530) @@ -39,29 +40,19 @@ rng.random((num_entities, dim)), # field embeddings2, supports numpy.ndarray and list ] -insert_result = hello_milvus.insert(entities) +rows = [ {"pk": entities[0][i], "random": entities[1][i], "embeddings": entities[2][i], "embeddings2": entities[3][i]} for i in range (num_entities)] -hello_milvus.flush() -print(f"Number of entities in Milvus: {hello_milvus.num_entities}") # check the num_entities +insert_result = milvus_client.insert(collection_name, rows) -print(fmt.format("Start Creating index IVF_FLAT")) -index = { - "index_type": "IVF_FLAT", - "metric_type": "L2", - "params": {"nlist": 128}, -} - -hello_milvus.create_index("embeddings", index) -hello_milvus.create_index("embeddings2", index) print(fmt.format("Start loading")) -hello_milvus.load() +milvus_client.load_collection(collection_name) field_names = ["embeddings", "embeddings2"] +field_names = ["embeddings"] req_list = [] nq = 1 -weights = [0.2, 0.3] default_limit = 5 vectors_to_search = [] @@ -77,15 +68,8 @@ req = AnnSearchRequest(**search_param) req_list.append(req) 
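The rest of this hunk removes the WeightedRanker pass (its weights list went away along with the second search field) and keeps only RRF reranking through MilvusClient.hybrid_search. For reference, WeightedRanker expects exactly one weight per AnnSearchRequest, while RRFRanker takes no per-request weights. A minimal sketch of both forms, assuming the milvus_client, collection_name, and dim defined earlier in this file:

import numpy as np
from pymilvus import AnnSearchRequest, RRFRanker, WeightedRanker

q = np.random.rand(1, 8)  # one query vector; assumes dim == 8 as in this example
req1 = AnnSearchRequest(data=q, anns_field="embeddings", param={"metric_type": "L2"}, limit=5)
req2 = AnnSearchRequest(data=q, anns_field="embeddings2", param={"metric_type": "L2"}, limit=5)

# WeightedRanker: one weight per request, in the same order as the request list
weighted = milvus_client.hybrid_search(collection_name, [req1, req2], WeightedRanker(0.7, 0.3), 5, output_fields=["random"])
# RRFRanker: rank-based fusion, no weights to tune
fused = milvus_client.hybrid_search(collection_name, [req1, req2], RRFRanker(), 5, output_fields=["random"])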
-hybrid_res = hello_milvus.hybrid_search(req_list, WeightedRanker(*weights), default_limit, output_fields=["random"]) - -print("rank by WightedRanker") -for hits in hybrid_res: - for hit in hits: - print(f" hybrid search hit: {hit}") - print("rank by RRFRanker") -hybrid_res = hello_milvus.hybrid_search(req_list, RRFRanker(), default_limit, output_fields=["random"]) +hybrid_res = milvus_client.hybrid_search(collection_name, req_list, RRFRanker(), default_limit, output_fields=["random"]) for hits in hybrid_res: for hit in hits: print(f" hybrid search hit: {hit}") diff --git a/examples/hybrid_search/hello_hybrid_bm25.py b/examples/hybrid_search/hello_hybrid_bm25.py new file mode 100644 index 000000000..20dc46016 --- /dev/null +++ b/examples/hybrid_search/hello_hybrid_bm25.py @@ -0,0 +1,178 @@ +# A demo showing hybrid semantic search with dense and full text search with BM25 +# using Milvus. +# +# You can optionally choose to use the BGE-M3 model to embed the text as dense +# vectors, or simply use random generated vectors as an example. +# +# You can also use the BGE CrossEncoder model to rerank the search results. +# +# Note that the full text search feature is only available in Milvus 2.4.0 or +# higher version. Make sure you follow https://milvus.io/docs/install_standalone-docker.md +# to set up the latest version of Milvus in your local environment. + +# To connect to Milvus server, you need the python client library called pymilvus. +# To use BGE-M3 model, you need to install the optional `model` module in pymilvus. +# You can get them by simply running the following commands: +# +# pip install pymilvus +# pip install pymilvus[model] + +# If true, use BGE-M3 model to generate dense vectors. +# If false, use random numbers to compose dense vectors. +use_bge_m3 = False +# If true, the search result will be reranked using BGE CrossEncoder model. +use_reranker = False + +# The overall steps are as follows: +# 1. embed the text as dense and sparse vectors +# 2. setup a Milvus collection to store the dense and sparse vectors +# 3. insert the data to Milvus +# 4. search and inspect the result! +import random +import string +import numpy as np + +from pymilvus import ( + utility, + FieldSchema, + CollectionSchema, + DataType, + Collection, + AnnSearchRequest, + RRFRanker, + connections, + Function, + FunctionType, +) + +# 1. prepare a small corpus to search +docs = [ + "Artificial intelligence was founded as an academic discipline in 1956.", + "Alan Turing was the first person to conduct substantial research in AI.", + "Born in Maida Vale, London, Turing was raised in southern England.", +] +# add some randomly generated texts +docs.extend( + [ + " ".join( + "".join(random.choice(string.ascii_lowercase) for _ in range(random.randint(1, 8))) + for _ in range(10) + ) + for _ in range(1000) + ] +) +query = "Who started AI research?" + + +def random_embedding(texts): + rng = np.random.default_rng() + return { + "dense": np.random.rand(len(texts), 768), + } + + +dense_dim = 768 +ef = random_embedding + +if use_bge_m3: + # BGE-M3 model is included in the optional `model` module in pymilvus, to + # install it, simply run "pip install pymilvus[model]". + from pymilvus.model.hybrid import BGEM3EmbeddingFunction + + ef = BGEM3EmbeddingFunction(use_fp16=False, device="cpu") + dense_dim = ef.dim["dense"] + +docs_embeddings = ef(docs) +query_embeddings = ef([query]) + +# 2. 
setup Milvus collection and index +connections.connect("default", host="localhost", port="19530") + +# Specify the data schema for the new Collection. +fields = [ + # Use auto generated id as primary key + FieldSchema(name="pk", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100), + # Store the original text to retrieve based on semantically distance + FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=512, enable_analyzer=True), + # We need a sparse vector field to perform full text search with BM25, + # but you don't need to provide data for it when inserting data. + FieldSchema(name="sparse_vector", dtype=DataType.SPARSE_FLOAT_VECTOR), + FieldSchema(name="dense_vector", dtype=DataType.FLOAT_VECTOR, dim=dense_dim), +] +functions = [ + Function( + name="bm25", + function_type=FunctionType.BM25, + input_field_names=["text"], + output_field_names="sparse_vector", + ) +] +schema = CollectionSchema(fields, "", functions=functions) +col_name = "hybrid_bm25_demo" +# Now we can create the new collection with above name and schema. +col = Collection(col_name, schema, consistency_level="Strong") + +# We need to create indices for the vector fields. The indices will be loaded +# into memory for efficient search. +sparse_index = {"index_type": "SPARSE_INVERTED_INDEX", "metric_type": "BM25"} +col.create_index("sparse_vector", sparse_index) +dense_index = {"index_type": "FLAT", "metric_type": "IP"} +col.create_index("dense_vector", dense_index) +col.load() + +# 3. insert text and sparse/dense vector representations into the collection +entities = [docs, docs_embeddings["dense"]] +col.insert(entities) +col.flush() + +# 4. search and inspect the result! +k = 2 # we want to get the top 2 docs closest to the query + +# Prepare the search requests for both full text search and dense vector search +full_text_search_params = {"metric_type": "BM25"} +# provide raw text query for full text search, while use the sparse vector as +# ANNS field +full_text_search_req = AnnSearchRequest([query], "sparse_vector", full_text_search_params, limit=k) +dense_search_params = {"metric_type": "IP"} +dense_req = AnnSearchRequest( + query_embeddings["dense"], "dense_vector", dense_search_params, limit=k +) + +# Search topK docs based on dense and sparse vectors and rerank with RRF. +res = col.hybrid_search( + [full_text_search_req, dense_req], rerank=RRFRanker(), limit=k, output_fields=["text"] +) + +# Currently Milvus only support 1 query in the same hybrid search request, so +# we inspect res[0] directly. In future release Milvus will accept batch +# hybrid search queries in the same call. +res = res[0] + +if use_reranker: + result_texts = [hit.fields["text"] for hit in res] + from pymilvus.model.reranker import BGERerankFunction + + bge_rf = BGERerankFunction(device="cpu") + # rerank the results using BGE CrossEncoder model + results = bge_rf(query, result_texts, top_k=2) + for hit in results: + print(f"text: {hit.text} distance {hit.score}") +else: + for hit in res: + print(f'text: {hit.fields["text"]} distance {hit.distance}') + +# If you used both BGE-M3 and the reranker, you should see the following: +# text: Alan Turing was the first person to conduct substantial research in AI. distance 0.9306981017573297 +# text: Artificial intelligence was founded as an academic discipline in 1956. distance 0.03217001154515051 +# +# If you used only BGE-M3, you should see the following: +# text: Alan Turing was the first person to conduct substantial research in AI. 
distance 0.032786883413791656 +# text: Artificial intelligence was founded as an academic discipline in 1956. distance 0.016129031777381897 + +# In this simple example the reranker yields the same result as the embedding based hybrid search, but in more complex +# scenarios the reranker can provide more accurate results. + +# If you used random vectors, the result will be different each time you run the script. + +# Drop the collection to clean up the data. +utility.drop_collection(col_name) diff --git a/examples/hello_hybrid_sparse_dense.py b/examples/hybrid_search/hello_hybrid_sparse_dense.py similarity index 100% rename from examples/hello_hybrid_sparse_dense.py rename to examples/hybrid_search/hello_hybrid_sparse_dense.py diff --git a/examples/hybrid_search/hybrid_search.py b/examples/hybrid_search/hybrid_search.py new file mode 100644 index 000000000..6a13045f0 --- /dev/null +++ b/examples/hybrid_search/hybrid_search.py @@ -0,0 +1,93 @@ +import numpy as np +from pymilvus import ( + connections, + utility, + FieldSchema, CollectionSchema, DataType, + Collection, + AnnSearchRequest, RRFRanker, WeightedRanker, +) + +fmt = "\n=== {:30} ===\n" +search_latency_fmt = "search latency = {:.4f}s" +num_entities, dim = 3000, 8 + +print(fmt.format("start connecting to Milvus")) +connections.connect("default", host="localhost", port="19530") + +has = utility.has_collection("hello_milvus") +print(f"Does collection hello_milvus exist in Milvus: {has}") +if has: + utility.drop_collection("hello_milvus") + +fields = [ + FieldSchema(name="pk", dtype=DataType.VARCHAR, is_primary=True, auto_id=False, max_length=100), + FieldSchema(name="random", dtype=DataType.DOUBLE), + FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=dim), + FieldSchema(name="embeddings2", dtype=DataType.FLOAT_VECTOR, dim=dim) +] + +schema = CollectionSchema(fields, "hello_milvus is the simplest demo to introduce the APIs") + +print(fmt.format("Create collection `hello_milvus`")) +hello_milvus = Collection("hello_milvus", schema, consistency_level="Strong", num_shards = 4) + +print(fmt.format("Start inserting entities")) +rng = np.random.default_rng(seed=19530) +entities = [ + # provide the pk field because `auto_id` is set to False + [str(i) for i in range(num_entities)], + rng.random(num_entities).tolist(), # field random, only supports list + rng.random((num_entities, dim)), # field embeddings, supports numpy.ndarray and list + rng.random((num_entities, dim)), # field embeddings2, supports numpy.ndarray and list +] + +insert_result = hello_milvus.insert(entities) + +hello_milvus.flush() +print(f"Number of entities in Milvus: {hello_milvus.num_entities}") # check the num_entities + +print(fmt.format("Start Creating index IVF_FLAT")) +index = { + "index_type": "IVF_FLAT", + "metric_type": "L2", + "params": {"nlist": 128}, +} + +hello_milvus.create_index("embeddings", index) +hello_milvus.create_index("embeddings2", index) + +print(fmt.format("Start loading")) +hello_milvus.load() + +field_names = ["embeddings", "embeddings2"] + +req_list = [] +nq = 1 +weights = [0.2, 0.3] +default_limit = 5 +vectors_to_search = [] + +for i in range(len(field_names)): + # 4. 
generate search data + vectors_to_search = rng.random((nq, dim)) + search_param = { + "data": vectors_to_search, + "anns_field": field_names[i], + "param": {"metric_type": "L2"}, + "limit": default_limit, + "expr": "random > 0.5"} + req = AnnSearchRequest(**search_param) + req_list.append(req) + +hybrid_res = hello_milvus.hybrid_search(req_list, WeightedRanker(*weights), default_limit, output_fields=["random"]) + +print("rank by WightedRanker") +for hits in hybrid_res: + for hit in hits: + print(f" hybrid search hit: {hit}") + +print("rank by RRFRanker") +hybrid_res = hello_milvus.hybrid_search(req_list, RRFRanker(), default_limit, output_fields=["random"]) +for hits in hybrid_res: + for hit in hits: + print(f" hybrid search hit: {hit}") diff --git a/examples/milvus_client/index.py b/examples/index.py similarity index 100% rename from examples/milvus_client/index.py rename to examples/index.py diff --git a/examples/milvus_client/index_params.py b/examples/index_params.py similarity index 100% rename from examples/milvus_client/index_params.py rename to examples/index_params.py diff --git a/examples/milvus_client/partition.py b/examples/milvus_client/partition.py deleted file mode 100644 index 7466c034a..000000000 --- a/examples/milvus_client/partition.py +++ /dev/null @@ -1,85 +0,0 @@ -import time -import numpy as np -from pymilvus import ( - MilvusClient, -) - -fmt = "\n=== {:30} ===\n" -dim = 8 -collection_name = "hello_milvus" -milvus_client = MilvusClient("http://localhost:19530") - -has_collection = milvus_client.has_collection(collection_name, timeout=5) -if has_collection: - milvus_client.drop_collection(collection_name) -milvus_client.create_collection(collection_name, dim, consistency_level="Strong", metric_type="L2") - -print(fmt.format(" all collections ")) -print(milvus_client.list_collections()) - -print(fmt.format(f"schema of collection {collection_name}")) -print(milvus_client.describe_collection(collection_name)) - -rng = np.random.default_rng(seed=19530) - -milvus_client.create_partition(collection_name, partition_name = "p1") -milvus_client.insert(collection_name, {"id": 1, "vector": rng.random((1, dim))[0], "a": 100}, partition_name = "p1") -milvus_client.insert(collection_name, {"id": 2, "vector": rng.random((1, dim))[0], "b": 200}, partition_name = "p1") -milvus_client.insert(collection_name, {"id": 3, "vector": rng.random((1, dim))[0], "c": 300}, partition_name = "p1") - -milvus_client.create_partition(collection_name, partition_name = "p2") -milvus_client.insert(collection_name, {"id": 4, "vector": rng.random((1, dim))[0], "e": 400}, partition_name = "p2") -milvus_client.insert(collection_name, {"id": 5, "vector": rng.random((1, dim))[0], "f": 500}, partition_name = "p2") -milvus_client.insert(collection_name, {"id": 6, "vector": rng.random((1, dim))[0], "g": 600}, partition_name = "p2") - -has_p1 = milvus_client.has_partition(collection_name, "p1") -print("has partition p1", has_p1) - -has_p3 = milvus_client.has_partition(collection_name, "p3") -print("has partition p3", has_p3) - -partitions = milvus_client.list_partitions(collection_name) -print("partitions:", partitions) - -milvus_client.release_collection(collection_name) -milvus_client.load_partitions(collection_name, partition_names =["p1", "p2"]) - -print(fmt.format("Start search in partiton p1")) -vectors_to_search = rng.random((1, dim)) -result = milvus_client.search(collection_name, vectors_to_search, limit=3, output_fields=["pk", "a", "b"], partition_names = ["p1"]) -for hits in result: - for hit in hits: 
- print(f"hit: {hit}") - -milvus_client.release_partitions(collection_name, partition_names = ["p1"]) -milvus_client.drop_partition(collection_name, partition_name = "p1", timeout = 2.0) -print("successfully drop partition p1") - -try: - milvus_client.drop_partition(collection_name, partition_name = "p2", timeout = 2.0) -except Exception as e: - print(f"cacthed {e}") - -has_p1 = milvus_client.has_partition(collection_name, "p1") -print("has partition of p1:", has_p1) - -print(fmt.format("Start query by specifying primary keys")) -query_results = milvus_client.query(collection_name, ids=[2]) -assert len(query_results) == 0 - -print(fmt.format("Start query by specifying primary keys")) -query_results = milvus_client.query(collection_name, ids=[4]) -print(query_results[0]) - -print(fmt.format("Start query by specifying filtering expression")) -query_results = milvus_client.query(collection_name, filter= "f == 500") -for ret in query_results: - print(ret) - -print(fmt.format(f"Start search with retrieve serveral fields.")) -result = milvus_client.search(collection_name, vectors_to_search, limit=3, output_fields=["pk", "a", "b"]) -for hits in result: - for hit in hits: - print(f"hit: {hit}") - -milvus_client.drop_collection(collection_name) diff --git a/examples/hello_model.py b/examples/milvus_model/hello_model.py similarity index 100% rename from examples/hello_model.py rename to examples/milvus_model/hello_model.py diff --git a/examples/milvus_client/non_ascii_encode.py b/examples/non_ascii_encode.py similarity index 100% rename from examples/milvus_client/non_ascii_encode.py rename to examples/non_ascii_encode.py diff --git a/examples/old_style_example.py b/examples/old_style_example.py deleted file mode 100644 index 205bde80c..000000000 --- a/examples/old_style_example.py +++ /dev/null @@ -1,152 +0,0 @@ -import random - -from pymilvus import Milvus, DataType - -# This example shows how to: -# 1. connect to Milvus server -# 2. create a collection -# 3. insert entities -# 4. create index -# 5. 
search - -_HOST = '127.0.0.1' -_PORT = '19530' - -# Const names -_COLLECTION_NAME = 'demo' -_ID_FIELD_NAME = 'id_field' -_VECTOR_FIELD_NAME = 'float_vector_field' - -# Vector parameters -_DIM = 128 -_INDEX_FILE_SIZE = 32 # max file size of stored index - -# Index parameters -_METRIC_TYPE = 'L2' -_INDEX_TYPE = 'IVF_FLAT' -_NLIST = 1024 -_NPROBE = 16 -_TOPK = 10 - -# Create milvus client instance -milvus = Milvus(_HOST, _PORT) - - -def create_collection(name): - id_field = { - "name": _ID_FIELD_NAME, - "type": DataType.INT64, - "auto_id": True, - "is_primary": True, - } - vector_field = { - "name": _VECTOR_FIELD_NAME, - "type": DataType.FLOAT_VECTOR, - "params": {"dim": _DIM}, - } - fields = {"fields": [id_field, vector_field]} - - milvus.create_collection(collection_name=name, fields=fields) - print("collection created:", name) - - -def drop_collection(name): - if milvus.has_collection(name): - milvus.drop_collection(name) - print("collection dropped:", name) - - -def list_collections(): - collections = milvus.list_collections() - print("list collection:") - print(collections) - - -def get_collection_stats(name): - stats = milvus.get_collection_stats(name) - print("collection stats:") - print(stats) - - -def insert(name, num, dim): - vectors = [[random.random() for _ in range(dim)] for _ in range(num)] - entities = [ - {"name": _VECTOR_FIELD_NAME, "type": DataType.FLOAT_VECTOR, "values": vectors}, - ] - ids = milvus.insert(name, entities) - return ids, vectors - - -def flush(name): - milvus.flush([name]) - - -def create_index(name, field_name): - index_param = { - "metric_type": _METRIC_TYPE, - "index_type": _INDEX_TYPE, - "params": {"nlist": _NLIST} - } - milvus.create_index(name, field_name, index_param) - print("Create index: {}".format(index_param)) - - -def drop_index(name, field_name): - milvus.drop_index(name, field_name) - print("Drop index:", field_name) - - -def load_collection(name): - milvus.load_collection(name) - - -def release_collection(name): - milvus.release_collection(name) - - -def search(name, vector_field, search_vectors, ids): - nq = len(search_vectors) - search_params = {"metric_type": _METRIC_TYPE, "params": {"nprobe": _NPROBE}} - results = milvus.search(name, search_vectors, vector_field, param=search_params, limit=_TOPK) - for i in range(nq): - if results[i][0].distance == 0.0 or results[i][0].id == ids[0]: - print("OK! search results: ", results[i][0].entity) - else: - print("FAIL! 
search results: ", results[i][0].entity) - - -def main(): - name = _COLLECTION_NAME - vector_field = _VECTOR_FIELD_NAME - - drop_collection(name) - create_collection(name) - - # show collections - list_collections() - - # generate 10000 vectors with 128 dimension - ids, vectors = insert(name, 10000, _DIM) - - # flush - flush(name) - - # show row_count - get_collection_stats(name) - - # create index - create_index(name, vector_field) - - # load - load_collection(name) - - # search - search(name, vector_field, vectors[:10], ids) - - drop_index(name, vector_field) - release_collection(name) - drop_collection(name) - - -if __name__ == '__main__': - main() diff --git a/examples/old_style_example_index.py b/examples/old_style_example_index.py deleted file mode 100644 index 3ca3487c6..000000000 --- a/examples/old_style_example_index.py +++ /dev/null @@ -1,155 +0,0 @@ -""" -This is an example of creating index - -We will be using films.csv file, You can obtain it from here -(https://raw.githubusercontent.com/milvus-io/pymilvus/0.3.0/examples/films.csv) -There are 4 coloumns in films.csv, they are `id`, `title`, `release_year` and `embedding`, all -the data are from MovieLens `ml-latest-small` data except id and embedding, those two columns are fake values. - -We will be using `films.csv` to demenstrate how can we build index and search by index on Milvus. -We assuming you have read `example.py` and have a basic conception about how to communicate with Milvus using -pymilvus - -This example is runable for Milvus(0.11.x) and pymilvus(0.3.x). -""" -import csv -import random -from pprint import pprint - -from pymilvus import Milvus, DataType - -_HOST = '127.0.0.1' -_PORT = '19530' -client = Milvus(_HOST, _PORT) - -collection_name = 'demo_index' -if collection_name in client.list_collections(): - client.drop_collection(collection_name) - -collection_param = { - "fields": [ - {"name": "id", "type": DataType.INT64, "is_primary": True}, - {"name": "release_year", "type": DataType.INT64}, - {"name": "embedding", "type": DataType.FLOAT_VECTOR, "params": {"dim": 8}}, - ], -} - -client.create_collection(collection_name, collection_param) - -# ------ -# Basic create index: -# Now that we have a collection in Milvus with `segment_row_limit` 4096, we can create index or -# insert entities. -# -# We can execute `create_index` BEFORE we insert any entites or AFTER. However Milvus won't actually -# start build index task if the segment row count is smaller than `segment_row_limit`. So if we want -# to make Milvus build index right away, we need to insert number of entities larger than -# `segment_row_limit` -# -# We are going to use data in `films.csv` so you can checkout the structure. And we need to group -# data with same fields together, so here is a example of how we obtain the data in files and transfer -# them into what we need. 
-# ------ - -ids = [] # ids -titles = [] # titles -release_years = [] # release year -embeddings = [] # embeddings -films = [] -with open('films.csv', 'r') as csvfile: - reader = csv.reader(csvfile) - films = [film for film in reader] - -for film in films: - ids.append(int(film[0])) - titles.append(film[1]) - release_years.append(int(film[2])) - embeddings.append(list(map(float, film[3][1:][:-1].split(',')))) - -hybrid_entities = [ - {"name": "id", "values": ids, "type": DataType.INT64}, - {"name": "release_year", "values": release_years, "type": DataType.INT64}, - {"name": "embedding", "values": embeddings, "type": DataType.FLOAT_VECTOR}, -] - -# ------ -# Basic insert: -# After preparing the data, we are going to insert them into our collection. -# The number of films inserted should be 8657. -# ------ -ids = client.insert(collection_name, hybrid_entities) - -client.flush([collection_name]) -after_flush_counts = client.get_collection_stats(collection_name) -print(" > There are {} films in collection `{}` after flush".format(after_flush_counts, collection_name)) - -# ------ -# Basic create index: -# Now that we have insert all the films into Milvus, we are going to build index with these datas. -# -# While build index, we have to indicate which `field` to build index for, the `index_type`, -# `metric_type` and params for the specific index type. In our case, we want to build a `IVF_FLAT` -# index, so the specific params are "nlist". See pymilvus documentation -# (https://milvus-io.github.io/milvus-sdk-python/pythondoc/v0.3.0/index.html) for `index_type` we -# support and the params accordingly -# -# If there are already index for a collection and you run `create_index` with different params the -# older index will be replaced by new one. -# ------ -client.create_index(collection_name, "embedding", - {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}) - -# ------ -# Basic create index: -# We can get the detail of the index by `describe_collection` -# ------ -info = client.describe_collection(collection_name) -pprint(info) - -# ------ -# Basic load collection: -# Before search, we need to load collection data into memory. -# ------ -client.load_collection(collection_name) - -# ------ -# Basic search with expressions: -# If we want to use index, the specific index params need to be provided, in our case, the "params" -# should be "nprobe", if no "params" given, Milvus will complain about it and raise a exception. -# ------ -embedding2search = [[random.random() for _ in range(8)] for _ in range(1)] -search_param = { - "data": embedding2search, - "anns_field": "embedding", - "param": {"metric_type": "L2", "params": {"nprobe": 8}}, - "limit": 3, - "output_fields": ["release_year"], - "expression": "release_year in [1995, 2002]", -} - -# ------ -# Basic hybrid search entities -# ------ -results = client.search(collection_name, **search_param) -for entities in results: - for topk_film in entities: - current_entity = topk_film.entity - print("==") - print(f"- id: {topk_film.id}") - print(f"- title: {titles[topk_film.id]}") - print(f"- distance: {topk_film.distance}") - print(f"- release_year: {current_entity.release_year}") - -# ------ -# Basic delete index: -# You can drop index we create -# ------ -client.drop_index(collection_name, "embedding") - -if collection_name in client.list_collections(): - client.drop_collection(collection_name) - -# ------ -# Summary: -# Now we've went through some basic build index operations, hope it's helpful! 
-# ------ diff --git a/examples/old_style_example_str.py b/examples/old_style_example_str.py deleted file mode 100644 index 83ec354ae..000000000 --- a/examples/old_style_example_str.py +++ /dev/null @@ -1,163 +0,0 @@ -import random - -from pymilvus import Milvus, DataType - -# This example shows how to: -# 1. connect to Milvus server -# 2. create a collection -# 3. insert entities -# 4. create index -# 5. search - -_HOST = '127.0.0.1' -_PORT = '19530' - -# Const names -_COLLECTION_NAME = 'demo' -_ID_FIELD_NAME = 'id_field' -_VECTOR_FIELD_NAME = 'float_vector_field' - -# Vector parameters -_DIM = 128 -_INDEX_FILE_SIZE = 32 # max file size of stored index - -# Index parameters -_METRIC_TYPE = 'L2' -_INDEX_TYPE = 'IVF_FLAT' -_NLIST = 1024 -_NPROBE = 16 -_TOPK = 10 - -# string -_STR_FIELD_NAME = "str" -_MAX_LEN = 1024 - -# Create milvus client instance -milvus = Milvus(_HOST, _PORT) - - -def create_collection(name): - id_field = { - "name": _ID_FIELD_NAME, - "type": DataType.INT64, - "auto_id": True, - "is_primary": True, - } - vector_field = { - "name": _VECTOR_FIELD_NAME, - "type": DataType.FLOAT_VECTOR, - "params": {"dim": _DIM}, - } - str_field = { - "name": _STR_FIELD_NAME, - "type": DataType.VARCHAR, - "params": {"max_length": _MAX_LEN}, - } - fields = {"fields": [id_field, vector_field, str_field]} - - milvus.create_collection(collection_name=name, fields=fields) - print("collection created:", name) - - -def drop_collection(name): - if milvus.has_collection(name): - milvus.drop_collection(name) - print("collection dropped:", name) - - -def list_collections(): - collections = milvus.list_collections() - print("list collection:") - print(collections) - - -def get_collection_stats(name): - stats = milvus.get_collection_stats(name) - print("collection stats:") - print(stats) - - -def insert(name, num, dim): - vectors = [[random.random() for _ in range(dim)] for _ in range(num)] - strs = [str(random.random()) for _ in range(num)] - entities = [ - {"name": _VECTOR_FIELD_NAME, "type": DataType.FLOAT_VECTOR, "values": vectors}, - {"name": _STR_FIELD_NAME, "type": DataType.VARCHAR, "values": strs}, - ] - ids = milvus.insert(name, entities) - return ids, vectors - - -def flush(name): - milvus.flush([name]) - - -def create_index(name, field_name): - index_param = { - "metric_type": _METRIC_TYPE, - "index_type": _INDEX_TYPE, - "params": {"nlist": _NLIST} - } - milvus.create_index(name, field_name, index_param) - print("Create index: {}".format(index_param)) - - -def drop_index(name, field_name): - milvus.drop_index(name, field_name) - print("Drop index:", field_name) - - -def load_collection(name): - milvus.load_collection(name) - - -def release_collection(name): - milvus.release_collection(name) - - -def search(name, vector_field, search_vectors, ids): - nq = len(search_vectors) - search_params = {"metric_type": _METRIC_TYPE, "params": {"nprobe": _NPROBE}} - results = milvus.search(name, search_vectors, vector_field, param=search_params, limit=_TOPK) - for i in range(nq): - if results[i][0].distance == 0.0 or results[i][0].id == ids[0]: - print("OK! search results: ", results[i][0].entity) - else: - print("FAIL! 
search results: ", results[i][0].entity) - - -def main(): - name = _COLLECTION_NAME - vector_field = _VECTOR_FIELD_NAME - - drop_collection(name) - create_collection(name) - - # show collections - list_collections() - - # generate 10000 vectors with 128 dimension - ids, vectors = insert(name, 10000, _DIM) - - # flush - flush(name) - - # show row_count - get_collection_stats(name) - - # create index - create_index(name, vector_field) - - # load - load_collection(name) - - # search - search(name, vector_field, vectors[:10], ids) - - drop_index(name, vector_field) - release_collection(name) - drop_collection(name) - - -if __name__ == '__main__': - main() diff --git a/examples/old_style_query.py b/examples/old_style_query.py deleted file mode 100644 index 0a031cd9f..000000000 --- a/examples/old_style_query.py +++ /dev/null @@ -1,62 +0,0 @@ -import random - -from pymilvus import Milvus, DataType - -if __name__ == "__main__": - c = Milvus("localhost", "19530") - - collection_name = f"test_{random.randint(10000, 99999)}" - - c.create_collection(collection_name, {"fields": [ - { - "name": "f1", - "type": DataType.FLOAT_VECTOR, - "metric_type": "L2", - "params": {"dim": 4}, - "indexes": [{"metric_type": "L2"}] - }, - { - "name": "age", - "type": DataType.FLOAT, - }, - { - "name": "id", - "type": DataType.INT64, - "auto_id": True, - "is_primary": True, - } - ], - }, orm=True) - - assert c.has_collection(collection_name) - - ids = c.insert(collection_name, [ - {"name": "f1", "type": DataType.FLOAT_VECTOR, "values": [[1.1, 2.2, 3.3, 4.4], [5.5, 6.6, 7.7, 8.8]]}, - {"name": "age", "type": DataType.FLOAT, "values": [3.45, 8.9]} - ], orm=True) - - c.flush([collection_name]) - - c.load_collection(collection_name) - - ############################################################# - search_params = {"metric_type": "L2", "params": {"nprobe": 1}} - - results = c.search(collection_name, [[1.1, 2.2, 3.3, 4.4]], - "f1", param=search_params, limit=2, output_fields=["id"]) - - print("search results: ", results[0][0].entity, " + ", results[0][1].entity) - # - # print("Test entity.get: ", results[0][0].entity.get("age")) - # print("Test entity.value_of_field: ", results[0][0].entity.value_of_field("age")) - # print("Test entity.fields: ", results[0][0].entity.fields) - ############################################################# - - ids_expr = ",".join(str(x) for x in ids.primary_keys) - - expr = "id in [ " + ids_expr + " ] " - - print(expr) - - res = c.query(collection_name, expr) - print(res) diff --git a/examples/collection.py b/examples/orm/collection.py similarity index 100% rename from examples/collection.py rename to examples/orm/collection.py diff --git a/examples/database.py b/examples/orm/database.py similarity index 85% rename from examples/database.py rename to examples/orm/database.py index 32015bb22..ca7a0164a 100644 --- a/examples/database.py +++ b/examples/orm/database.py @@ -117,27 +117,38 @@ def collection_read_write(collection, db_name): # create collection within default col1_db1 = create_collection("col1_db1", "default") + db1Name = "db1" # create db1 - if "db1" not in db.list_database(): + if db1Name not in db.list_database(): print("\ncreate database: db1") - db.create_database(db_name="db1") + db.create_database(db_name=db1Name, properties={"key1":"value1"}) + db_info = db.describe_database(db_name=db1Name) + print(db_info) # use database db1 - db.using_database(db_name="db1") + db.using_database(db_name=db1Name) # create collection within default - col2_db1 = create_collection("col1_db1", 
"db1") + col2_db1 = create_collection("col1_db1", db1Name) # verify read and write - collection_read_write(col2_db1, "db1") + collection_read_write(col2_db1, db1Name) # list collections within db1 print("\nlist collections of database db1:") print(utility.list_collections()) + + # set properties of db1 + db_info = db.describe_database(db_name=db1Name) + print(db_info) + print("\nset properties of db1:") + db.set_properties(db_name=db1Name, properties={"key": "value"}) + db_info = db.describe_database(db_name=db1Name) + print(db_info) print("\ndrop collection: col1_db2 from db1") col2_db1.drop() print("\ndrop database: db1") - db.drop_database(db_name="db1") + db.drop_database(db_name=db1Name) # list database print("\nlist databases:") diff --git a/examples/example.py b/examples/orm/example.py similarity index 100% rename from examples/example.py rename to examples/orm/example.py diff --git a/examples/example_group_by.py b/examples/orm/example_group_by.py similarity index 100% rename from examples/example_group_by.py rename to examples/orm/example_group_by.py diff --git a/examples/example_index.py b/examples/orm/example_index.py similarity index 100% rename from examples/example_index.py rename to examples/orm/example_index.py diff --git a/examples/hello_cost.py b/examples/orm/hello_cost.py similarity index 100% rename from examples/hello_cost.py rename to examples/orm/hello_cost.py diff --git a/examples/hello_milvus.ipynb b/examples/orm/hello_milvus.ipynb similarity index 100% rename from examples/hello_milvus.ipynb rename to examples/orm/hello_milvus.ipynb diff --git a/examples/hello_milvus.py b/examples/orm/hello_milvus.py similarity index 99% rename from examples/hello_milvus.py rename to examples/orm/hello_milvus.py index 795db825d..bd9c0a020 100644 --- a/examples/hello_milvus.py +++ b/examples/orm/hello_milvus.py @@ -23,7 +23,7 @@ ################################################################################# # 1. connect to Milvus # Add a new connection alias `default` for Milvus server in `localhost:19530` -# Actually the "default" alias is a buildin in PyMilvus. +# Actually the "default" alias is built-in to PyMilvus. # If the address of Milvus is the same as `localhost:19530`, you can omit all # parameters and call the method as: `connections.connect()`. # diff --git a/examples/hello_milvus_delete.py b/examples/orm/hello_milvus_delete.py similarity index 100% rename from examples/hello_milvus_delete.py rename to examples/orm/hello_milvus_delete.py diff --git a/examples/orm/hello_text_embedding.py b/examples/orm/hello_text_embedding.py new file mode 100644 index 000000000..9400bbd29 --- /dev/null +++ b/examples/orm/hello_text_embedding.py @@ -0,0 +1,124 @@ +# hello_text_embedding.py demonstrates how to insert raw data only into Milvus and perform +# dense vector based ANN search using TextEmbedding. +# 1. connect to Milvus +# 2. create collection +# 3. insert data +# 4. create index +# 5. search +# 6. drop collection +import time + +from pymilvus import ( + connections, + utility, + FieldSchema, CollectionSchema, Function, DataType, FunctionType, + Collection, +) + +fmt = "\n=== {:30} ===\n" +search_latency_fmt = "search latency = {:.4f}s" + +################################################################################# +# 1. 
connect to Milvus +# Add a new connection alias `default` for Milvus server in `localhost:19530` +print(fmt.format("start connecting to Milvus")) +connections.connect("default", host="localhost", port="19530") + +collection_name = "text_embedding" + +has = utility.has_collection(collection_name) +print(f"Does collection {collection_name} exist in Milvus: {has}") + +################################################################################# +# 2. create collection +# We're going to create a collection with 2 explicit fields and a function. +# +-+------------+------------+------------------+------------------------------+ +# | | field name | field type | other attributes | field description | +# +-+------------+------------+------------------+------------------------------+ +# |1| "id" | INT64 | is_primary=True | "primary field" | +# | | | | auto_id=False | | +# +-+------------+------------+------------------+------------------------------+ +# |2| "document" | VarChar | | "raw text document" | +# +-+------------+------------+------------------+------------------------------+ +# +# Function 'text embedding' is used to convert raw text document to a dense vector representation +# and store it in the 'dense' field. +# +-+------------+-------------------+-----------+------------------------------+ +# | | field name | field type | other attr| field description | +# +-+------------+-------------------+-----------+------------------------------+ +# |3| "dense" |FLOAT_VECTOR | dim=1536 | | +# +-+------------+-------------------+-----------+------------------------------+ +# +fields = [ + FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True), + FieldSchema(name="dense", dtype=DataType.FLOAT_VECTOR, dim=1536), + FieldSchema(name="document", dtype=DataType.VARCHAR, max_length=1000), +] + +text_embedding_function = Function( + name="openai", + function_type=FunctionType.TEXTEMBEDDING, + input_field_names=["document"], + output_field_names="dense", + params={ + "provider": "openai", + "model_name": "text-embedding-3-small", + } +) + +schema = CollectionSchema(fields, "hello_text_embedding demo") +schema.add_function(text_embedding_function) + +hello_text_embedding = Collection(collection_name, schema, consistency_level="Strong") + +print(fmt.format("Start inserting documents")) +docs = [ + "Artificial intelligence was founded as an academic discipline in 1956.", + "Alan Turing was the first person to conduct substantial research in AI.", + "Born in Maida Vale, London, Turing was raised in southern England.", +] + +insert_result = hello_text_embedding.insert([docs]) +ids = insert_result.primary_keys + +################################################################################ +# 4. create index +# We are going to create an index for collection, here we simply +# uses AUTOINDEX so Milvus can use the default parameters. +print(fmt.format("Start Creating index AUTOINDEX")) +index = { + "index_type": "AUTOINDEX", + "metric_type": "IP", +} + +hello_text_embedding.create_index("dense", index) + +################################################################################ +# 5. search, query, and scalar filtering search +# After data were inserted into Milvus and indexed, you can perform: +# - search texts relevance by TextEmbedding using dense vector ANN search + +# Before conducting a search or a query, you need to load the data into memory. 
+print(fmt.format("Start loading")) +hello_text_embedding.load() + +# ----------------------------------------------------------------------------- +search_params = { + "metric_type": "IP", + "params": {"nprobe": 10}, +} +queries = ["When was artificial intelligence founded", + "Where was Alan Turing born?"] + +start_time = time.time() +result = hello_text_embedding.search(queries, "dense", search_params, limit=3, output_fields=["document"], consistency_level="Strong") +end_time = time.time() + +for hits, text in zip(result, queries): + print(f"result of text: {text}") + for hit in hits: + print(f"\thit: {hit}, document field: {hit.entity.get('document')}") +print(search_latency_fmt.format(end_time - start_time)) + +# Finally, drop the collection +utility.drop_collection(collection_name) diff --git a/examples/inverted_index_example.py b/examples/orm/inverted_index_example.py similarity index 100% rename from examples/inverted_index_example.py rename to examples/orm/inverted_index_example.py diff --git a/examples/iterator.py b/examples/orm/iterator.py similarity index 83% rename from examples/iterator.py rename to examples/orm/iterator.py index ed7d2bf54..f9fa496d5 100644 --- a/examples/iterator.py +++ b/examples/orm/iterator.py @@ -6,6 +6,7 @@ FieldSchema, CollectionSchema, DataType, Collection, ) +import logging HOST = "localhost" PORT = "19530" @@ -19,11 +20,32 @@ LIMIT = 5 NUM_ENTITIES = 1000 DIM = 8 -CLEAR_EXIST = False +CLEAR_EXIST = True +# Create a logger for the main script +log = logging.getLogger(__name__) +log.setLevel(logging.INFO) # Set the log level to INFO -def re_create_collection(skip_data_period: bool): - if not skip_data_period: +# Create a console handler and set its level to INFO +console_handler = logging.StreamHandler() +console_handler.setLevel(logging.INFO) + +# Create a formatter for the console output +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + +# Add the formatter to the handler +console_handler.setFormatter(formatter) + +# Add the handler to the logger (this will apply globally) +log.addHandler(console_handler) + +# Now, configure the root logger to apply to the entire app (including your package) +logging.getLogger().setLevel(logging.INFO) # Set the root logger level to INFO +logging.getLogger().addHandler(console_handler) # Attach the handler to the root logger + + +def re_create_collection(prepare_new_data: bool): + if prepare_new_data: if utility.has_collection(COLLECTION_NAME) and CLEAR_EXIST: utility.drop_collection(COLLECTION_NAME) print(f"dropped existed collection{COLLECTION_NAME}") @@ -95,7 +117,8 @@ def query_iterate_collection_no_offset(collection): query_iterator = collection.query_iterator(expr=expr, output_fields=[USER_ID, AGE], offset=0, batch_size=5, consistency_level=CONSISTENCY_LEVEL, - reduce_stop_for_best="false") + reduce_stop_for_best="false", print_iterator_cursor=False, + iterator_cp_file="/tmp/it_cp") no_best_ids: set = set({}) page_idx = 0 while True: @@ -113,7 +136,8 @@ def query_iterate_collection_no_offset(collection): print("best---------------------------") query_iterator = collection.query_iterator(expr=expr, output_fields=[USER_ID, AGE], offset=0, batch_size=5, consistency_level=CONSISTENCY_LEVEL, - reduce_stop_for_best="true") + reduce_stop_for_best="true", print_iterator_cursor=False, iterator_cp_file="/tmp/it_cp") + best_ids: set = set({}) page_idx = 0 while True: @@ -136,7 +160,7 @@ def query_iterate_collection_no_offset(collection): def 
query_iterate_collection_with_offset(collection): expr = f"10 <= {AGE} <= 14" query_iterator = collection.query_iterator(expr=expr, output_fields=[USER_ID, AGE], - offset=10, batch_size=50, consistency_level=CONSISTENCY_LEVEL) + offset=10, batch_size=50, consistency_level=CONSISTENCY_LEVEL, print_iterator_cursor=True) page_idx = 0 while True: res = query_iterator.next() @@ -153,7 +177,7 @@ def query_iterate_collection_with_offset(collection): def query_iterate_collection_with_limit(collection): expr = f"10 <= {AGE} <= 44" query_iterator = collection.query_iterator(expr=expr, output_fields=[USER_ID, AGE], - batch_size=80, limit=530, consistency_level=CONSISTENCY_LEVEL) + batch_size=80, limit=530, consistency_level=CONSISTENCY_LEVEL, print_iterator_cursor=True) page_idx = 0 while True: res = query_iterator.next() @@ -177,7 +201,7 @@ def search_iterator_collection(collection): "params": {"nprobe": 10, "radius": 1.0}, } search_iterator = collection.search_iterator(vectors_to_search, PICTURE, search_params, batch_size=500, - output_fields=[USER_ID]) + output_fields=[USER_ID], print_iterator_cursor=True) page_idx = 0 while True: res = search_iterator.next() @@ -201,7 +225,7 @@ def search_iterator_collection_with_limit(collection): "params": {"nprobe": 10, "radius": 1.0}, } search_iterator = collection.search_iterator(vectors_to_search, PICTURE, search_params, batch_size=200, limit=755, - output_fields=[USER_ID]) + output_fields=[USER_ID], print_iterator_cursor=True) page_idx = 0 while True: res = search_iterator.next() @@ -216,10 +240,10 @@ def search_iterator_collection_with_limit(collection): def main(): - skip_data_period = False + prepare_new_data = True connections.connect("default", host=HOST, port=PORT) - collection = re_create_collection(skip_data_period) - if not skip_data_period: + collection = re_create_collection(prepare_new_data) + if prepare_new_data: collection = prepare_data(collection) query_iterate_collection_no_offset(collection) query_iterate_collection_with_offset(collection) diff --git a/examples/orm/partition.py b/examples/orm/partition.py new file mode 100644 index 000000000..3c6bb30ce --- /dev/null +++ b/examples/orm/partition.py @@ -0,0 +1,148 @@ +# Copyright (C) 2019-2020 Zilliz. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under the License +# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +# or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +from pymilvus import ( + connections, list_collections, has_partition, + FieldSchema, CollectionSchema, DataType, + Collection, Partition +) + +import random +import string + +default_dim = 128 +default_nb = 3000 +default_float_vec_field_name = "float_vector" +default_segment_row_limit = 1000 + + +all_index_types = [ + "FLAT", + "IVF_FLAT", + "IVF_SQ8", + # "IVF_SQ8_HYBRID", + "IVF_PQ", + "HNSW", + # "NSG", + "ANNOY", + "RHNSW_FLAT", + "RHNSW_PQ", + "RHNSW_SQ", + "BIN_FLAT", + "BIN_IVF_FLAT" +] + +default_index_params = [ + {"nlist": 128}, + {"nlist": 128}, + {"nlist": 128}, + # {"nlist": 128}, + {"nlist": 128, "m": 16, "nbits": 8}, + {"M": 48, "efConstruction": 500}, + # {"search_length": 50, "out_degree": 40, "candidate_pool_size": 100, "knng": 50}, + {"n_trees": 50}, + {"M": 48, "efConstruction": 500}, + {"M": 48, "efConstruction": 500, "PQM": 64}, + {"M": 48, "efConstruction": 500}, + {"nlist": 128}, + {"nlist": 128} +] + + +default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"} + + +def gen_default_fields(auto_id=True): + default_fields = [ + FieldSchema(name="count", dtype=DataType.INT64, is_primary=True), + FieldSchema(name="float", dtype=DataType.FLOAT), + FieldSchema(name=default_float_vec_field_name, dtype=DataType.FLOAT_VECTOR, dim=default_dim) + ] + default_schema = CollectionSchema(fields=default_fields, description="test collection", + segment_row_limit=default_segment_row_limit, auto_id=False) + return default_schema + + +def gen_data(nb): + entities = [ + [i for i in range(nb)], + [float(i) for i in range(nb)], + [[random.random() for _ in range(dim)] for _ in range(num)], + ] + return entities + + +def gen_unique_str(str_value=None): + prefix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8)) + return "collection_" + prefix if str_value is None else str_value + "_" + prefix + + +def binary_support(): + return ["BIN_FLAT", "BIN_IVF_FLAT"] + + +def gen_simple_index(): + index_params = [] + for i in range(len(all_index_types)): + if all_index_types[i] in binary_support(): + continue + dic = {"index_type": all_index_types[i], "metric_type": "L2"} + dic.update({"params": default_index_params[i]}) + index_params.append(dic) + return index_params + + +def test_partition(): + connections.connect(alias="default") + print("create collection") + collection = Collection(name=gen_unique_str(), schema=gen_default_fields()) + print("create partition") + partition = Partition(collection, name=gen_unique_str()) + print(list_collections()) + assert has_partition(collection.name, partition.name) is True + + data = gen_data(default_nb) + print("insert data to partition") + res = partition.insert(data) + collection.flush() + print(res.insert_count) + assert partition.is_empty is False + assert partition.num_entities == default_nb + + print("start to create index") + index = { + "index_type": "IVF_FLAT", + "metric_type": "L2", + "params": {"nlist": 128}, + } + collection.create_index(default_float_vec_field_name, index) + + print("load partition") + partition.load() + topK = 5 + round_decimal = 3 + search_params = {"metric_type": "L2", "params": {"nprobe": 10}} + print("search partition") + res = partition.search(data[2][-2:], "float_vector", search_params, topK, "count > 100", round_decimal=round_decimal) + for hits in res: + for hit in hits: + print(hit) + + print("release partition") + partition.release() + print("drop partition") + partition.drop() + print("drop collection") + collection.drop() + + +if __name__ == 
"__main__": + test_partition() diff --git a/examples/resource_group.py b/examples/orm/resource_group.py similarity index 100% rename from examples/resource_group.py rename to examples/orm/resource_group.py diff --git a/examples/resource_group_declarative_api.py b/examples/orm/resource_group_declarative_api.py similarity index 100% rename from examples/resource_group_declarative_api.py rename to examples/orm/resource_group_declarative_api.py diff --git a/examples/role_and_privilege.py b/examples/orm/role_and_privilege.py similarity index 100% rename from examples/role_and_privilege.py rename to examples/orm/role_and_privilege.py diff --git a/examples/orm/search_with_template_expression.py b/examples/orm/search_with_template_expression.py new file mode 100644 index 000000000..95e8da818 --- /dev/null +++ b/examples/orm/search_with_template_expression.py @@ -0,0 +1,196 @@ +# hello_milvus.py demonstrates the basic operations of PyMilvus, a Python SDK of Milvus. +# 1. connect to Milvus +# 2. create collection +# 3. insert data +# 4. create index +# 5. search, query, and hybrid search on entities +# 6. delete entities by PK +# 7. drop collection +import time + +import numpy as np +from pymilvus import ( + connections, + utility, + FieldSchema, CollectionSchema, DataType, + Collection, +) + +fmt = "\n=== {:30} ===\n" +search_latency_fmt = "search latency = {:.4f}s" +num_entities, dim = 3000, 8 + +################################################################################# +# 1. connect to Milvus +# Add a new connection alias `default` for Milvus server in `localhost:19530` +# Actually the "default" alias is a buildin in PyMilvus. +# If the address of Milvus is the same as `localhost:19530`, you can omit all +# parameters and call the method as: `connections.connect()`. +# +# Note: the `using` parameter of the following methods is default to "default". +print(fmt.format("start connecting to Milvus")) +connections.connect("default", host="localhost", port="19530") + +has = utility.has_collection("hello_milvus") +print(f"Does collection hello_milvus exist in Milvus: {has}") + +################################################################################# +# 2. create collection +# We're going to create a collection with 3 fields. +# +-+------------+------------+------------------+------------------------------+ +# | | field name | field type | other attributes | field description | +# +-+------------+------------+------------------+------------------------------+ +# |1| "pk" | VarChar | is_primary=True | "primary field" | +# | | | | auto_id=False | | +# +-+------------+------------+------------------+------------------------------+ +# |2| "random" | Double | | "a double field" | +# +-+------------+------------+------------------+------------------------------+ +# |3|"embeddings"| FloatVector| dim=8 | "float vector with dim 8" | +# +-+------------+------------+------------------+------------------------------+ +fields = [ + FieldSchema(name="pk", dtype=DataType.VARCHAR, is_primary=True, auto_id=False, max_length=100), + FieldSchema(name="random", dtype=DataType.DOUBLE), + FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=dim) +] + +schema = CollectionSchema(fields, "hello_milvus is the simplest demo to introduce the APIs") + +print(fmt.format("Create collection `hello_milvus`")) +hello_milvus = Collection("hello_milvus", schema, consistency_level="Strong") + +################################################################################ +# 3. 
insert data +# We are going to insert 3000 rows of data into `hello_milvus` +# Data to be inserted must be organized in fields. +# +# The insert() method returns: +# - either automatically generated primary keys by Milvus if auto_id=True in the schema; +# - or the existing primary key field from the entities if auto_id=False in the schema. + +print(fmt.format("Start inserting entities")) +rng = np.random.default_rng(seed=19530) +entities = [ + # provide the pk field because `auto_id` is set to False + [str(i) for i in range(num_entities)], + rng.random(num_entities).tolist(), # field random, only supports list + rng.random((num_entities, dim), np.float32), # field embeddings, supports numpy.ndarray and list +] + +insert_result = hello_milvus.insert(entities) + +row = { + "pk": "19530", + "random": 0.5, + "embeddings": rng.random((1, dim), np.float32)[0] +} +hello_milvus.insert(row) + +hello_milvus.flush() +print(f"Number of entities in Milvus: {hello_milvus.num_entities}") # check the num_entities + +################################################################################ +# 4. create index +# We are going to create an IVF_FLAT index for hello_milvus collection. +# create_index() can only be applied to `FloatVector` and `BinaryVector` fields. +print(fmt.format("Start Creating index IVF_FLAT")) +index = { + "index_type": "IVF_FLAT", + "metric_type": "L2", + "params": {"nlist": 128}, +} + +hello_milvus.create_index("embeddings", index) + +################################################################################ +# 5. search, query, and hybrid search +# After data were inserted into Milvus and indexed, you can perform: +# - search based on vector similarity +# - query based on scalar filtering(boolean, int, etc.) +# - hybrid search based on vector similarity and scalar filtering. +# + +# Before conducting a search or a query, you need to load the data in `hello_milvus` into memory. +print(fmt.format("Start loading")) +hello_milvus.load() + +# ----------------------------------------------------------------------------- +# search based on vector similarity +print(fmt.format("Start searching based on vector similarity")) +vectors_to_search = entities[-1][-2:] +search_params = { + "metric_type": "L2", + "params": {"nprobe": 10}, +} + +exprs = { + "pk == {str}": {"str": "10"}, + "pk in {list}": {"list": ["1", "10", "100"]}, + "random > {target}": {"target": 5}, + "random <= {target}": {"target": 111.5}, + "{min} <= random < {max}": {"min": 0, "max": 9999}, +} + +for expr, expr_params in exprs.items(): + print(f"search with expression: {expr}") + start_time = time.time() + result = hello_milvus.search(vectors_to_search, "embeddings", search_params, limit=3, expr=expr, + output_fields=["random"], expr_params=expr_params) + end_time = time.time() + + for hits in result: + for hit in hits: + print(f"hit: {hit}, random field: {hit.entity.get('random')}") + print(search_latency_fmt.format(end_time - start_time)) + + # ----------------------------------------------------------------------------- + # query based on scalar filtering(boolean, int, etc.) 
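The exprs mapping above is the heart of this new example: each key is a filter template with named placeholders, and the matching expr_params dict supplies concrete values when the search or query is executed. A minimal sketch of one such pairing, with an illustrative threshold value that is not taken from this change:

# Template and its parameters; the placeholder {target} is resolved from expr_params.
expr = "random > {target}"
expr_params = {"target": 0.5}
# hello_milvus.query(expr=expr, expr_params=expr_params, output_fields=["random"])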
+ start_time = time.time() + result = hello_milvus.query(expr=expr, output_fields=["random", "embeddings"], expr_params=expr_params) + end_time = time.time() + + print(f"query result:\n-{result}") + print(search_latency_fmt.format(end_time - start_time)) + + # ----------------------------------------------------------------------------- + # pagination + r1 = hello_milvus.query(expr=expr, limit=4, output_fields=["random"], expr_params=expr_params) + r2 = hello_milvus.query(expr=expr, offset=1, limit=3, output_fields=["random"], expr_params=expr_params) + print(f"query pagination(limit=4):\n\t{r1}") + print(f"query pagination(offset=1, limit=3):\n\t{r2}") + + # ----------------------------------------------------------------------------- + # hybrid search + + start_time = time.time() + result = hello_milvus.search(vectors_to_search, "embeddings", search_params, limit=3, expr=expr, + output_fields=["random"], expr_params=expr_params) + end_time = time.time() + + for hits in result: + for hit in hits: + print(f"hit: {hit}, random field: {hit.entity.get('random')}") + print(search_latency_fmt.format(end_time - start_time)) + +############################################################################### +# 6. delete entities by PK +# You can delete entities by their PK values using boolean expressions. +ids = insert_result.primary_keys + +expr = "pk in {list}" +expr_params = {"list": [ids[0], ids[1]]} +print(fmt.format(f"Start deleting with expr `{expr}`")) + +result = hello_milvus.query(expr=expr, output_fields=["random", "embeddings"], expr_params=expr_params) +print(f"query before delete by expr=`{expr}` -> result: \n-{result[0]}\n-{result[1]}\n") + +hello_milvus.delete(expr, expr_params=expr_params) + +result = hello_milvus.query(expr=expr, output_fields=["random", "embeddings"], expr_params=expr_params) +print(f"query after delete by expr=`{expr}` -> result: {result}\n") + + +############################################################################### +# 7. drop collection +# Finally, drop the hello_milvus collection +print(fmt.format("Drop collection `hello_milvus`")) +utility.drop_collection("hello_milvus") diff --git a/examples/user.py b/examples/orm/user.py similarity index 100% rename from examples/user.py rename to examples/orm/user.py diff --git a/examples/partition.py b/examples/partition.py index 3c6bb30ce..7466c034a 100644 --- a/examples/partition.py +++ b/examples/partition.py @@ -1,148 +1,85 @@ -# Copyright (C) 2019-2020 Zilliz. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed under the License -# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. See the License for the specific language governing permissions and limitations under the License. 
- +import time +import numpy as np from pymilvus import ( - connections, list_collections, has_partition, - FieldSchema, CollectionSchema, DataType, - Collection, Partition + MilvusClient, ) -import random -import string - -default_dim = 128 -default_nb = 3000 -default_float_vec_field_name = "float_vector" -default_segment_row_limit = 1000 - - -all_index_types = [ - "FLAT", - "IVF_FLAT", - "IVF_SQ8", - # "IVF_SQ8_HYBRID", - "IVF_PQ", - "HNSW", - # "NSG", - "ANNOY", - "RHNSW_FLAT", - "RHNSW_PQ", - "RHNSW_SQ", - "BIN_FLAT", - "BIN_IVF_FLAT" -] - -default_index_params = [ - {"nlist": 128}, - {"nlist": 128}, - {"nlist": 128}, - # {"nlist": 128}, - {"nlist": 128, "m": 16, "nbits": 8}, - {"M": 48, "efConstruction": 500}, - # {"search_length": 50, "out_degree": 40, "candidate_pool_size": 100, "knng": 50}, - {"n_trees": 50}, - {"M": 48, "efConstruction": 500}, - {"M": 48, "efConstruction": 500, "PQM": 64}, - {"M": 48, "efConstruction": 500}, - {"nlist": 128}, - {"nlist": 128} -] - - -default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"} - - -def gen_default_fields(auto_id=True): - default_fields = [ - FieldSchema(name="count", dtype=DataType.INT64, is_primary=True), - FieldSchema(name="float", dtype=DataType.FLOAT), - FieldSchema(name=default_float_vec_field_name, dtype=DataType.FLOAT_VECTOR, dim=default_dim) - ] - default_schema = CollectionSchema(fields=default_fields, description="test collection", - segment_row_limit=default_segment_row_limit, auto_id=False) - return default_schema - - -def gen_data(nb): - entities = [ - [i for i in range(nb)], - [float(i) for i in range(nb)], - [[random.random() for _ in range(dim)] for _ in range(num)], - ] - return entities - - -def gen_unique_str(str_value=None): - prefix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8)) - return "collection_" + prefix if str_value is None else str_value + "_" + prefix - - -def binary_support(): - return ["BIN_FLAT", "BIN_IVF_FLAT"] - - -def gen_simple_index(): - index_params = [] - for i in range(len(all_index_types)): - if all_index_types[i] in binary_support(): - continue - dic = {"index_type": all_index_types[i], "metric_type": "L2"} - dic.update({"params": default_index_params[i]}) - index_params.append(dic) - return index_params - - -def test_partition(): - connections.connect(alias="default") - print("create collection") - collection = Collection(name=gen_unique_str(), schema=gen_default_fields()) - print("create partition") - partition = Partition(collection, name=gen_unique_str()) - print(list_collections()) - assert has_partition(collection.name, partition.name) is True - - data = gen_data(default_nb) - print("insert data to partition") - res = partition.insert(data) - collection.flush() - print(res.insert_count) - assert partition.is_empty is False - assert partition.num_entities == default_nb - - print("start to create index") - index = { - "index_type": "IVF_FLAT", - "metric_type": "L2", - "params": {"nlist": 128}, - } - collection.create_index(default_float_vec_field_name, index) - - print("load partition") - partition.load() - topK = 5 - round_decimal = 3 - search_params = {"metric_type": "L2", "params": {"nprobe": 10}} - print("search partition") - res = partition.search(data[2][-2:], "float_vector", search_params, topK, "count > 100", round_decimal=round_decimal) - for hits in res: - for hit in hits: - print(hit) - - print("release partition") - partition.release() - print("drop partition") - partition.drop() - print("drop collection") - 
collection.drop() - - -if __name__ == "__main__": - test_partition() +fmt = "\n=== {:30} ===\n" +dim = 8 +collection_name = "hello_milvus" +milvus_client = MilvusClient("http://localhost:19530") + +has_collection = milvus_client.has_collection(collection_name, timeout=5) +if has_collection: + milvus_client.drop_collection(collection_name) +milvus_client.create_collection(collection_name, dim, consistency_level="Strong", metric_type="L2") + +print(fmt.format(" all collections ")) +print(milvus_client.list_collections()) + +print(fmt.format(f"schema of collection {collection_name}")) +print(milvus_client.describe_collection(collection_name)) + +rng = np.random.default_rng(seed=19530) + +milvus_client.create_partition(collection_name, partition_name = "p1") +milvus_client.insert(collection_name, {"id": 1, "vector": rng.random((1, dim))[0], "a": 100}, partition_name = "p1") +milvus_client.insert(collection_name, {"id": 2, "vector": rng.random((1, dim))[0], "b": 200}, partition_name = "p1") +milvus_client.insert(collection_name, {"id": 3, "vector": rng.random((1, dim))[0], "c": 300}, partition_name = "p1") + +milvus_client.create_partition(collection_name, partition_name = "p2") +milvus_client.insert(collection_name, {"id": 4, "vector": rng.random((1, dim))[0], "e": 400}, partition_name = "p2") +milvus_client.insert(collection_name, {"id": 5, "vector": rng.random((1, dim))[0], "f": 500}, partition_name = "p2") +milvus_client.insert(collection_name, {"id": 6, "vector": rng.random((1, dim))[0], "g": 600}, partition_name = "p2") + +has_p1 = milvus_client.has_partition(collection_name, "p1") +print("has partition p1", has_p1) + +has_p3 = milvus_client.has_partition(collection_name, "p3") +print("has partition p3", has_p3) + +partitions = milvus_client.list_partitions(collection_name) +print("partitions:", partitions) + +milvus_client.release_collection(collection_name) +milvus_client.load_partitions(collection_name, partition_names =["p1", "p2"]) + +print(fmt.format("Start search in partiton p1")) +vectors_to_search = rng.random((1, dim)) +result = milvus_client.search(collection_name, vectors_to_search, limit=3, output_fields=["pk", "a", "b"], partition_names = ["p1"]) +for hits in result: + for hit in hits: + print(f"hit: {hit}") + +milvus_client.release_partitions(collection_name, partition_names = ["p1"]) +milvus_client.drop_partition(collection_name, partition_name = "p1", timeout = 2.0) +print("successfully drop partition p1") + +try: + milvus_client.drop_partition(collection_name, partition_name = "p2", timeout = 2.0) +except Exception as e: + print(f"cacthed {e}") + +has_p1 = milvus_client.has_partition(collection_name, "p1") +print("has partition of p1:", has_p1) + +print(fmt.format("Start query by specifying primary keys")) +query_results = milvus_client.query(collection_name, ids=[2]) +assert len(query_results) == 0 + +print(fmt.format("Start query by specifying primary keys")) +query_results = milvus_client.query(collection_name, ids=[4]) +print(query_results[0]) + +print(fmt.format("Start query by specifying filtering expression")) +query_results = milvus_client.query(collection_name, filter= "f == 500") +for ret in query_results: + print(ret) + +print(fmt.format(f"Start search with retrieve serveral fields.")) +result = milvus_client.search(collection_name, vectors_to_search, limit=3, output_fields=["pk", "a", "b"]) +for hits in result: + for hit in hits: + print(f"hit: {hit}") + +milvus_client.drop_collection(collection_name) diff --git a/examples/milvus_client/rbac.py 
b/examples/rbac.py similarity index 100% rename from examples/milvus_client/rbac.py rename to examples/rbac.py diff --git a/examples/milvus_client/simple.py b/examples/simple.py similarity index 100% rename from examples/milvus_client/simple.py rename to examples/simple.py diff --git a/examples/milvus_client/simple_auto_id.py b/examples/simple_auto_id.py similarity index 100% rename from examples/milvus_client/simple_auto_id.py rename to examples/simple_auto_id.py diff --git a/examples/milvus_client/simple_cost.py b/examples/simple_cost.py similarity index 92% rename from examples/milvus_client/simple_cost.py rename to examples/simple_cost.py index d8b35bf0d..c57e50481 100644 --- a/examples/milvus_client/simple_cost.py +++ b/examples/simple_cost.py @@ -7,9 +7,7 @@ fmt = "\n=== {:30} ===\n" dim = 8 collection_name = "hello_client_cost" -# milvus_client = MilvusClient("http://localhost:19530") -milvus_client = MilvusClient(uri="https://in01-20fa6a32462c074.aws-us-west-2.vectordb-uat3.zillizcloud.com:19541", - token="root:j6|y3/g$5Lq,a[TJ^ckphSMs{-F[&Jl)") +milvus_client = MilvusClient("http://localhost:19530") has_collection = milvus_client.has_collection(collection_name, timeout=5) if has_collection: diff --git a/examples/milvus_client/sparse.py b/examples/sparse.py similarity index 100% rename from examples/milvus_client/sparse.py rename to examples/sparse.py diff --git a/examples/text_embedding.py b/examples/text_embedding.py new file mode 100644 index 000000000..4667a29d5 --- /dev/null +++ b/examples/text_embedding.py @@ -0,0 +1,80 @@ +# hello_text_embedding.py demonstrates how to insert raw data only into Milvus and perform +# dense vector based ANN search using TextEmbedding. +# 1. connect to Milvus +# 2. create collection +# 3. insert data +# 4. create index +# 5. search +# 6. 
drop collection +import time + +from pymilvus import ( + MilvusClient, + utility, + FieldSchema, CollectionSchema, Function, DataType, FunctionType, + Collection, +) + +collection_name = "text_embedding" + +milvus_client = MilvusClient("http://localhost:19530") + +has_collection = milvus_client.has_collection(collection_name, timeout=5) +if has_collection: + milvus_client.drop_collection(collection_name) + +schema = milvus_client.create_schema() +schema.add_field("id", DataType.INT64, is_primary=True, auto_id=False) +schema.add_field("document", DataType.VARCHAR, max_length=9000) +schema.add_field("dense", DataType.FLOAT_VECTOR, dim=1536) + +text_embedding_function = Function( + name="openai", + function_type=FunctionType.TEXTEMBEDDING, + input_field_names=["document"], + output_field_names="dense", + params={ + "provider": "openai", + "model_name": "text-embedding-3-small", + } +) + +schema.add_function(text_embedding_function) + +index_params = milvus_client.prepare_index_params() +index_params.add_index( + field_name="dense", + index_name="dense_index", + index_type="AUTOINDEX", + metric_type="IP", +) + +ret = milvus_client.create_collection(collection_name, schema=schema, index_params=index_params, consistency_level="Strong") + +rows = [ + {"id": 1, "document": "Artificial intelligence was founded as an academic discipline in 1956."}, + {"id": 2, "document": "Alan Turing was the first person to conduct substantial research in AI."}, + {"id": 3, "document": "Born in Maida Vale, London, Turing was raised in southern England."}, +] + +insert_result = milvus_client.insert(collection_name, rows, progress_bar=True) + + +# ----------------------------------------------------------------------------- +search_params = { + "params": {"nprobe": 10}, +} +queries = ["When was artificial intelligence founded", + "Where was Alan Turing born?"] + +start_time = time.time() +result = milvus_client.search(collection_name, data=queries, anns_field="dense", search_params=search_params, limit=3, output_fields=["document"], consistency_level="Strong") +end_time = time.time() + +for hits, text in zip(result, queries): + print(f"result of text: {text}") + for hit in hits: + print(f"\thit: {hit}, document field: {hit.get('document')}") + +# Finally, drop the collection +milvus_client.drop_collection(collection_name) diff --git a/pymilvus/__init__.py b/pymilvus/__init__.py index a8201da42..21b5f99ed 100644 --- a/pymilvus/__init__.py +++ b/pymilvus/__init__.py @@ -10,22 +10,6 @@ # or implied. See the License for the specific language governing permissions and limitations under # the License. 
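The hunks below remove the bulk-writer re-exports from the top-level package, and the new pymilvus/bulk_writer/__init__.py further down gates them behind the optional extra. The import path implied by those two changes, shown here as a short sketch:

# pip install "pymilvus[bulk_writer]"
from pymilvus.bulk_writer import (
    BulkFileType,
    LocalBulkWriter,
    RemoteBulkWriter,
    bulk_import,
)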
-from .bulk_writer.bulk_import import ( - bulk_import, - get_import_progress, - list_import_jobs, -) - -# bulk writer -from .bulk_writer.constants import ( - BulkFileType, -) -from .bulk_writer.local_bulk_writer import ( - LocalBulkWriter, -) -from .bulk_writer.remote_bulk_writer import ( - RemoteBulkWriter, -) from .client import __version__ from .client.abstract import AnnSearchRequest, Hit, Hits, RRFRanker, SearchResult, WeightedRanker from .client.asynch import SearchFuture @@ -34,6 +18,7 @@ from .client.types import ( BulkInsertState, DataType, + FunctionType, Group, IndexType, Replica, @@ -54,7 +39,7 @@ from .orm.index import Index from .orm.partition import Partition from .orm.role import Role -from .orm.schema import CollectionSchema, FieldSchema +from .orm.schema import CollectionSchema, FieldSchema, Function from .orm.utility import ( create_resource_group, create_user, @@ -117,6 +102,7 @@ "Group", "Shard", "FieldSchema", + "Function", "CollectionSchema", "SearchFuture", "MutationFuture", @@ -137,18 +123,13 @@ "Prepare", "Status", "DataType", + "FunctionType", "MilvusException", "__version__", "MilvusClient", "ResourceGroupInfo", "Connections", "IndexType", - "BulkFileType", - "LocalBulkWriter", - "RemoteBulkWriter", - "bulk_import", - "get_import_progress", - "list_import_jobs", "AnnSearchRequest", "RRFRanker", "WeightedRanker", diff --git a/pymilvus/bulk_writer/__init__.py b/pymilvus/bulk_writer/__init__.py index e69de29bb..507b0e02e 100644 --- a/pymilvus/bulk_writer/__init__.py +++ b/pymilvus/bulk_writer/__init__.py @@ -0,0 +1,28 @@ +from importlib.util import find_spec + +expected_pkgs = ["minio", "azure", "requests", "pyarrow"] + +missing = [pkg for pkg in expected_pkgs if find_spec(pkg) is None] + +if len(missing) > 0: + msg = f"Missing packages: {missing}. 
Please install bulk_writer by pip install pymilvus[bulk_writer] first" + raise ModuleNotFoundError(msg) + + +from .bulk_import import ( + bulk_import, + get_import_progress, + list_import_jobs, +) +from .constants import BulkFileType +from .local_bulk_writer import LocalBulkWriter +from .remote_bulk_writer import RemoteBulkWriter + +__all__ = [ + "BulkFileType", + "LocalBulkWriter", + "RemoteBulkWriter", + "bulk_import", + "get_import_progress", + "list_import_jobs", +] diff --git a/pymilvus/bulk_writer/buffer.py b/pymilvus/bulk_writer/buffer.py index e77723777..2935e575b 100644 --- a/pymilvus/bulk_writer/buffer.py +++ b/pymilvus/bulk_writer/buffer.py @@ -13,6 +13,7 @@ import json import logging from pathlib import Path +from typing import Optional import numpy as np import pandas as pd @@ -42,13 +43,17 @@ def __init__( self, schema: CollectionSchema, file_type: BulkFileType = BulkFileType.NUMPY, + config: Optional[dict] = None, ): self._buffer = {} self._fields = {} self._file_type = file_type + self._config = config or {} for field in schema.fields: if field.is_primary and field.auto_id: continue + if field.is_function_output: + continue self._buffer[field.name] = [] self._fields[field.name] = field @@ -120,6 +125,8 @@ def persist(self, local_path: str, **kwargs) -> list: return self._persist_json_rows(local_path, **kwargs) if self._file_type == BulkFileType.PARQUET: return self._persist_parquet(local_path, **kwargs) + if self._file_type == BulkFileType.CSV: + return self._persist_csv(local_path, **kwargs) self._throw(f"Unsupported file tpye: {self._file_type}") return [] @@ -149,7 +156,22 @@ def _persist_npy(self, local_path: str, **kwargs): str_arr.append(json.dumps(val)) self._buffer[k] = str_arr - arr = np.array(self._buffer[k], dtype=dt) + # currently, milvus server doesn't support numpy for sparse vector + if field_schema.dtype == DataType.SPARSE_FLOAT_VECTOR: + self._throw( + f"Failed to persist file {full_file_name}," + f" error: milvus doesn't support parsing sparse vectors from numpy file" + ) + + # special process for float16 vector, the self._buffer stores bytes for + # float16 vector, convert the bytes to uint8 array + if field_schema.dtype in {DataType.FLOAT16_VECTOR, DataType.BFLOAT16_VECTOR}: + a = [] + for b in self._buffer[k]: + a.append(np.frombuffer(b, dtype=dt).tolist()) + arr = np.array(a, dtype=dt) + else: + arr = np.array(self._buffer[k], dtype=dt) np.save(str(full_file_name), arr) except Exception as e: self._throw(f"Failed to persist file {full_file_name}, error: {e}") @@ -173,7 +195,18 @@ def _persist_json_rows(self, local_path: str, **kwargs): while row_index < row_count: row = {} for k, v in self._buffer.items(): - row[k] = v[row_index] + # special process for float16 vector, the self._buffer stores bytes for + # float16 vector, convert the bytes to float list + field_schema = self._fields[k] + if field_schema.dtype in {DataType.FLOAT16_VECTOR, DataType.BFLOAT16_VECTOR}: + dt = ( + np.dtype("bfloat16") + if (field_schema.dtype == DataType.BFLOAT16_VECTOR) + else np.float16 + ) + row[k] = np.frombuffer(v[row_index], dtype=dt).tolist() + else: + row[k] = v[row_index] rows.append(row) row_index = row_index + 1 @@ -196,21 +229,25 @@ def _persist_parquet(self, local_path: str, **kwargs): data = {} for k in self._buffer: field_schema = self._fields[k] - if field_schema.dtype == DataType.JSON: - # for JSON field, store as string array + if field_schema.dtype in {DataType.JSON, DataType.SPARSE_FLOAT_VECTOR}: + # for JSON and SPARSE_VECTOR field, store as string array 
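A small self-contained sketch of the byte handling the comments above describe: the buffer stores FLOAT16/BFLOAT16 vectors as raw bytes, and numpy.frombuffer rebuilds an array from those bytes before the data is persisted. Plain NumPy has no bfloat16 dtype, so the sketch uses float16 only:

import numpy as np

vec = np.random.random(8).astype(np.float16)      # an 8-dim float16 vector
raw = vec.tobytes()                               # what the buffer holds for the row
restored = np.frombuffer(raw, dtype=np.float16)   # what the persist methods rebuild
assert np.array_equal(vec, restored)              # same values, byte-for-byte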
str_arr = [] for val in self._buffer[k]: str_arr.append(json.dumps(val)) data[k] = pd.Series(str_arr, dtype=None) - elif field_schema.dtype == DataType.FLOAT_VECTOR: + elif field_schema.dtype in {DataType.BINARY_VECTOR, DataType.FLOAT_VECTOR}: arr = [] for val in self._buffer[k]: - arr.append(np.array(val, dtype=np.dtype("float32"))) + arr.append(np.array(val, dtype=NUMPY_TYPE_CREATOR[field_schema.dtype.name])) data[k] = pd.Series(arr) - elif field_schema.dtype == DataType.BINARY_VECTOR: + elif field_schema.dtype in {DataType.FLOAT16_VECTOR, DataType.BFLOAT16_VECTOR}: + # special process for float16 vector, the self._buffer stores bytes for + # float16 vector, convert the bytes to uint8 array arr = [] for val in self._buffer[k]: - arr.append(np.array(val, dtype=np.dtype("uint8"))) + arr.append( + np.frombuffer(val, dtype=NUMPY_TYPE_CREATOR[field_schema.dtype.name]) + ) data[k] = pd.Series(arr) elif field_schema.dtype == DataType.ARRAY: dt = NUMPY_TYPE_CREATOR[field_schema.element_type.name] @@ -237,10 +274,8 @@ def _persist_parquet(self, local_path: str, **kwargs): buffer_row_count = kwargs.get("buffer_row_count", 1) size_per_row = int(buffer_size / buffer_row_count) + 1 row_group_size = int(row_group_bytes / size_per_row) - if row_group_size < row_group_size_min: - row_group_size = row_group_size_min - if row_group_size > row_group_size_max: - row_group_size = row_group_size_max + row_group_size = max(row_group_size, row_group_size_min) + row_group_size = min(row_group_size, row_group_size_max) # write to Parquet file data_frame = pd.DataFrame(data=data) @@ -253,3 +288,74 @@ def _persist_parquet(self, local_path: str, **kwargs): f" row count: {buffer_row_count}, row group size: {row_group_size}" ) return [str(file_path)] + + def _persist_csv(self, local_path: str, **kwargs): + sep = self._config.get("sep", ",") + nullkey = self._config.get("nullkey", "") + + header = list(self._buffer.keys()) + data = pd.DataFrame(columns=header) + for k, v in self._buffer.items(): + field_schema = self._fields[k] + # When using df.to_csv(arr) to write non-scalar data, + # the repr function is used to convert the data to a string. + # if the value of arr is [1.0, 2.0], repr(arr) will change with the type of arr: + # when arr is a list, the output is '[1.0, 2.0]' + # when arr is a tuple, the output is '(1.0, 2.0)' + # when arr is a np.array, the output is '[1.0 2.0]' + # we needs the output to be '[1.0, 2.0]', consistent with the array format in json + # so 1. whether make sure that arr of type + # (BINARY_VECTOR, FLOAT_VECTOR, FLOAT16_VECTOR, BFLOAT16_VECTOR) is a LIST, + # 2. 
or convert arr into a string using json.dumps(arr) first and then add it to df + # I choose method 2 here + if field_schema.dtype in { + DataType.SPARSE_FLOAT_VECTOR, + DataType.BINARY_VECTOR, + DataType.FLOAT_VECTOR, + }: + arr = [] + for val in v: + arr.append(json.dumps(val)) + data[k] = pd.Series(arr, dtype=np.dtype("str")) + elif field_schema.dtype in {DataType.FLOAT16_VECTOR, DataType.BFLOAT16_VECTOR}: + # special process for float16 vector, the self._buffer stores bytes for + # float16 vector, convert the bytes to float list + dt = ( + np.dtype("bfloat16") + if (field_schema.dtype == DataType.BFLOAT16_VECTOR) + else np.dtype("float16") + ) + arr = [] + for val in v: + arr.append(json.dumps(np.frombuffer(val, dtype=dt).tolist())) + data[k] = pd.Series(arr, dtype=np.dtype("str")) + elif field_schema.dtype in { + DataType.JSON, + DataType.ARRAY, + }: + arr = [] + for val in v: + if val is None: + arr.append(nullkey) + else: + arr.append(json.dumps(val)) + data[k] = pd.Series(arr, dtype=np.dtype("str")) + elif field_schema.dtype in {DataType.BOOL}: + arr = [] + for val in v: + if val is not None: + arr.append("true" if val else "false") + data[k] = pd.Series(arr, dtype=np.dtype("str")) + else: + data[k] = pd.Series(v, dtype=NUMPY_TYPE_CREATOR[field_schema.dtype.name]) + + file_path = Path(local_path + ".csv") + try: + # pd.Series will convert None to np.nan, + # so we can use 'na_rep=nullkey' to replace NaN with nullkey + data.to_csv(file_path, sep=sep, na_rep=nullkey, index=False) + except Exception as e: + self._throw(f"Failed to persist file {file_path}, error: {e}") + + logger.info("Successfully persist file %s, row count: %s", file_path, len(data)) + return [str(file_path)] diff --git a/pymilvus/bulk_writer/bulk_import.py b/pymilvus/bulk_writer/bulk_import.py index c0169a9b9..1c110d230 100644 --- a/pymilvus/bulk_writer/bulk_import.py +++ b/pymilvus/bulk_writer/bulk_import.py @@ -12,7 +12,7 @@ import json import logging -from urllib.parse import urlparse +from typing import List, Optional import requests @@ -26,7 +26,7 @@ def _http_headers(api_key: str): return { "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) " "Chrome/17.0.963.56 Safari/535.11", - "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", + "Accept": "application/json", "Accept-Encodin": "gzip,deflate,sdch", "Accept-Languag": "en-US,en;q=0.5", "Authorization": f"Bearer {api_key}", @@ -40,7 +40,7 @@ def _throw(msg: str): def _handle_response(url: str, res: json): inner_code = res["code"] - if inner_code != 200: + if inner_code != 0: inner_message = res["message"] _throw(f"Failed to request url: {url}, code: {inner_code}, message: {inner_message}") @@ -78,48 +78,54 @@ def _get_request( ## bulkinsert RESTful api wrapper def bulk_import( url: str, - api_key: str, - object_url: str, - access_key: str, - secret_key: str, - cluster_id: str, collection_name: str, + files: Optional[List[List[str]]] = None, + object_url: str = "", + cluster_id: str = "", + api_key: str = "", + access_key: str = "", + secret_key: str = "", **kwargs, ) -> requests.Response: """call bulkinsert restful interface to import files Args: url (str): url of the server - object_url (str): data files url + collection_name (str): name of the target collection + partition_name (str): name of the target partition + files (list of list of str): The files that contain the data to import. + A sub-list contains a single JSON or Parquet file, or a set of Numpy files. 
+ object_url (str): The URL of the object to import. + This URL should be accessible to the S3-compatible + object storage service, such as AWS S3, GCS, Azure blob storage. + cluster_id (str): id of a milvus instance(for cloud) + api_key (str): API key to authenticate your requests. access_key (str): access key to access the object storage secret_key (str): secret key to access the object storage - cluster_id (str): id of a milvus instance(for cloud) - collection_name (str): name of the target collection Returns: - json: response of the restful interface + response of the restful interface """ - up = urlparse(url) - if up.scheme.startswith("http"): - request_url = f"{url}/v1/vector/collections/import" - else: - request_url = f"https://{url}/v1/vector/collections/import" + request_url = url + "/v2/vectordb/jobs/import/create" + partition_name = kwargs.pop("partition_name", "") params = { + "collectionName": collection_name, + "partitionName": partition_name, + "files": files, "objectUrl": object_url, + "clusterId": cluster_id, "accessKey": access_key, "secretKey": secret_key, - "clusterId": cluster_id, - "collectionName": collection_name, } resp = _post_request(url=request_url, api_key=api_key, params=params, **kwargs) - _handle_response(url, resp.json()) + _handle_response(request_url, resp.json()) return resp def get_import_progress( - url: str, api_key: str, job_id: str, cluster_id: str, **kwargs + url: str, job_id: str, cluster_id: str = "", api_key: str = "", **kwargs ) -> requests.Response: """get job progress @@ -127,52 +133,54 @@ def get_import_progress( url (str): url of the server job_id (str): a job id cluster_id (str): id of a milvus instance(for cloud) + api_key (str): API key to authenticate your requests. Returns: - json: response of the restful interface + response of the restful interface """ - up = urlparse(url) - if up.scheme.startswith("http"): - request_url = f"{url}/v1/vector/collections/import/get" - else: - request_url = f"https://{url}/v1/vector/collections/import/get" + request_url = url + "/v2/vectordb/jobs/import/describe" params = { "jobId": job_id, "clusterId": cluster_id, } - resp = _get_request(url=request_url, api_key=api_key, params=params, **kwargs) - _handle_response(url, resp.json()) + resp = _post_request(url=request_url, api_key=api_key, params=params, **kwargs) + _handle_response(request_url, resp.json()) return resp def list_import_jobs( - url: str, api_key: str, cluster_id: str, page_size: int, current_page: int, **kwargs + url: str, + collection_name: str = "", + cluster_id: str = "", + api_key: str = "", + page_size: int = 10, + current_page: int = 1, + **kwargs, ) -> requests.Response: """list jobs in a cluster Args: url (str): url of the server + collection_name (str): name of the target collection cluster_id (str): id of a milvus instance(for cloud) + api_key (str): API key to authenticate your requests. 
page_size (int): pagination size current_page (int): pagination Returns: - json: response of the restful interface + response of the restful interface """ - up = urlparse(url) - if up.scheme.startswith("http"): - request_url = f"{url}/v1/vector/collections/import/list" - else: - request_url = f"https://{url}/v1/vector/collections/import/list" + request_url = url + "/v2/vectordb/jobs/import/list" params = { + "collectionName": collection_name, "clusterId": cluster_id, "pageSize": page_size, "currentPage": current_page, } - resp = _get_request(url=request_url, api_key=api_key, params=params, **kwargs) - _handle_response(url, resp.json()) + resp = _post_request(url=request_url, api_key=api_key, params=params, **kwargs) + _handle_response(request_url, resp.json()) return resp diff --git a/pymilvus/bulk_writer/bulk_writer.py b/pymilvus/bulk_writer/bulk_writer.py index 23ec57998..e6f1f71f4 100644 --- a/pymilvus/bulk_writer/bulk_writer.py +++ b/pymilvus/bulk_writer/bulk_writer.py @@ -13,6 +13,7 @@ import json import logging from threading import Lock +from typing import Optional import numpy as np @@ -39,6 +40,7 @@ def __init__( schema: CollectionSchema, chunk_size: int, file_type: BulkFileType, + config: Optional[dict] = None, **kwargs, ): self._schema = schema @@ -47,6 +49,7 @@ def __init__( self._total_row_count = 0 self._file_type = file_type self._buffer_lock = Lock() + self._config = config # the old parameter segment_size is changed to chunk_size, compatible with the legacy code self._chunk_size = chunk_size @@ -82,7 +85,7 @@ def chunk_size(self): def _new_buffer(self): old_buffer = self._buffer with self._buffer_lock: - self._buffer = Buffer(self._schema, self._file_type) + self._buffer = Buffer(self._schema, self._file_type, self._config) return old_buffer def append_row(self, row: dict, **kwargs): @@ -116,14 +119,23 @@ def _throw(self, msg: str): def _verify_vector(self, x: object, field: FieldSchema): dtype = DataType(field.dtype) validator = TYPE_VALIDATOR[dtype.name] - dim = field.params["dim"] - if not validator(x, dim): - self._throw( - f"Illegal vector data for vector field: '{field.name}'," - f" dim is not {dim} or type mismatch" - ) - - return len(x) * 4 if dtype == DataType.FLOAT_VECTOR else len(x) + if dtype != DataType.SPARSE_FLOAT_VECTOR: + dim = field.params["dim"] + try: + origin_list = validator(x, dim) + if dtype == DataType.FLOAT_VECTOR: + return origin_list, dim * 4 # for float vector, each dim occupies 4 bytes + if dtype == DataType.BINARY_VECTOR: + return origin_list, dim / 8 # for binary vector, 8 dim occupies 1 byte + return origin_list, dim * 2 # for float16 vector, each dim occupies 2 bytes + except MilvusException as e: + self._throw(f"Illegal vector data for vector field: '{field.name}': {e.message}") + else: + try: + validator(x) + return x, len(x) * 12 # for sparse vector, each key-value is int-float, 12 bytes + except MilvusException as e: + self._throw(f"Illegal vector data for vector field: '{field.name}': {e.message}") def _verify_json(self, x: object, field: FieldSchema): size = 0 @@ -182,16 +194,40 @@ def _verify_row(self, row: dict): ) else: continue + if field.is_function_output: + if field.name in row: + self._throw(f"Field '{field.name}' is function output, no need to provide") + else: + continue if field.name not in row: self._throw(f"The field '{field.name}' is missed in the row") dtype = DataType(field.dtype) - if dtype in {DataType.BINARY_VECTOR, DataType.FLOAT_VECTOR}: - if isinstance(row[field.name], np.ndarray): - row[field.name] = 
row[field.name].tolist() - row_size = row_size + self._verify_vector(row[field.name], field) + # deal with null (None) + if field.nullable and row[field.name] is None: + if ( + field.default_value is not None + and field.default_value.WhichOneof("data") is not None + ): + # set default value + data_type = field.default_value.WhichOneof("data") + row[field.name] = getattr(field.default_value, data_type) + else: + # skip field check if the field is null + continue + + if dtype in { + DataType.BINARY_VECTOR, + DataType.FLOAT_VECTOR, + DataType.FLOAT16_VECTOR, + DataType.BFLOAT16_VECTOR, + DataType.SPARSE_FLOAT_VECTOR, + }: + origin_list, byte_len = self._verify_vector(row[field.name], field) + row[field.name] = origin_list + row_size = row_size + byte_len elif dtype == DataType.VARCHAR: row_size = row_size + self._verify_varchar(row[field.name], field) elif dtype == DataType.JSON: diff --git a/pymilvus/bulk_writer/constants.py b/pymilvus/bulk_writer/constants.py index a25f9caf0..a8b7f05e1 100644 --- a/pymilvus/bulk_writer/constants.py +++ b/pymilvus/bulk_writer/constants.py @@ -18,6 +18,13 @@ DataType, ) +from .validators import ( + binary_vector_validator, + float16_vector_validator, + float_vector_validator, + sparse_vector_validator, +) + MB = 1024 * 1024 GB = 1024 * MB @@ -44,8 +51,11 @@ DataType.DOUBLE.name: lambda x: isinstance(x, float), DataType.VARCHAR.name: lambda x, max_len: isinstance(x, str) and len(x) <= max_len, DataType.JSON.name: lambda x: isinstance(x, dict), - DataType.FLOAT_VECTOR.name: lambda x, dim: isinstance(x, list) and len(x) == dim, - DataType.BINARY_VECTOR.name: lambda x, dim: isinstance(x, list) and len(x) * 8 == dim, + DataType.FLOAT_VECTOR.name: lambda x, dim: float_vector_validator(x, dim), + DataType.BINARY_VECTOR.name: lambda x, dim: binary_vector_validator(x, dim), + DataType.FLOAT16_VECTOR.name: lambda x, dim: float16_vector_validator(x, dim, False), + DataType.BFLOAT16_VECTOR.name: lambda x, dim: float16_vector_validator(x, dim, True), + DataType.SPARSE_FLOAT_VECTOR.name: lambda x: sparse_vector_validator(x), DataType.ARRAY.name: lambda x, cap: isinstance(x, list) and len(x) <= cap, } @@ -61,6 +71,9 @@ DataType.JSON.name: None, DataType.FLOAT_VECTOR.name: np.dtype("float32"), DataType.BINARY_VECTOR.name: np.dtype("uint8"), + DataType.FLOAT16_VECTOR.name: np.dtype("uint8"), + DataType.BFLOAT16_VECTOR.name: np.dtype("uint8"), + DataType.SPARSE_FLOAT_VECTOR: None, DataType.ARRAY.name: None, } @@ -71,3 +84,4 @@ class BulkFileType(IntEnum): JSON = 2 JSON_RB = 2 # deprecated PARQUET = 3 + CSV = 4 diff --git a/pymilvus/bulk_writer/local_bulk_writer.py b/pymilvus/bulk_writer/local_bulk_writer.py index d0981c7d2..c1597410c 100644 --- a/pymilvus/bulk_writer/local_bulk_writer.py +++ b/pymilvus/bulk_writer/local_bulk_writer.py @@ -37,9 +37,10 @@ def __init__( local_path: str, chunk_size: int = 128 * MB, file_type: BulkFileType = BulkFileType.PARQUET, + config: Optional[dict] = None, **kwargs, ): - super().__init__(schema, chunk_size, file_type, **kwargs) + super().__init__(schema, chunk_size, file_type, config, **kwargs) self._local_path = local_path self._uuid = str(uuid.uuid4()) self._flush_count = 0 @@ -109,7 +110,7 @@ def commit(self, **kwargs): f"Prepare to flush buffer, row_count: {super().buffer_row_count}, size: {super().buffer_size}" ) _async = kwargs.get("_async", False) - call_back = kwargs.get("call_back", None) + call_back = kwargs.get("call_back") x = Thread(target=self._flush, args=(call_back,)) logger.info(f"Flush thread begin, name: {x.name}") @@ 
-123,22 +124,26 @@ commit(self, **kwargs):
         logger.info(f"Commit done with async={_async}")

     def _flush(self, call_back: Optional[Callable] = None):
-        self._flush_count = self._flush_count + 1
-        target_path = Path.joinpath(self._local_path, str(self._flush_count))
-
-        old_buffer = super()._new_buffer()
-        if old_buffer.row_count > 0:
-            file_list = old_buffer.persist(
-                local_path=str(target_path),
-                buffer_size=self.buffer_size,
-                buffer_row_count=self.buffer_row_count,
-            )
-            self._local_files.append(file_list)
-            if call_back:
-                call_back(file_list)
-
-        del self._working_thread[threading.current_thread().name]
-        logger.info(f"Flush thread done, name: {threading.current_thread().name}")
+        try:
+            self._flush_count = self._flush_count + 1
+            target_path = Path.joinpath(self._local_path, str(self._flush_count))
+
+            old_buffer = super()._new_buffer()
+            if old_buffer.row_count > 0:
+                file_list = old_buffer.persist(
+                    local_path=str(target_path),
+                    buffer_size=self.buffer_size,
+                    buffer_row_count=self.buffer_row_count,
+                )
+                self._local_files.append(file_list)
+                if call_back:
+                    call_back(file_list)
+        except Exception as e:
+            logger.error(f"Failed to flush, error: {e}")
+            raise e from e
+        finally:
+            del self._working_thread[threading.current_thread().name]
+            logger.info(f"Flush thread finished, name: {threading.current_thread().name}")

     @property
     def data_path(self):
diff --git a/pymilvus/bulk_writer/remote_bulk_writer.py b/pymilvus/bulk_writer/remote_bulk_writer.py
index dd8a1945a..c989551c8 100644
--- a/pymilvus/bulk_writer/remote_bulk_writer.py
+++ b/pymilvus/bulk_writer/remote_bulk_writer.py
@@ -110,10 +110,11 @@ def __init__(
         connect_param: Optional[Union[S3ConnectParam, AzureConnectParam]],
         chunk_size: int = 1024 * MB,
         file_type: BulkFileType = BulkFileType.PARQUET,
+        config: Optional[dict] = None,
         **kwargs,
     ):
         local_path = Path(sys.argv[0]).resolve().parent.joinpath("bulk_writer")
-        super().__init__(schema, str(local_path), chunk_size, file_type, **kwargs)
+        super().__init__(schema, str(local_path), chunk_size, file_type, config, **kwargs)
         self._remote_path = Path("/").joinpath(remote_path).joinpath(super().uuid)
         self._connect_param = connect_param
         self._client = None
@@ -279,7 +280,7 @@ def _upload(self, file_list: list):

         for file_path in file_list:
             ext = Path(file_path).suffix
-            if ext not in [".json", ".npy", ".parquet"]:
+            if ext not in [".json", ".npy", ".parquet", ".csv"]:
                 continue

             relative_file_path = str(file_path).replace(str(super().data_path), "")
diff --git a/pymilvus/bulk_writer/validators.py b/pymilvus/bulk_writer/validators.py
new file mode 100644
index 000000000..936d85275
--- /dev/null
+++ b/pymilvus/bulk_writer/validators.py
@@ -0,0 +1,143 @@
+import numpy as np
+
+from pymilvus.exceptions import MilvusException
+
+
+def float_vector_validator(x: object, dim: int):
+    if isinstance(x, list):  # accepts list of float
+        if len(x) != dim:
+            raise MilvusException(message="array's length must be equal to vector dimension")
+
+        for k in x:
+            if not isinstance(k, float):
+                raise MilvusException(message="array's element must be float value")
+        return x
+
+    if isinstance(x, np.ndarray):  # accepts numpy array of float
+        if (not issubclass(x.dtype.type, np.float32)) and (
+            not issubclass(x.dtype.type, np.float64)
+        ):
+            msg = (
+                'numpy.ndarray\'s dtype must be "float32" or "float64" for FLOAT_VECTOR type field'
+            )
+            raise MilvusException(message=msg)
+
+        if len(x.shape) != 1:
+            raise MilvusException(message="numpy.ndarray's shape must be one dimension")
+
+        if x.shape[0] != dim:
+            raise MilvusException(
+                message="numpy.ndarray's length must be equal to vector dimension"
+            )
+
+        return x.tolist()
+
+    raise MilvusException(
+        message="only accept numpy.ndarray or list[float] for FLOAT_VECTOR type field"
+    )
+
+
+def binary_vector_validator(x: object, dim: int):
+    if isinstance(x, list):  # accepts list such as [1, 0, 1, 1, 0, 0, 1, 0]
+        if len(x) != dim:
+            raise MilvusException(message="length of the list must be equal to vector dimension")
+        return np.packbits(x, axis=-1).tolist()
+
+    if isinstance(x, bytes):  # accepts bytes such as b'\x00\x01\x02\x03'
+        x = np.frombuffer(x, dtype=np.uint8).tolist()
+        if len(x) * 8 != dim:
+            raise MilvusException(
+                message="length of the bytes must be equal to 8x of vector dimension"
+            )
+        return x
+
+    if isinstance(x, np.ndarray):  # accepts numpy array of uint8
+        if not issubclass(x.dtype.type, np.uint8):
+            msg = 'numpy.ndarray\'s dtype must be "uint8" for BINARY_VECTOR type field'
+            raise MilvusException(message=msg)
+
+        if len(x.shape) != 1:
+            raise MilvusException(message="numpy.ndarray's shape must be one dimension")
+
+        if x.shape[0] * 8 != dim:
+            raise MilvusException(
+                message="numpy.ndarray's length must be equal to 8x of vector dimension"
+            )
+
+        return x.tolist()
+
+    raise MilvusException(
+        message="only accept numpy.ndarray, list, bytes for BINARY_VECTOR type field"
+    )
+
+
+def float16_vector_validator(x: object, dim: int, is_bfloat: bool):
+    if isinstance(x, list):  # accepts list of float
+        if len(x) != dim:
+            raise MilvusException(message="array's length must be equal to vector dimension")
+
+        for k in x:
+            if not isinstance(k, float):
+                raise MilvusException(message="array's element must be float value")
+
+        arr = (
+            np.array(x, dtype=np.dtype("bfloat16")) if is_bfloat else np.array(x, dtype=np.float16)
+        )
+        return arr.tobytes()
+
+    if isinstance(x, np.ndarray):  # accepts numpy array
+        if is_bfloat and x.dtype != "bfloat16":
+            msg = 'numpy.ndarray\'s dtype must be "bfloat16" for BFLOAT16_VECTOR type field'
+            raise MilvusException(message=msg)
+        if (not is_bfloat) and (not issubclass(x.dtype.type, np.float16)):
+            msg = 'numpy.ndarray\'s dtype must be "float16" for FLOAT16_VECTOR type field'
+            raise MilvusException(message=msg)
+
+        if len(x.shape) != 1:
+            raise MilvusException(message="numpy.ndarray's shape must be one dimension")
+
+        if x.shape[0] != dim:
+            raise MilvusException(
+                message="numpy.ndarray's length must be equal to vector dimension"
+            )
+
+        return x.tobytes()
+
+    raise MilvusException(
+        message="only accept numpy.ndarray or list[float] for FLOAT16_VECTOR/BFLOAT16_VECTOR type field"
+    )
+
+
+def sparse_vector_validator(x: object):
+    if not isinstance(x, dict):
+        raise MilvusException(message="only accept dict for SPARSE_FLOAT_VECTOR type field")
+
+    def check_pair(k: object, v: object):
+        if not isinstance(k, int):
+            raise MilvusException(message="sparse vector's index must be integer value")
+        if not isinstance(v, float):
+            raise MilvusException(message="sparse vector's value must be float value")
+
+    # only accepts dict like {2: 13.23, 45: 0.54} or {"indices": [1, 2], "values": [0.1, 0.2]}
+    if "indices" in x and "values" in x:
+        indices = x["indices"]
+        values = x["values"]
+        if not isinstance(indices, list):
+            raise MilvusException(message="indices of sparse vector must be a list of int")
+        if not isinstance(values, list):
+            raise MilvusException(message="values of sparse vector must be a list of float")
+        if len(indices) != len(values):
+            raise MilvusException(
+                message="length of indices and
values of sparse vector must be equal" + ) + if len(indices) == 0: + raise MilvusException(message="empty sparse vector is not allowed") + for i in range(len(indices)): + check_pair(indices[i], values[i]) + else: + if len(x) == 0: + raise MilvusException(message="empty sparse vector is not allowed") + for key, value in x.items(): + check_pair(key, value) + + return x diff --git a/pymilvus/client/abstract.py b/pymilvus/client/abstract.py index 5e2b63a1c..a7711749b 100644 --- a/pymilvus/client/abstract.py +++ b/pymilvus/client/abstract.py @@ -7,9 +7,9 @@ from pymilvus.grpc_gen import common_pb2, schema_pb2 from pymilvus.settings import Config -from . import entity_helper +from . import entity_helper, utils from .constants import DEFAULT_CONSISTENCY_LEVEL, RANKER_TYPE_RRF, RANKER_TYPE_WEIGHTED -from .types import DataType +from .types import DataType, FunctionType class FieldSchema: @@ -26,6 +26,9 @@ def __init__(self, raw: Any): self.params = {} self.is_partition_key = False self.is_dynamic = False + self.nullable = False + self.default_value = None + self.is_function_output = False # For array field self.element_type = None self.is_clustering_key = False @@ -41,10 +44,12 @@ def __pack(self, raw: Any): self.is_partition_key = raw.is_partition_key self.element_type = DataType(raw.element_type) self.is_clustering_key = raw.is_clustering_key - try: - self.is_dynamic = raw.is_dynamic - except Exception: - self.is_dynamic = False + self.default_value = raw.default_value + if raw.default_value is not None and raw.default_value.WhichOneof("data") is None: + self.default_value = None + self.is_dynamic = raw.is_dynamic + self.nullable = raw.nullable + self.is_function_output = raw.is_function_output for type_param in raw.type_params: if type_param.key == "params": @@ -52,6 +57,11 @@ def __pack(self, raw: Any): self.params[type_param.key] = json.loads(type_param.value) else: + if type_param.key in ["mmap.enabled"]: + self.params["mmap_enabled"] = ( + bool(type_param.value) if type_param.value.lower() != "false" else False + ) + continue self.params[type_param.key] = type_param.value if type_param.key in ["dim"]: self.params[type_param.key] = int(type_param.value) @@ -84,6 +94,12 @@ def dict(self): "type": self.type, "params": self.params or {}, } + if self.default_value is not None: + # default_value is nil match this situation + if self.default_value.WhichOneof("data") is None: + self.default_value = None + else: + _dict["default_value"] = self.default_value if self.element_type: _dict["element_type"] = self.element_type @@ -94,13 +110,60 @@ def dict(self): _dict["is_dynamic"] = True if self.auto_id: _dict["auto_id"] = True + if self.nullable: + _dict["nullable"] = True if self.is_primary: _dict["is_primary"] = self.is_primary if self.is_clustering_key: _dict["is_clustering_key"] = True + if self.is_function_output: + _dict["is_function_output"] = True return _dict +class FunctionSchema: + def __init__(self, raw: Any): + self._raw = raw + + self.name = None + self.description = None + self.type = None + self.params = {} + self.input_field_names = [] + self.input_field_ids = [] + self.output_field_names = [] + self.output_field_ids = [] + self.id = 0 + + self.__pack(self._raw) + + def __pack(self, raw: Any): + self.name = raw.name + self.description = raw.description + self.id = raw.id + self.type = FunctionType(raw.type) + self.params = {} + for param in raw.params: + self.params[param.key] = param.value + self.input_field_names = raw.input_field_names + self.input_field_ids = raw.input_field_ids + 
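The validators introduced in the new pymilvus/bulk_writer/validators.py above spell out which Python shapes each vector type accepts. A short usage sketch with made-up sample vectors; only the module path and function names are taken from the patch:

```python
import numpy as np

from pymilvus.bulk_writer.validators import (
    binary_vector_validator,
    float_vector_validator,
    sparse_vector_validator,
)

# FLOAT_VECTOR: a plain list of floats, or a 1-D float32/float64 ndarray of length dim
float_vector_validator([0.1, 0.2, 0.3, 0.4], dim=4)
float_vector_validator(np.random.random(4).astype(np.float32), dim=4)

# BINARY_VECTOR: a 0/1 list of length dim, raw bytes, or a uint8 ndarray of length dim / 8
binary_vector_validator([1, 0, 1, 1, 0, 0, 1, 0], dim=8)
binary_vector_validator(np.array([0b10110010], dtype=np.uint8), dim=8)

# SPARSE_FLOAT_VECTOR: either {index: value} or {"indices": [...], "values": [...]}
sparse_vector_validator({2: 0.33, 98: 0.72})
sparse_vector_validator({"indices": [1, 5, 9], "values": [0.1, 0.2, 0.3]})
```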
self.output_field_names = raw.output_field_names + self.output_field_ids = raw.output_field_ids + + def dict(self): + return { + "name": self.name, + "id": self.id, + "description": self.description, + "type": self.type, + "params": self.params, + "input_field_names": self.input_field_names, + "input_field_ids": self.input_field_ids, + "output_field_names": self.output_field_names, + "output_field_ids": self.output_field_ids, + } + + class CollectionSchema: def __init__(self, raw: Any): self._raw = raw @@ -109,6 +172,7 @@ def __init__(self, raw: Any): self.description = None self.params = {} self.fields = [] + self.functions = [] self.statistics = {} self.auto_id = False # auto_id is not in collection level any more later self.aliases = [] @@ -146,6 +210,11 @@ def __pack(self, raw: Any): self.fields = [FieldSchema(f) for f in raw.schema.fields] + self.functions = [FunctionSchema(f) for f in raw.schema.functions] + function_output_field_names = [f for fn in self.functions for f in fn.output_field_names] + for field in self.fields: + if field.name in function_output_field_names: + field.is_function_output = True # for s in raw.statistics: for p in raw.properties: @@ -171,6 +240,7 @@ def dict(self): "num_shards": self.num_shards, "description": self.description, "fields": [f.dict() for f in self.fields], + "functions": [f.dict() for f in self.functions], "aliases": self.aliases, "collection_id": self.collection_id, "consistency_level": self.consistency_level, @@ -241,10 +311,15 @@ def cost(self): return self._cost def __str__(self): + if self.cost: + return ( + f"(insert count: {self._insert_cnt}, delete count: {self._delete_cnt}, upsert count: {self._upsert_cnt}, " + f"timestamp: {self._timestamp}, success count: {self.succ_count}, err count: {self.err_count}, " + f"cost: {self._cost})" + ) return ( f"(insert count: {self._insert_cnt}, delete count: {self._delete_cnt}, upsert count: {self._upsert_cnt}, " - f"timestamp: {self._timestamp}, success count: {self.succ_count}, err count: {self.err_count}, " - f"cost: {self._cost})" + f"timestamp: {self._timestamp}, success count: {self.succ_count}, err count: {self.err_count}" ) __repr__ = __str__ @@ -337,7 +412,7 @@ def dict(self): class AnnSearchRequest: def __init__( self, - data: Union[List, entity_helper.SparseMatrixInputType], + data: Union[List, utils.SparseMatrixInputType], anns_field: str, param: Dict, limit: int, @@ -389,6 +464,7 @@ def __init__( res: schema_pb2.SearchResultData, round_decimal: Optional[int] = None, status: Optional[common_pb2.Status] = None, + session_ts: Optional[int] = 0, ): self._nq = res.num_queries all_topks = res.topks @@ -420,9 +496,12 @@ def __init__( Hits(topk, all_pks[start:end], all_scores[start:end], nq_th_fields, output_fields) ) nq_thres += topk - + self._session_ts = session_ts super().__init__(data) + def get_session_ts(self): + return self._session_ts + def get_fields_by_range( self, start: int, end: int, all_fields_data: List[schema_pb2.FieldData] ) -> Dict[str, Tuple[List[Any], schema_pb2.FieldData]]: @@ -437,38 +516,73 @@ def get_fields_by_range( is_dynamic=field.is_dynamic, ) if dtype == DataType.BOOL: - field2data[name] = scalars.bool_data.data[start:end], field_meta + field2data[name] = ( + apply_valid_data( + scalars.bool_data.data[start:end], field.valid_data, start, end + ), + field_meta, + ) continue if dtype in (DataType.INT8, DataType.INT16, DataType.INT32): - field2data[name] = scalars.int_data.data[start:end], field_meta + field2data[name] = ( + apply_valid_data( + 
scalars.int_data.data[start:end], field.valid_data, start, end + ), + field_meta, + ) continue if dtype == DataType.INT64: - field2data[name] = scalars.long_data.data[start:end], field_meta + field2data[name] = ( + apply_valid_data( + scalars.long_data.data[start:end], field.valid_data, start, end + ), + field_meta, + ) continue if dtype == DataType.FLOAT: - field2data[name] = scalars.float_data.data[start:end], field_meta + field2data[name] = ( + apply_valid_data( + scalars.float_data.data[start:end], field.valid_data, start, end + ), + field_meta, + ) continue if dtype == DataType.DOUBLE: - field2data[name] = scalars.double_data.data[start:end], field_meta + field2data[name] = ( + apply_valid_data( + scalars.double_data.data[start:end], field.valid_data, start, end + ), + field_meta, + ) continue if dtype == DataType.VARCHAR: - field2data[name] = scalars.string_data.data[start:end], field_meta + field2data[name] = ( + apply_valid_data( + scalars.string_data.data[start:end], field.valid_data, start, end + ), + field_meta, + ) continue if dtype == DataType.JSON: - json_dict_list = list(map(ujson.loads, scalars.json_data.data[start:end])) + res = apply_valid_data( + scalars.json_data.data[start:end], field.valid_data, start, end + ) + json_dict_list = [ujson.loads(item) if item is not None else item for item in res] field2data[name] = json_dict_list, field_meta continue if dtype == DataType.ARRAY: - topk_array_fields = scalars.array_data.data[start:end] + res = apply_valid_data( + scalars.array_data.data[start:end], field.valid_data, start, end + ) field2data[name] = ( - extract_array_row_data(topk_array_fields, scalars.array_data.element_type), + extract_array_row_data(res, scalars.array_data.element_type), field_meta, ) continue @@ -477,7 +591,16 @@ def get_fields_by_range( dim, vectors = field.vectors.dim, field.vectors field_meta.vectors.dim = dim if dtype == DataType.FLOAT_VECTOR: - field2data[name] = vectors.float_vector.data[start * dim : end * dim], field_meta + if start == 0 and (end - start) * dim >= len(vectors.float_vector.data): + # If the range equals to the lenth of ectors.float_vector.data, direct return + # it to avoid a copy. This logic improves performance by 25% for the case + # retrival 1536 dim embeddings with topk=16384. + field2data[name] = vectors.float_vector.data, field_meta + else: + field2data[name] = ( + vectors.float_vector.data[start * dim : end * dim], + field_meta, + ) continue if dtype == DataType.BINARY_VECTOR: @@ -514,7 +637,10 @@ def __iter__(self) -> SequenceIterator: def __str__(self) -> str: """Only print at most 10 query results""" - return f"data: {list(map(str, self[:10]))} {'...' if len(self) > 10 else ''}, cost: {self.cost}" + reminder = f" ... and {len(self) - 10} results remaining" if len(self) > 10 else "" + if self.cost: + return f"data: {list(map(str, self[:10]))}{reminder}, cost: {self.cost}" + return f"data: {list(map(str, self[:10]))}{reminder}" __repr__ = __str__ @@ -585,7 +711,8 @@ def __iter__(self) -> SequenceIterator: def __str__(self) -> str: """Only print at most 10 query results""" - return str(list(map(str, self[:10]))) + reminder = f" ... 
and {len(self) - 10} entities remaining" if len(self) > 10 else "" + return f"{list(map(str, self[:10]))!s}{reminder}" __repr__ = __str__ @@ -638,6 +765,10 @@ def extract_array_row_data( ) -> List[List[Any]]: row = [] for ith_array in scalars: + if ith_array is None: + row.append(None) + continue + if element_type == DataType.INT64: row.append(ith_array.long_data.data) continue @@ -664,6 +795,16 @@ def extract_array_row_data( return row +def apply_valid_data( + data: List[Any], valid_data: Union[None, List[bool]], start: int, end: int +) -> List[Any]: + if valid_data: + for i, valid in enumerate(valid_data[start:end]): + if not valid: + data[i] = None + return data + + class LoopBase: def __init__(self): self.__index = 0 diff --git a/pymilvus/client/asynch.py b/pymilvus/client/asynch.py index 8bb329450..44822e6b9 100644 --- a/pymilvus/client/asynch.py +++ b/pymilvus/client/asynch.py @@ -105,7 +105,7 @@ def result(self, **kwargs): self.exception() with self._condition: # future not finished. wait callback being called. - to = kwargs.get("timeout", None) + to = kwargs.get("timeout") if to is None: to = self._kwargs.get("timeout", None) diff --git a/pymilvus/client/check.py b/pymilvus/client/check.py index 110cd88c0..c76ab7f7a 100644 --- a/pymilvus/client/check.py +++ b/pymilvus/client/check.py @@ -17,17 +17,11 @@ def is_legal_address(addr: Any) -> bool: if len(a) != 2: return False - if not is_legal_host(a[0]) or not is_legal_port(a[1]): - return False - - return True + return is_legal_host(a[0]) and is_legal_port(a[1]) def is_legal_host(host: Any) -> bool: - if not isinstance(host, str) or len(host) == 0 or (":" in host): - return False - - return True + return isinstance(host, str) and len(host) > 0 and (":" not in host) def is_legal_port(port: Any) -> bool: @@ -58,7 +52,12 @@ def is_correct_date_str(param: str) -> bool: def is_legal_dimension(dim: Any) -> bool: - return isinstance(dim, int) + try: + _ = int(dim) + except ValueError: + return False + + return True def is_legal_index_size(index_size: Any) -> bool: @@ -163,10 +162,8 @@ def parser_range_date(date: Union[str, datetime.date]) -> str: def is_legal_date_range(start: str, end: str) -> bool: start_date = datetime.datetime.strptime(start, "%Y-%m-%d") end_date = datetime.datetime.strptime(end, "%Y-%m-%d") - if (end_date - start_date).days < 0: - return False - return True + return (end_date - start_date).days >= 0 def is_legal_partition_name(tag: Any) -> bool: @@ -190,7 +187,7 @@ def is_legal_search_data(data: Any) -> bool: if not isinstance(data, (list, np.ndarray)): return False - return all(isinstance(vector, (list, bytes, np.ndarray)) for vector in data) + return all(isinstance(vector, (list, bytes, np.ndarray, str)) for vector in data) def is_legal_output_fields(output_fields: Any) -> bool: diff --git a/pymilvus/client/constants.py b/pymilvus/client/constants.py index fc1b31fa3..efb38aa7a 100644 --- a/pymilvus/client/constants.py +++ b/pymilvus/client/constants.py @@ -8,9 +8,17 @@ BOUNDED_TS = 2 DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.Bounded DEFAULT_RESOURCE_GROUP = "__default_resource_group" +DYNAMIC_FIELD_NAME = "$meta" REDUCE_STOP_FOR_BEST = "reduce_stop_for_best" GROUP_BY_FIELD = "group_by_field" +GROUP_SIZE = "group_size" +RANK_GROUP_SCORER = "rank_group_scorer" +STRICT_GROUP_SIZE = "strict_group_size" ITERATOR_FIELD = "iterator" +ITERATOR_SESSION_TS_FIELD = "iterator_session_ts" +PAGE_RETAIN_ORDER_FIELD = "page_retain_order" RANKER_TYPE_RRF = "rrf" RANKER_TYPE_WEIGHTED = "weighted" + +GUARANTEE_TIMESTAMP = 
"guarantee_timestamp" diff --git a/pymilvus/client/entity_helper.py b/pymilvus/client/entity_helper.py index 8d39acc75..93ee07182 100644 --- a/pymilvus/client/entity_helper.py +++ b/pymilvus/client/entity_helper.py @@ -1,10 +1,9 @@ import math import struct -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional import numpy as np import ujson -from scipy import sparse from pymilvus.exceptions import ( DataNotMatchException, @@ -16,67 +15,13 @@ from pymilvus.settings import Config from .types import DataType +from .utils import SciPyHelper, SparseMatrixInputType, SparseRowOutputType CHECK_STR_ARRAY = True -# in search results, if output fields includes a sparse float vector field, we -# will return a SparseRowOutputType for each entity. Using Dict for readability. -# TODO(SPARSE): to allow the user to specify output format. -SparseRowOutputType = Dict[int, float] - -# we accept the following types as input for sparse matrix in user facing APIs -# such as insert, search, etc.: -# - scipy sparse array/matrix family: csr, csc, coo, bsr, dia, dok, lil -# - iterable of iterables, each element(iterable) is a sparse vector with index -# as key and value as float. -# dict example: [{2: 0.33, 98: 0.72, ...}, {4: 0.45, 198: 0.52, ...}, ...] -# list of tuple example: [[(2, 0.33), (98, 0.72), ...], [(4, 0.45), ...], ...] -# both index/value can be str numbers: {'2': '3.1'} -SparseMatrixInputType = Union[ - Iterable[ - Union[ - SparseRowOutputType, - Iterable[Tuple[int, float]], # only type hint, we accept int/float like types - ] - ], - sparse.csc_array, - sparse.coo_array, - sparse.bsr_array, - sparse.dia_array, - sparse.dok_array, - sparse.lil_array, - sparse.csr_array, - sparse.spmatrix, -] - - -def sparse_is_scipy_matrix(data: Any): - return isinstance(data, sparse.spmatrix) - - -def sparse_is_scipy_array(data: Any): - # sparse.sparray, the common superclass of sparse.*_array, is introduced in - # scipy 1.11.0, which requires python 3.9, higher than pymilvus's current requirement. - return isinstance( - data, - ( - sparse.bsr_array, - sparse.coo_array, - sparse.csc_array, - sparse.csr_array, - sparse.dia_array, - sparse.dok_array, - sparse.lil_array, - ), - ) - - -def sparse_is_scipy_format(data: Any): - return sparse_is_scipy_matrix(data) or sparse_is_scipy_array(data) - def entity_is_sparse_matrix(entity: Any): - if sparse_is_scipy_format(entity): + if SciPyHelper.is_scipy_sparse(entity): return True try: @@ -99,10 +44,12 @@ def is_float_type(v: Any): if len(entity) == 0: return False for item in entity: - pairs = item.items() if isinstance(item, dict) else item - # each row must be a non-empty list of Tuple[int, float] - if len(pairs) == 0: + if SciPyHelper.is_scipy_sparse(item): + return item.shape[0] == 1 + if not isinstance(item, dict) and not isinstance(item, list): return False + pairs = item.items() if isinstance(item, dict) else item + # each row must be a list of Tuple[int, float]. 
we allow empty sparse row for pair in pairs: if len(pair) != 2 or not is_int_type(pair[0]) or not is_float_type(pair[1]): return False @@ -114,8 +61,7 @@ def is_float_type(v: Any): # parses plain bytes to a sparse float vector(SparseRowOutputType) def sparse_parse_single_row(data: bytes) -> SparseRowOutputType: if len(data) % 8 != 0: - msg = f"The length of data must be a multiple of 8, got {len(data)}" - raise ValueError(msg) + raise ParamError(message=f"The length of data must be a multiple of 8, got {len(data)}") return { struct.unpack("I", data[i : i + 4])[0]: struct.unpack("f", data[i + 4 : i + 8])[0] @@ -129,49 +75,54 @@ def sparse_rows_to_proto(data: SparseMatrixInputType) -> schema_types.SparseFloa # milvus interprets/persists the data. def sparse_float_row_to_bytes(indices: Iterable[int], values: Iterable[float]): if len(indices) != len(values): - msg = f"length of indices and values must be the same, got {len(indices)} and {len(values)}" - raise ValueError(msg) + raise ParamError( + message=f"length of indices and values must be the same, got {len(indices)} and {len(values)}" + ) data = b"" for i, v in sorted(zip(indices, values), key=lambda x: x[0]): if not (0 <= i < 2**32 - 1): - msg = f"sparse vector index must be positive and less than 2^32-1: {i}" - raise ValueError(msg) + raise ParamError( + message=f"sparse vector index must be positive and less than 2^32-1: {i}" + ) if math.isnan(v): - msg = "sparse vector value must not be NaN" - raise ValueError(msg) + raise ParamError(message="sparse vector value must not be NaN") data += struct.pack("I", i) data += struct.pack("f", v) return data - def unify_sparse_input(data: SparseMatrixInputType) -> sparse.csr_array: - if isinstance(data, sparse.csr_array): - return data - if sparse_is_scipy_array(data): - return data.tocsr() - if sparse_is_scipy_matrix(data): - return sparse.csr_array(data.tocsr()) - row_indices = [] - col_indices = [] - values = [] - for row_id, row_data in enumerate(data): - row = row_data.items() if isinstance(row_data, dict) else row_data - row_indices.extend([row_id] * len(row)) - col_indices.extend( - [int(col_id) if isinstance(col_id, str) else col_id for col_id, _ in row] - ) - values.extend([float(value) if isinstance(value, str) else value for _, value in row]) - return sparse.csr_array((values, (row_indices, col_indices))) - if not entity_is_sparse_matrix(data): - msg = "input must be a sparse matrix in supported format" - raise TypeError(msg) - csr = unify_sparse_input(data) + raise ParamError(message="input must be a sparse matrix in supported format") + result = schema_types.SparseFloatArray() - result.dim = csr.shape[1] - for start, end in zip(csr.indptr[:-1], csr.indptr[1:]): - result.contents.append( - sparse_float_row_to_bytes(csr.indices[start:end], csr.data[start:end]) - ) + + if SciPyHelper.is_scipy_sparse(data): + csr = data.tocsr() + result.dim = csr.shape[1] + for start, end in zip(csr.indptr[:-1], csr.indptr[1:]): + result.contents.append( + sparse_float_row_to_bytes(csr.indices[start:end], csr.data[start:end]) + ) + else: + dim = 0 + for _, row_data in enumerate(data): + if SciPyHelper.is_scipy_sparse(row_data): + if row_data.shape[0] != 1: + raise ParamError(message="invalid input for sparse float vector: expect 1 row") + dim = max(dim, row_data.shape[1]) + result.contents.append(sparse_float_row_to_bytes(row_data.indices, row_data.data)) + else: + indices = [] + values = [] + row = row_data.items() if isinstance(row_data, dict) else row_data + for index, value in row: + 
indices.append(int(index)) + values.append(float(value)) + result.contents.append(sparse_float_row_to_bytes(indices, values)) + row_dim = 0 + if len(indices) > 0: + row_dim = indices[-1] + 1 + dim = max(dim, row_dim) + result.dim = dim return result @@ -180,15 +131,14 @@ def sparse_proto_to_rows( sfv: schema_types.SparseFloatArray, start: Optional[int] = None, end: Optional[int] = None ) -> Iterable[SparseRowOutputType]: if not isinstance(sfv, schema_types.SparseFloatArray): - msg = "Vector must be a sparse float vector" - raise TypeError(msg) + raise ParamError(message="Vector must be a sparse float vector") start = start or 0 end = end or len(sfv.contents) return [sparse_parse_single_row(row_bytes) for row_bytes in sfv.contents[start:end]] def get_input_num_rows(entity: Any) -> int: - if sparse_is_scipy_format(entity): + if SciPyHelper.is_scipy_sparse(entity): return entity.shape[0] return len(entity) @@ -208,18 +158,7 @@ def get_max_len_of_var_char(field_info: Dict) -> int: return field_info.get("params", {}).get(k, v) -def check_str_arr(str_arr: Any, max_len: int): - for s in str_arr: - if not isinstance(s, str): - raise ParamError(message=f"expect string input, got: {type(s)}") - if len(s) > max_len: - raise ParamError( - message=f"invalid input, length of string exceeds max length. " - f"length: {len(s)}, max length: {max_len}" - ) - - -def convert_to_str_array(orig_str_arr: Any, field_info: Any, check: bool = True): +def convert_to_str_array(orig_str_arr: Any, field_info: Dict, check: bool = True): arr = [] if Config.EncodeProtocol.lower() != "utf-8".lower(): for s in orig_str_arr: @@ -228,7 +167,16 @@ def convert_to_str_array(orig_str_arr: Any, field_info: Any, check: bool = True) arr = orig_str_arr max_len = int(get_max_len_of_var_char(field_info)) if check: - check_str_arr(arr, max_len) + for s in arr: + if not isinstance(s, str): + raise ParamError( + message=f"field ({field_info['name']}) expect string input, got: {type(s)}" + ) + if len(s) > max_len: + raise ParamError( + message=f"invalid input of field ({field_info['name']}), " + f"length of string exceeds max length. 
length: {len(s)}, max length: {max_len}" + ) return arr @@ -246,15 +194,17 @@ def convert_to_json(obj: object): return ujson.dumps(obj, ensure_ascii=False).encode(Config.EncodeProtocol) -def convert_to_json_arr(objs: List[object]): +def convert_to_json_arr(objs: List[object], field_info: Any): arr = [] for obj in objs: + if obj is None: + raise ParamError(message=f"field ({field_info['name']}) expect not None input") arr.append(convert_to_json(obj)) return arr -def entity_to_json_arr(entity: Dict): - return convert_to_json_arr(entity.get("values", [])) +def entity_to_json_arr(entity: Dict, field_info: Any): + return convert_to_json_arr(entity.get("values", []), field_info) def convert_to_array_arr(objs: List[Any], field_info: Any): @@ -283,7 +233,7 @@ def convert_to_array(obj: List[Any], field_info: Any): field_data.string_data.data.extend(obj) return field_data raise ParamError( - message=f"UnSupported element type: {element_type} for Array field: {field_info.get('name')}" + message=f"Unsupported element type: {element_type} for Array field: {field_info.get('name')}" ) @@ -295,131 +245,324 @@ def pack_field_value_to_field_data( field_value: Any, field_data: schema_types.FieldData, field_info: Any ): field_type = field_data.type + field_name = field_info["name"] if field_type == DataType.BOOL: - field_data.scalars.bool_data.data.append(field_value) + try: + if field_value is None: + field_data.scalars.bool_data.data.extend([]) + else: + field_data.scalars.bool_data.data.append(field_value) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "bool", type(field_value)) + ) from e elif field_type in (DataType.INT8, DataType.INT16, DataType.INT32): - field_data.scalars.int_data.data.append(field_value) + try: + # need to extend it, or cannot correctly identify field_data.scalars.int_data.data + if field_value is None: + field_data.scalars.int_data.data.extend([]) + else: + field_data.scalars.int_data.data.append(field_value) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "int", type(field_value)) + ) from e elif field_type == DataType.INT64: - field_data.scalars.long_data.data.append(field_value) + try: + if field_value is None: + field_data.scalars.long_data.data.extend([]) + else: + field_data.scalars.long_data.data.append(field_value) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "int64", type(field_value)) + ) from e elif field_type == DataType.FLOAT: - field_data.scalars.float_data.data.append(field_value) + try: + if field_value is None: + field_data.scalars.float_data.data.extend([]) + else: + field_data.scalars.float_data.data.append(field_value) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "float", type(field_value)) + ) from e elif field_type == DataType.DOUBLE: - field_data.scalars.double_data.data.append(field_value) + try: + if field_value is None: + field_data.scalars.double_data.data.extend([]) + else: + field_data.scalars.double_data.data.append(field_value) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "double", type(field_value)) + ) from e elif field_type == DataType.FLOAT_VECTOR: - f_value = field_value - if isinstance(field_value, 
np.ndarray): - if field_value.dtype not in ("float32", "float64"): - raise ParamError( - message="invalid input for float32 vector, expect np.ndarray with dtype=float32" - ) - f_value = field_value.tolist() - - field_data.vectors.dim = len(f_value) - field_data.vectors.float_vector.data.extend(f_value) - + try: + f_value = field_value + if isinstance(field_value, np.ndarray): + if field_value.dtype not in ("float32", "float64"): + raise ParamError( + message="invalid input for float32 vector, expect np.ndarray with dtype=float32" + ) + f_value = field_value.tolist() + + field_data.vectors.dim = len(f_value) + field_data.vectors.float_vector.data.extend(f_value) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "float_vector", type(field_value)) + ) from e elif field_type == DataType.BINARY_VECTOR: - field_data.vectors.dim = len(field_value) * 8 - field_data.vectors.binary_vector += bytes(field_value) - + try: + field_data.vectors.dim = len(field_value) * 8 + field_data.vectors.binary_vector += bytes(field_value) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "binary_vector", type(field_value)) + ) from e elif field_type == DataType.FLOAT16_VECTOR: - if isinstance(field_value, bytes): - v_bytes = field_value - elif isinstance(field_value, np.ndarray): - if field_value.dtype != "float16": + try: + if isinstance(field_value, bytes): + v_bytes = field_value + elif isinstance(field_value, np.ndarray): + if field_value.dtype != "float16": + raise ParamError( + message="invalid input for float16 vector, expect np.ndarray with dtype=float16" + ) + v_bytes = field_value.view(np.uint8).tobytes() + else: raise ParamError( - message="invalid input for float16 vector, expect np.ndarray with dtype=float16" + message="invalid input type for float16 vector, expect np.ndarray with dtype=float16" ) - v_bytes = field_value.view(np.uint8).tobytes() - else: - raise ParamError( - message="invalid input type for float16 vector, expect np.ndarray with dtype=float16" - ) - - field_data.vectors.dim = len(v_bytes) // 2 - field_data.vectors.float16_vector += v_bytes + field_data.vectors.dim = len(v_bytes) // 2 + field_data.vectors.float16_vector += v_bytes + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "float16_vector", type(field_value)) + ) from e elif field_type == DataType.BFLOAT16_VECTOR: - if isinstance(field_value, bytes): - v_bytes = field_value - elif isinstance(field_value, np.ndarray): - if field_value.dtype != "bfloat16": + try: + if isinstance(field_value, bytes): + v_bytes = field_value + elif isinstance(field_value, np.ndarray): + if field_value.dtype != "bfloat16": + raise ParamError( + message="invalid input for bfloat16 vector, expect np.ndarray with dtype=bfloat16" + ) + v_bytes = field_value.view(np.uint8).tobytes() + else: raise ParamError( - message="invalid input for bfloat16 vector, expect np.ndarray with dtype=bfloat16" + message="invalid input type for bfloat16 vector, expect np.ndarray with dtype=bfloat16" ) - v_bytes = field_value.view(np.uint8).tobytes() - else: - raise ParamError( - message="invalid input type for bfloat16 vector, expect np.ndarray with dtype=bfloat16" - ) - field_data.vectors.dim = len(v_bytes) // 2 - field_data.vectors.bfloat16_vector += v_bytes + field_data.vectors.dim = len(v_bytes) // 2 + 
field_data.vectors.bfloat16_vector += v_bytes + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "bfloat16_vector", type(field_value)) + ) from e elif field_type == DataType.SPARSE_FLOAT_VECTOR: - # field_value is a single row of sparse float vector in user provided format - if not sparse_is_scipy_format(field_value): - field_value = [field_value] - elif field_value.shape[0] != 1: - raise ParamError(message="invalid input for sparse float vector: expect 1 row") - if not entity_is_sparse_matrix(field_value): - raise ParamError(message="invalid input for sparse float vector") - field_data.vectors.sparse_float_vector.contents.append( - sparse_rows_to_proto(field_value).contents[0] - ) + try: + if not SciPyHelper.is_scipy_sparse(field_value): + field_value = [field_value] + elif field_value.shape[0] != 1: + raise ParamError(message="invalid input for sparse float vector: expect 1 row") + if not entity_is_sparse_matrix(field_value): + raise ParamError(message="invalid input for sparse float vector") + field_data.vectors.sparse_float_vector.contents.append( + sparse_rows_to_proto(field_value).contents[0] + ) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "sparse_float_vector", type(field_value)) + ) from e elif field_type == DataType.VARCHAR: - field_data.scalars.string_data.data.append( - convert_to_str_array(field_value, field_info, CHECK_STR_ARRAY) - ) + try: + if field_value is None: + field_data.scalars.string_data.data.extend([]) + else: + field_data.scalars.string_data.data.append( + convert_to_str_array(field_value, field_info, CHECK_STR_ARRAY) + ) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "varchar", type(field_value)) + ) from e elif field_type == DataType.JSON: - field_data.scalars.json_data.data.append(convert_to_json(field_value)) + try: + if field_value is None: + field_data.scalars.json_data.data.extend([]) + else: + field_data.scalars.json_data.data.append(convert_to_json(field_value)) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "json", type(field_value)) + ) from e elif field_type == DataType.ARRAY: - field_data.scalars.array_data.data.append(convert_to_array(field_value, field_info)) + try: + if field_value is None: + field_data.scalars.array_data.data.extend([]) + else: + field_data.scalars.array_data.data.append(convert_to_array(field_value, field_info)) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "array", type(field_value)) + ) from e else: - raise ParamError(message=f"UnSupported data type: {field_type}") + raise ParamError(message=f"Unsupported data type: {field_type}") # TODO: refactor here. 
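The packing branches above determine what a row-based insert may carry per field type: float16/bfloat16 vectors as numpy arrays of the matching dtype (or raw bytes), sparse vectors as {index: value} dicts, JSON values as plain dicts, and so on. A minimal row-insert sketch exercising a few of them; the URI, collection name, and field names are placeholders, and the target collection is assumed to already have a matching schema:

```python
import numpy as np

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")  # placeholder URI

client.insert(
    collection_name="demo_collection",  # assumed to exist with a matching schema
    data=[
        {
            "id": 1,
            "fp16_vec": np.random.random(4).astype(np.float16),   # FLOAT16_VECTOR as ndarray
            "sparse_vec": {12: 0.5, 763: 0.21, 90021: 0.97},      # SPARSE_FLOAT_VECTOR as dict
            "meta": {"source": "doc-1", "page": 3},               # JSON as dict
        }
    ],
)
```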
-def entity_to_field_data(entity: Any, field_info: Any): +def entity_to_field_data(entity: Any, field_info: Any, num_rows: int): field_data = schema_types.FieldData() entity_type = entity.get("type") - field_data.field_name = entity.get("name") + field_name = entity.get("name") + field_data.field_name = field_name field_data.type = entity_type_to_dtype(entity_type) + entity_value = entity.get("values") + valid_data = [] + + if field_info.get("nullable", False) or field_info.get("default_value", None): + if len(entity_value) == 0: + valid_data = [False] * num_rows + else: + valid_data = [value is not None for value in entity_value] + entity_value = [value for value in entity_value if value is not None] + + field_data.valid_data.extend(valid_data) + entity["values"] = entity_value if entity_type == DataType.BOOL: - field_data.scalars.bool_data.data.extend(entity.get("values")) + try: + field_data.scalars.bool_data.data.extend(entity.get("values")) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "bool", type(entity.get("values")[0])) + ) from e elif entity_type in (DataType.INT8, DataType.INT16, DataType.INT32): - field_data.scalars.int_data.data.extend(entity.get("values")) + try: + field_data.scalars.int_data.data.extend(entity.get("values")) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "int", type(entity.get("values")[0])) + ) from e elif entity_type == DataType.INT64: - field_data.scalars.long_data.data.extend(entity.get("values")) + try: + field_data.scalars.long_data.data.extend(entity.get("values")) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "int64", type(entity.get("values")[0])) + ) from e elif entity_type == DataType.FLOAT: - field_data.scalars.float_data.data.extend(entity.get("values")) + try: + field_data.scalars.float_data.data.extend(entity.get("values")) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "float", type(entity.get("values")[0])) + ) from e elif entity_type == DataType.DOUBLE: - field_data.scalars.double_data.data.extend(entity.get("values")) + try: + field_data.scalars.double_data.data.extend(entity.get("values")) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "double", type(entity.get("values")[0])) + ) from e elif entity_type == DataType.FLOAT_VECTOR: - field_data.vectors.dim = len(entity.get("values")[0]) - all_floats = [f for vector in entity.get("values") for f in vector] - field_data.vectors.float_vector.data.extend(all_floats) + try: + field_data.vectors.dim = len(entity.get("values")[0]) + all_floats = [f for vector in entity.get("values") for f in vector] + field_data.vectors.float_vector.data.extend(all_floats) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "float_vector", type(entity.get("values")[0])) + ) from e elif entity_type == DataType.BINARY_VECTOR: - field_data.vectors.dim = len(entity.get("values")[0]) * 8 - field_data.vectors.binary_vector = b"".join(entity.get("values")) + try: + field_data.vectors.dim = len(entity.get("values")[0]) * 8 + field_data.vectors.binary_vector = b"".join(entity.get("values")) + 
except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "binary_vector", type(entity.get("values")[0])) + ) from e elif entity_type == DataType.FLOAT16_VECTOR: - field_data.vectors.dim = len(entity.get("values")[0]) // 2 - field_data.vectors.float16_vector = b"".join(entity.get("values")) + try: + field_data.vectors.dim = len(entity.get("values")[0]) // 2 + field_data.vectors.float16_vector = b"".join(entity.get("values")) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "float16_vector", type(entity.get("values")[0])) + ) from e elif entity_type == DataType.BFLOAT16_VECTOR: - field_data.vectors.dim = len(entity.get("values")[0]) // 2 - field_data.vectors.bfloat16_vector = b"".join(entity.get("values")) + try: + field_data.vectors.dim = len(entity.get("values")[0]) // 2 + field_data.vectors.bfloat16_vector = b"".join(entity.get("values")) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "bfloat16_vector", type(entity.get("values")[0])) + ) from e elif entity_type == DataType.VARCHAR: - field_data.scalars.string_data.data.extend( - entity_to_str_arr(entity, field_info, CHECK_STR_ARRAY) - ) + try: + field_data.scalars.string_data.data.extend( + entity_to_str_arr(entity, field_info, CHECK_STR_ARRAY) + ) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "varchar", type(entity.get("values")[0])) + ) from e elif entity_type == DataType.JSON: - field_data.scalars.json_data.data.extend(entity_to_json_arr(entity)) + try: + field_data.scalars.json_data.data.extend(entity_to_json_arr(entity, field_info)) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "json", type(entity.get("values")[0])) + ) from e elif entity_type == DataType.ARRAY: - field_data.scalars.array_data.data.extend(entity_to_array_arr(entity, field_info)) + try: + field_data.scalars.array_data.data.extend(entity_to_array_arr(entity, field_info)) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "array", type(entity.get("values")[0])) + ) from e elif entity_type == DataType.SPARSE_FLOAT_VECTOR: - field_data.vectors.sparse_float_vector.CopyFrom(sparse_rows_to_proto(entity.get("values"))) + try: + field_data.vectors.sparse_float_vector.CopyFrom( + sparse_rows_to_proto(entity.get("values")) + ) + except (TypeError, ValueError) as e: + raise DataNotMatchException( + message=ExceptionsMessage.FieldDataInconsistent + % (field_name, "sparse_float_vector", type(entity.get("values")[0])) + ) from e else: - raise ParamError(message=f"UnSupported data type: {entity_type}") + raise ParamError(message=f"Unsupported data type: {entity_type}") return field_data @@ -492,6 +635,9 @@ def check_append(field_data: Any): raise MilvusException(message="Not support string yet") if field_data.type == DataType.BOOL and len(field_data.scalars.bool_data.data) >= index: + if len(field_data.valid_data) > 0 and field_data.valid_data[index] is False: + entity_row_data[field_data.field_name] = None + return entity_row_data[field_data.field_name] = field_data.scalars.bool_data.data[index] return @@ -499,20 +645,32 @@ def check_append(field_data: Any): 
field_data.type in (DataType.INT8, DataType.INT16, DataType.INT32) and len(field_data.scalars.int_data.data) >= index ): + if len(field_data.valid_data) > 0 and field_data.valid_data[index] is False: + entity_row_data[field_data.field_name] = None + return entity_row_data[field_data.field_name] = field_data.scalars.int_data.data[index] return if field_data.type == DataType.INT64 and len(field_data.scalars.long_data.data) >= index: + if len(field_data.valid_data) > 0 and field_data.valid_data[index] is False: + entity_row_data[field_data.field_name] = None + return entity_row_data[field_data.field_name] = field_data.scalars.long_data.data[index] return if field_data.type == DataType.FLOAT and len(field_data.scalars.float_data.data) >= index: + if len(field_data.valid_data) > 0 and field_data.valid_data[index] is False: + entity_row_data[field_data.field_name] = None + return entity_row_data[field_data.field_name] = np.single( field_data.scalars.float_data.data[index] ) return if field_data.type == DataType.DOUBLE and len(field_data.scalars.double_data.data) >= index: + if len(field_data.valid_data) > 0 and field_data.valid_data[index] is False: + entity_row_data[field_data.field_name] = None + return entity_row_data[field_data.field_name] = field_data.scalars.double_data.data[index] return @@ -520,10 +678,16 @@ def check_append(field_data: Any): field_data.type == DataType.VARCHAR and len(field_data.scalars.string_data.data) >= index ): + if len(field_data.valid_data) > 0 and field_data.valid_data[index] is False: + entity_row_data[field_data.field_name] = None + return entity_row_data[field_data.field_name] = field_data.scalars.string_data.data[index] return if field_data.type == DataType.JSON and len(field_data.scalars.json_data.data) >= index: + if len(field_data.valid_data) > 0 and field_data.valid_data[index] is False: + entity_row_data[field_data.field_name] = None + return json_dict = ujson.loads(field_data.scalars.json_data.data[index]) if not field_data.is_dynamic: @@ -537,15 +701,22 @@ def check_append(field_data: Any): entity_row_data.update({k: v for k, v in json_dict.items() if k in dynamic_fields}) return if field_data.type == DataType.ARRAY and len(field_data.scalars.array_data.data) >= index: + if len(field_data.valid_data) > 0 and field_data.valid_data[index] is False: + entity_row_data[field_data.field_name] = None + return entity_row_data[field_data.field_name] = extract_array_row_data(field_data, index) if field_data.type == DataType.FLOAT_VECTOR: dim = field_data.vectors.dim if len(field_data.vectors.float_vector.data) >= index * dim: start_pos, end_pos = index * dim, (index + 1) * dim - entity_row_data[field_data.field_name] = [ - np.single(x) for x in field_data.vectors.float_vector.data[start_pos:end_pos] - ] + # Here we use numpy.array to convert the float64 values to numpy.float32 values, + # and return a list of numpy.float32 to users + # By using numpy.array, performance improved by 60% for topk=16384 dim=1536 case. 
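With valid_data now threaded through both insert and result parsing, a nullable scalar can be written as None and comes back as None instead of a type default. A hedged end-to-end sketch follows; the nullable=True flag on add_field reflects the schema surface this change set targets and is an assumption, as are the collection and field names, and index creation plus loading are omitted for brevity.

```python
from pymilvus import DataType, MilvusClient

client = MilvusClient(uri="http://localhost:19530")  # placeholder URI

schema = client.create_schema(auto_id=False, enable_dynamic_field=False)
schema.add_field("id", DataType.INT64, is_primary=True)
schema.add_field("embedding", DataType.FLOAT_VECTOR, dim=4)
schema.add_field("age", DataType.INT64, nullable=True)  # nullable flag assumed, per this change
client.create_collection("null_demo", schema=schema)
# index creation and collection loading omitted for brevity

client.insert(
    collection_name="null_demo",
    data=[
        {"id": 1, "embedding": [0.1, 0.2, 0.3, 0.4], "age": 30},
        {"id": 2, "embedding": [0.4, 0.3, 0.2, 0.1], "age": None},  # valid_data marks this False
    ],
)

# The missing value is returned as None rather than a zero default.
print(client.query("null_demo", filter="id >= 1", output_fields=["id", "age"]))
```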
+ arr = np.array( + field_data.vectors.float_vector.data[start_pos:end_pos], dtype=np.float32 + ) + entity_row_data[field_data.field_name] = list(arr) elif field_data.type == DataType.BINARY_VECTOR: dim = field_data.vectors.dim if len(field_data.vectors.binary_vector) >= index * (dim // 8): diff --git a/pymilvus/client/grpc_handler.py b/pymilvus/client/grpc_handler.py index efdacc1dd..bd070251d 100644 --- a/pymilvus/client/grpc_handler.py +++ b/pymilvus/client/grpc_handler.py @@ -23,7 +23,7 @@ from pymilvus.grpc_gen import milvus_pb2 as milvus_types from pymilvus.settings import Config -from . import entity_helper, interceptor, ts_utils +from . import entity_helper, interceptor, ts_utils, utils from .abstract import AnnSearchRequest, BaseRanker, CollectionSchema, MutationResult, SearchResult from .asynch import ( CreateIndexFuture, @@ -37,11 +37,13 @@ is_legal_host, is_legal_port, ) +from .constants import ITERATOR_SESSION_TS_FIELD from .prepare import Prepare from .types import ( BulkInsertState, CompactionPlans, CompactionState, + DatabaseInfo, DataType, ExtraList, GrantInfo, @@ -86,9 +88,9 @@ def __init__( self._address = addr if addr is not None else self.__get_address(uri, host, port) self._log_level = None self._request_id = None - self._user = kwargs.get("user", None) + self._user = kwargs.get("user") self._set_authorization(**kwargs) - self._setup_db_interceptor(kwargs.get("db_name", None)) + self._setup_db_interceptor(kwargs.get("db_name")) self._setup_grpc_channel() self.callbacks = [] @@ -124,9 +126,9 @@ def _set_authorization(self, **kwargs): self._authorization_interceptor = None self._setup_authorization_interceptor( - kwargs.get("user", None), - kwargs.get("password", None), - kwargs.get("token", None), + kwargs.get("user"), + kwargs.get("password"), + kwargs.get("token"), ) def __enter__(self): @@ -469,7 +471,7 @@ def get_partition_stats( return response.stats def _get_info(self, collection_name: str, timeout: Optional[float] = None, **kwargs): - schema = kwargs.get("schema", None) + schema = kwargs.get("schema") if not schema: schema = self.describe_collection(collection_name, timeout=timeout) @@ -489,7 +491,7 @@ def insert_rows( **kwargs, ): request = self._prepare_row_insert_request( - collection_name, entities, partition_name, timeout, **kwargs + collection_name, entities, partition_name, schema, timeout, **kwargs ) resp = self._stub.Insert(request=request, timeout=timeout) check_status(resp.status) @@ -534,7 +536,7 @@ def _prepare_batch_insert_request( if param and not isinstance(param, milvus_types.InsertRequest): raise ParamError(message="The value of key 'insert_param' is invalid") if not isinstance(entities, list): - raise ParamError(message="None entities, please provide valid entities.") + raise ParamError(message="'entities' must be a list, please provide valid entity data.") schema = kwargs.get("schema") if not schema: @@ -566,7 +568,7 @@ def batch_insert( ) rf = self._stub.Insert.future(request, timeout=timeout) if kwargs.get("_async", False): - cb = kwargs.get("_callback", None) + cb = kwargs.get("_callback") f = MutationFuture(rf, cb, timeout=timeout, **kwargs) f.add_callback(ts_utils.update_ts_on_mutation(collection_name)) return f @@ -598,12 +600,13 @@ def delete( partition_name, expression, consistency_level=kwargs.get("consistency_level", 0), - param_name=kwargs.get("param_name", None), + param_name=kwargs.pop("param_name", None), + **kwargs, ) future = self._stub.Delete.future(req, timeout=timeout) if kwargs.get("_async", False): - cb = 
kwargs.get("_callback", None) + cb = kwargs.get("_callback") f = MutationFuture(future, cb, timeout=timeout, **kwargs) f.add_callback(ts_utils.update_ts_on_mutation(collection_name)) return f @@ -625,14 +628,13 @@ def _prepare_batch_upsert_request( entities: List, partition_name: Optional[str] = None, timeout: Optional[float] = None, - is_insert: bool = True, **kwargs, ): param = kwargs.get("upsert_param") if param and not isinstance(param, milvus_types.UpsertRequest): raise ParamError(message="The value of key 'upsert_param' is invalid") if not isinstance(entities, list): - raise ParamError(message="None entities, please provide valid entities.") + raise ParamError(message="'entities' must be a list, please provide valid entity data.") schema = kwargs.get("schema") if not schema: @@ -660,11 +662,11 @@ def upsert( try: request = self._prepare_batch_upsert_request( - collection_name, entities, partition_name, timeout, False, **kwargs + collection_name, entities, partition_name, timeout, **kwargs ) rf = self._stub.Upsert.future(request, timeout=timeout) if kwargs.get("_async", False) is True: - cb = kwargs.get("_callback", None) + cb = kwargs.get("_callback") f = MutationFuture(rf, cb, timeout=timeout, **kwargs) f.add_callback(ts_utils.update_ts_on_mutation(collection_name)) return f @@ -689,7 +691,7 @@ def _prepare_row_upsert_request( **kwargs, ): if not isinstance(rows, list): - raise ParamError(message="None rows, please provide valid row data.") + raise ParamError(message="'rows' must be a list, please provide valid row data.") fields_info, enable_dynamic = self._get_info(collection_name, timeout, **kwargs) return Prepare.row_upsert_param( @@ -727,14 +729,18 @@ def _execute_search( try: if kwargs.get("_async", False): future = self._stub.Search.future(request, timeout=timeout) - func = kwargs.get("_callback", None) + func = kwargs.get("_callback") return SearchFuture(future, func) response = self._stub.Search(request, timeout=timeout) check_status(response.status) round_decimal = kwargs.get("round_decimal", -1) - return SearchResult(response.results, round_decimal, status=response.status) - + return SearchResult( + response.results, + round_decimal, + status=response.status, + session_ts=response.session_ts, + ) except Exception as e: if kwargs.get("_async", False): return SearchFuture(None, None, e) @@ -746,7 +752,7 @@ def _execute_hybrid_search( try: if kwargs.get("_async", False): future = self._stub.HybridSearch.future(request, timeout=timeout) - func = kwargs.get("_callback", None) + func = kwargs.get("_callback") return SearchFuture(future, func) response = self._stub.HybridSearch(request, timeout=timeout) @@ -763,7 +769,7 @@ def _execute_hybrid_search( def search( self, collection_name: str, - data: Union[List[List[float]], entity_helper.SparseMatrixInputType], + data: Union[List[List[float]], utils.SparseMatrixInputType], anns_field: str, param: Dict, limit: int, @@ -781,7 +787,7 @@ def search( search_data=data, partition_name_array=partition_names, output_fields=output_fields, - guarantee_timestamp=kwargs.get("guarantee_timestamp", None), + guarantee_timestamp=kwargs.get("guarantee_timestamp"), timeout=timeout, ) @@ -817,7 +823,7 @@ def hybrid_search( round_decimal=round_decimal, partition_name_array=partition_names, output_fields=output_fields, - guarantee_timestamp=kwargs.get("guarantee_timestamp", None), + guarantee_timestamp=kwargs.get("guarantee_timestamp"), timeout=timeout, ) @@ -977,7 +983,7 @@ def _check(): index_future = CreateIndexFuture(future) 
index_future.add_callback(_check) - user_cb = kwargs.get("_callback", None) + user_cb = kwargs.get("_callback") if user_cb: index_future.add_callback(user_cb) return index_future @@ -1054,6 +1060,10 @@ def describe_index( info_dict["index_name"] = response.index_descriptions[0].index_name if info_dict.get("params"): info_dict["params"] = json.loads(info_dict["params"]) + info_dict["total_rows"] = response.index_descriptions[0].total_rows + info_dict["indexed_rows"] = response.index_descriptions[0].indexed_rows + info_dict["pending_index_rows"] = response.index_descriptions[0].pending_index_rows + info_dict["state"] = common_pb2.IndexState.Name(response.index_descriptions[0].state) return info_dict raise AmbiguousIndexName(message=ExceptionsMessage.AmbiguousIndexName) @@ -1073,6 +1083,7 @@ def get_index_build_progress( "total_rows": index_desc.total_rows, "indexed_rows": index_desc.indexed_rows, "pending_index_rows": index_desc.pending_index_rows, + "state": common_pb2.IndexState.Name(index_desc.state), } raise AmbiguousIndexName(message=ExceptionsMessage.AmbiguousIndexName) @@ -1137,17 +1148,31 @@ def load_collection( check_pass_param( collection_name=collection_name, replica_number=replica_number, timeout=timeout ) - _refresh = kwargs.get("_refresh", False) - _resource_groups = kwargs.get("_resource_groups") + # leading _ is misused for keywork escape for `async` + # other params now support prefix _ or not + # params without leading "_" have higher priority + refresh = kwargs.get("refresh", kwargs.get("_refresh", False)) + resource_groups = kwargs.get("resource_groups", kwargs.get("_resource_groups")) + load_fields = kwargs.get("load_fields", kwargs.get("_load_fields")) + skip_load_dynamic_field = kwargs.get( + "skip_load_dynamic_field", kwargs.get("_skip_load_dynamic_field", False) + ) + request = Prepare.load_collection( - "", collection_name, replica_number, _refresh, _resource_groups + "", + collection_name, + replica_number, + refresh, + resource_groups, + load_fields, + skip_load_dynamic_field, ) rf = self._stub.LoadCollection.future(request, timeout=timeout) response = rf.result() check_status(response) _async = kwargs.get("_async", False) if not _async: - self.wait_for_loading_collection(collection_name, timeout, is_refresh=_refresh) + self.wait_for_loading_collection(collection_name, timeout, is_refresh=refresh) @retry_on_rpc_failure() def load_collection_progress(self, collection_name: str, timeout: Optional[float] = None): @@ -1200,10 +1225,25 @@ def load_partitions( replica_number=replica_number, timeout=timeout, ) - _refresh = kwargs.get("_refresh", False) - _resource_groups = kwargs.get("_resource_groups") + # leading _ is misused for keywork escape for `async` + # other params now support prefix _ or not + # params without leading "_" have higher priority + refresh = kwargs.get("refresh", kwargs.get("_refresh", False)) + resource_groups = kwargs.get("resource_groups", kwargs.get("_resource_groups")) + load_fields = kwargs.get("load_fields", kwargs.get("_load_fields")) + skip_load_dynamic_field = kwargs.get( + "skip_load_dynamic_field", kwargs.get("_skip_load_dynamic_field", False) + ) + request = Prepare.load_partitions( - "", collection_name, partition_names, replica_number, _refresh, _resource_groups + "", + collection_name, + partition_names, + replica_number, + refresh, + resource_groups, + load_fields, + skip_load_dynamic_field, ) future = self._stub.LoadPartitions.future(request, timeout=timeout) @@ -1212,13 +1252,13 @@ def load_partitions( def _check(): if 
kwargs.get("sync", True): self.wait_for_loading_partitions( - collection_name, partition_names, is_refresh=_refresh + collection_name, partition_names, is_refresh=refresh ) load_partitions_future = LoadPartitionsFuture(future) load_partitions_future.add_callback(_check) - user_cb = kwargs.get("_callback", None) + user_cb = kwargs.get("_callback") if user_cb: load_partitions_future.add_callback(user_cb) @@ -1228,7 +1268,7 @@ def _check(): check_status(response) sync = kwargs.get("sync", True) if sync: - self.wait_for_loading_partitions(collection_name, partition_names, is_refresh=_refresh) + self.wait_for_loading_partitions(collection_name, partition_names, is_refresh=refresh) return None return None @@ -1272,8 +1312,8 @@ def get_loading_progress( return response.progress @retry_on_rpc_failure() - def create_database(self, db_name: str, timeout: Optional[float] = None): - request = Prepare.create_database_req(db_name) + def create_database(self, db_name: str, timeout: Optional[float] = None, **kwargs): + request = Prepare.create_database_req(db_name, **kwargs) status = self._stub.CreateDatabase(request, timeout=timeout) check_status(status) @@ -1290,6 +1330,21 @@ def list_database(self, timeout: Optional[float] = None): check_status(response.status) return list(response.db_names) + @retry_on_rpc_failure() + def alter_database( + self, db_name: str, properties: dict, timeout: Optional[float] = None, **kwargs + ): + request = Prepare.alter_database_req(db_name, properties) + status = self._stub.AlterDatabase(request, timeout=timeout) + check_status(status) + + @retry_on_rpc_failure() + def describe_database(self, db_name: str, timeout: Optional[float] = None): + request = Prepare.describe_database_req(db_name=db_name) + resp = self._stub.DescribeDatabase(request, timeout=timeout) + check_status(resp.status) + return DatabaseInfo(resp) + @retry_on_rpc_failure() def get_load_state( self, @@ -1412,7 +1467,7 @@ def _check(): flush_future = FlushFuture(future) flush_future.add_callback(_check) - user_cb = kwargs.get("_callback", None) + user_cb = kwargs.get("_callback") if user_cb: flush_future.add_callback(user_cb) @@ -1505,7 +1560,10 @@ def query( response.fields_data, index, dynamic_fields ) results.append(entity_row_data) - return ExtraList(results, extra=get_cost_extra(response.status)) + + extra_dict = get_cost_extra(response.status) + extra_dict[ITERATOR_SESSION_TS_FIELD] = response.session_ts + return ExtraList(results, extra=extra_dict) @retry_on_rpc_failure() def load_balance( @@ -1525,13 +1583,19 @@ def load_balance( check_status(status) @retry_on_rpc_failure() - def compact(self, collection_name: str, timeout: Optional[float] = None, **kwargs) -> int: + def compact( + self, + collection_name: str, + is_clustering: Optional[bool] = False, + timeout: Optional[float] = None, + **kwargs, + ) -> int: request = Prepare.describe_collection_request(collection_name) rf = self._stub.DescribeCollection.future(request, timeout=timeout) response = rf.result() check_status(response.status) - req = Prepare.manual_compaction(response.collectionID) + req = Prepare.manual_compaction(response.collectionID, is_clustering) future = self._stub.ManualCompaction.future(req, timeout=timeout) response = future.result() check_status(response.status) @@ -1933,7 +1997,7 @@ def _check(): flush_future = FlushFuture(future) flush_future.add_callback(_check) - user_cb = kwargs.get("_callback", None) + user_cb = kwargs.get("_callback") if user_cb: flush_future.add_callback(user_cb) @@ -1957,3 +2021,42 @@ def 
alloc_timestamp(self, timeout: Optional[float] = None) -> int: response = self._stub.AllocTimestamp(request, timeout=timeout) check_status(response.status) return response.timestamp + + @retry_on_rpc_failure() + def create_privilege_group(self, group_name: str, timeout: Optional[float] = None, **kwargs): + req = Prepare.create_privilege_group_req(group_name) + resp = self._stub.CreatePrivilegeGroup(req, wait_for_ready=True, timeout=timeout) + check_status(resp) + + @retry_on_rpc_failure() + def drop_privilege_group(self, group_name: str, timeout: Optional[float] = None, **kwargs): + req = Prepare.drop_privilege_group_req(group_name) + resp = self._stub.DropPrivilegeGroup(req, wait_for_ready=True, timeout=timeout) + check_status(resp) + + @retry_on_rpc_failure() + def list_privilege_groups(self, timeout: Optional[float] = None, **kwargs): + req = Prepare.list_privilege_groups_req() + resp = self._stub.ListPrivilegeGroups(req, wait_for_ready=True, timeout=timeout) + check_status(resp.status) + return resp.privilege_groups + + @retry_on_rpc_failure() + def add_privileges_to_group( + self, group_name: str, privileges: List[str], timeout: Optional[float] = None, **kwargs + ): + req = Prepare.operate_privilege_group_req( + group_name, privileges, milvus_types.OperatePrivilegeGroupType.AddPrivilegesToGroup + ) + resp = self._stub.OperatePrivilegeGroup(req, wait_for_ready=True, timeout=timeout) + check_status(resp) + + @retry_on_rpc_failure() + def remove_privileges_from_group( + self, group_name: str, privileges: List[str], timeout: Optional[float] = None, **kwargs + ): + req = Prepare.operate_privilege_group_req( + group_name, privileges, milvus_types.OperatePrivilegeGroupType.RemovePrivilegesFromGroup + ) + resp = self._stub.OperatePrivilegeGroup(req, wait_for_ready=True, timeout=timeout) + check_status(resp) diff --git a/pymilvus/client/prepare.py b/pymilvus/client/prepare.py index a756a1dc7..9e4820ab2 100644 --- a/pymilvus/client/prepare.py +++ b/pymilvus/client/prepare.py @@ -3,21 +3,27 @@ from typing import Any, Dict, Iterable, List, Mapping, Optional, Union import numpy as np +import ujson -from pymilvus.client import __version__, entity_helper from pymilvus.exceptions import DataNotMatchException, ExceptionsMessage, ParamError from pymilvus.grpc_gen import common_pb2 as common_types from pymilvus.grpc_gen import milvus_pb2 as milvus_types from pymilvus.grpc_gen import schema_pb2 as schema_types from pymilvus.orm.schema import CollectionSchema +from pymilvus.orm.types import infer_dtype_by_scalar_data -from . import blob, ts_utils, utils +from . 
import __version__, blob, entity_helper, ts_utils, utils from .check import check_pass_param, is_legal_collection_properties from .constants import ( DEFAULT_CONSISTENCY_LEVEL, + DYNAMIC_FIELD_NAME, GROUP_BY_FIELD, + GROUP_SIZE, ITERATOR_FIELD, + PAGE_RETAIN_ORDER_FIELD, + RANK_GROUP_SCORER, REDUCE_STOP_FOR_BEST, + STRICT_GROUP_SIZE, ) from .types import ( DataType, @@ -25,7 +31,7 @@ ResourceGroupConfig, get_consistency_level, ) -from .utils import traverse_info, traverse_rows_info +from .utils import traverse_info, traverse_upsert_info class Prepare: @@ -85,7 +91,7 @@ def create_collection_request( raise ParamError(message=msg) req.shards_num = num_shards - num_partitions = kwargs.get("num_partitions", None) + num_partitions = kwargs.get("num_partitions") if num_partitions is not None: if not isinstance(num_partitions, int) or isinstance(num_partitions, bool): msg = f"invalid num_partitions type, got {type(num_partitions)}, expected int" @@ -123,17 +129,36 @@ def get_schema_from_collection_schema( data_type=f.dtype, description=f.description, is_primary_key=f.is_primary, + default_value=f.default_value, + nullable=f.nullable, autoID=f.auto_id, is_partition_key=f.is_partition_key, is_dynamic=f.is_dynamic, element_type=f.element_type, is_clustering_key=f.is_clustering_key, + is_function_output=f.is_function_output, ) for k, v in f.params.items(): - kv_pair = common_types.KeyValuePair(key=str(k), value=str(v)) + kv_pair = common_types.KeyValuePair( + key=str(k) if k != "mmap_enabled" else "mmap.enabled", value=ujson.dumps(v) + ) field_schema.type_params.append(kv_pair) schema.fields.append(field_schema) + + for f in fields.functions: + function_schema = schema_types.FunctionSchema( + name=f.name, + description=f.description, + type=f.type, + input_field_names=f.input_field_names, + output_field_names=f.output_field_names, + ) + for k, v in f.params.items(): + kv_pair = common_types.KeyValuePair(key=str(k), value=str(v)) + function_schema.params.append(kv_pair) + schema.functions.append(function_schema) + return schema @staticmethod @@ -163,6 +188,10 @@ def get_field_schema( raise ParamError(message=msg) primary_field = field_name + nullable = field.get("nullable", False) + if not isinstance(nullable, bool): + raise ParamError(message="nullable must be boolean") + auto_id = field.get("auto_id", False) if not isinstance(auto_id, bool): raise ParamError(message="auto_id must be boolean") @@ -187,7 +216,12 @@ def get_field_schema( type_params = field.get("params", {}) if not isinstance(type_params, dict): raise ParamError(message="params should be dictionary type") - kvs = [common_types.KeyValuePair(key=str(k), value=str(v)) for k, v in type_params.items()] + kvs = [ + common_types.KeyValuePair( + key=str(k) if k != "mmap_enabled" else "mmap.enabled", value=str(v) + ) + for k, v in type_params.items() + ] field_schema.type_params.extend(kvs) return field_schema, primary_field, auto_id_field @@ -308,7 +342,7 @@ def show_partitions_request( if partition_names: if not isinstance(partition_names, (list,)): msg = f"partition_names must be a list of strings, but got: {partition_names}" - raise ParamError(msg) + raise ParamError(message=msg) for partition_name in partition_names: check_pass_param(partition_name=partition_name) req.partition_names.extend(partition_names) @@ -353,24 +387,36 @@ def partition_name(cls, collection_name: str, partition_name: str): raise ParamError(message="partition_name must be of str type") return milvus_types.PartitionName(collection_name=collection_name, 
tag=partition_name) + @staticmethod + def _is_input_field(field: Dict, is_upsert: bool): + return (not field.get("auto_id", False) or is_upsert) and not field.get( + "is_function_output", False + ) + + @staticmethod + def _num_input_fields(fields_info: List[Dict], is_upsert: bool): + return len([field for field in fields_info if Prepare._is_input_field(field, is_upsert)]) + @staticmethod def _parse_row_request( request: Union[milvus_types.InsertRequest, milvus_types.UpsertRequest], - fields_info: dict, + fields_info: List[Dict], enable_dynamic: bool, entities: List, ): + input_fields_info = [ + field for field in fields_info if Prepare._is_input_field(field, is_upsert=False) + ] fields_data = { field["name"]: schema_types.FieldData(field_name=field["name"], type=field["type"]) - for field in fields_info - if not field.get("auto_id", False) - } - field_info_map = { - field["name"]: field for field in fields_info if not field.get("auto_id", False) + for field in input_fields_info } + field_info_map = {field["name"]: field for field in input_fields_info} if enable_dynamic: - d_field = schema_types.FieldData(is_dynamic=True, type=DataType.JSON) + d_field = schema_types.FieldData( + field_name=DYNAMIC_FIELD_NAME, is_dynamic=True, type=DataType.JSON + ) fields_data[d_field.field_name] = d_field field_info_map[d_field.field_name] = d_field @@ -381,12 +427,30 @@ def _parse_row_request( raise TypeError(msg) for k, v in entity.items(): if k not in fields_data and not enable_dynamic: - raise DataNotMatchException(message=ExceptionsMessage.InsertUnexpectedField) + raise DataNotMatchException( + message=ExceptionsMessage.InsertUnexpectedField % k + ) if k in fields_data: field_info, field_data = field_info_map[k], fields_data[k] + if field_info.get("nullable", False) or field_info.get( + "default_value", None + ): + field_data.valid_data.append(v is not None) entity_helper.pack_field_value_to_field_data(v, field_data, field_info) - + for field in input_fields_info: + key = field["name"] + if key in entity: + continue + + field_info, field_data = field_info_map[key], fields_data[key] + if field_info.get("nullable", False) or field_info.get("default_value", None): + field_data.valid_data.append(False) + entity_helper.pack_field_value_to_field_data(None, field_data, field_info) + else: + raise DataNotMatchException( + message=ExceptionsMessage.InsertMissedField % key + ) json_dict = { k: v for k, v in entity.items() if k not in fields_data and enable_dynamic } @@ -398,21 +462,102 @@ def _parse_row_request( except (TypeError, ValueError) as e: raise DataNotMatchException(message=ExceptionsMessage.DataTypeInconsistent) from e - request.fields_data.extend( - [fields_data[field["name"]] for field in fields_info if not field.get("auto_id", False)] - ) + request.fields_data.extend(fields_data.values()) + + expected_num_input_fields = len(input_fields_info) + (1 if enable_dynamic else 0) + + if len(fields_data) != expected_num_input_fields: + msg = f"{ExceptionsMessage.FieldsNumInconsistent}, expected {expected_num_input_fields} fields, got {len(fields_data)}" + raise ParamError(message=msg) + + return request + + @staticmethod + def _parse_upsert_row_request( + request: Union[milvus_types.InsertRequest, milvus_types.UpsertRequest], + fields_info: List[Dict], + enable_dynamic: bool, + entities: List, + ): + input_fields_info = [ + field for field in fields_info if Prepare._is_input_field(field, is_upsert=True) + ] + fields_data = { + field["name"]: schema_types.FieldData(field_name=field["name"], 
type=field["type"]) + for field in input_fields_info + } + field_info_map = {field["name"]: field for field in input_fields_info} if enable_dynamic: - request.fields_data.append(d_field) + d_field = schema_types.FieldData( + field_name=DYNAMIC_FIELD_NAME, is_dynamic=True, type=DataType.JSON + ) + fields_data[d_field.field_name] = d_field + field_info_map[d_field.field_name] = d_field + + try: + for entity in entities: + if not isinstance(entity, Dict): + msg = f"expected Dict, got '{type(entity).__name__}'" + raise TypeError(msg) + for k, v in entity.items(): + if k not in fields_data and not enable_dynamic: + raise DataNotMatchException( + message=ExceptionsMessage.InsertUnexpectedField % k + ) + + if k in fields_data: + field_info, field_data = field_info_map[k], fields_data[k] + if field_info.get("nullable", False) or field_info.get( + "default_value", None + ): + field_data.valid_data.append(v is not None) + entity_helper.pack_field_value_to_field_data(v, field_data, field_info) + for field in input_fields_info: + key = field["name"] + if key in entity: + continue + + field_info, field_data = field_info_map[key], fields_data[key] + if field_info.get("nullable", False) or field_info.get("default_value", None): + field_data.valid_data.append(False) + entity_helper.pack_field_value_to_field_data(None, field_data, field_info) + else: + raise DataNotMatchException( + message=ExceptionsMessage.InsertMissedField % key + ) + json_dict = { + k: v for k, v in entity.items() if k not in fields_data and enable_dynamic + } + + if enable_dynamic: + json_value = entity_helper.convert_to_json(json_dict) + d_field.scalars.json_data.data.append(json_value) + + except (TypeError, ValueError) as e: + raise DataNotMatchException(message=ExceptionsMessage.DataTypeInconsistent) from e + + request.fields_data.extend(fields_data.values()) + + for _, field in enumerate(input_fields_info): + is_dynamic = False + field_name = field["name"] + + if field.get("is_dynamic", False): + is_dynamic = True + + for j, entity in enumerate(entities): + if is_dynamic and field_name in entity: + raise ParamError( + message=f"dynamic field enabled, {field_name} shouldn't in entities[{j}]" + ) + + expected_num_input_fields = len(input_fields_info) + (1 if enable_dynamic else 0) + + if len(fields_data) != expected_num_input_fields: + msg = f"{ExceptionsMessage.FieldsNumInconsistent}, expected {expected_num_input_fields} fields, got {len(fields_data)}" + raise ParamError(message=msg) - _, _, auto_id_loc = traverse_rows_info(fields_info, entities) - if auto_id_loc is not None: - if (enable_dynamic and len(fields_data) != len(fields_info)) or ( - not enable_dynamic and len(fields_data) + 1 != len(fields_info) - ): - raise ParamError(ExceptionsMessage.FieldsNumInconsistent) - elif enable_dynamic and len(fields_data) != len(fields_info) + 1: - raise ParamError(ExceptionsMessage.FieldsNumInconsistent) return request @classmethod @@ -457,10 +602,41 @@ def row_upsert_param( num_rows=len(entities), ) - return cls._parse_row_request(request, fields_info, enable_dynamic, entities) + return cls._parse_upsert_row_request(request, fields_info, enable_dynamic, entities) + + @staticmethod + def _pre_insert_batch_check( + entities: List, + fields_info: Any, + ): + for entity in entities: + if ( + entity.get("name") is None + or entity.get("values") is None + or entity.get("type") is None + ): + raise ParamError( + message="Missing param in entities, a field must have type, name and values" + ) + if not fields_info: + raise 
ParamError(message="Missing collection meta to validate entities") + + location, primary_key_loc, _ = traverse_info(fields_info) + + # though impossible from sdk + if primary_key_loc is None: + raise ParamError(message="primary key not found") + + expected_num_input_fields = Prepare._num_input_fields(fields_info, is_upsert=False) + + if len(entities) != expected_num_input_fields: + msg = f"expected number of fields: {expected_num_input_fields}, actual number of fields in entities: {len(entities)}" + raise ParamError(message=msg) + + return location @staticmethod - def _pre_batch_check( + def _pre_upsert_batch_check( entities: List, fields_info: Any, ): @@ -476,19 +652,17 @@ def _pre_batch_check( if not fields_info: raise ParamError(message="Missing collection meta to validate entities") - location, primary_key_loc, auto_id_loc = traverse_info(fields_info) + location, primary_key_loc = traverse_upsert_info(fields_info) # though impossible from sdk if primary_key_loc is None: raise ParamError(message="primary key not found") - if auto_id_loc is None and len(entities) != len(fields_info): - msg = f"number of fields: {len(fields_info)}, number of entities: {len(entities)}" - raise ParamError(msg) + expected_num_input_fields = Prepare._num_input_fields(fields_info, is_upsert=True) - if auto_id_loc is not None and len(entities) + 1 != len(fields_info): - msg = f"number of fields: {len(fields_info)}, number of entities: {len(entities)}" - raise ParamError(msg) + if len(entities) != expected_num_input_fields: + msg = f"expected number of fields: {expected_num_input_fields}, actual number of fields in entities: {len(entities)}" + raise ParamError(message=msg) return location @staticmethod @@ -502,17 +676,22 @@ def _parse_batch_request( try: for entity in entities: latest_field_size = entity_helper.get_input_num_rows(entity.get("values")) - if pre_field_size not in (0, latest_field_size): - raise ParamError( - message=( - f"Field data size misaligned for field [{entity.get('name')}] ", - f"got size=[{latest_field_size}] ", - f"alignment size=[{pre_field_size}]", + if latest_field_size != 0: + if pre_field_size not in (0, latest_field_size): + raise ParamError( + message=( + f"Field data size misaligned for field [{entity.get('name')}] ", + f"got size=[{latest_field_size}] ", + f"alignment size=[{pre_field_size}]", + ) ) - ) - pre_field_size = latest_field_size + pre_field_size = latest_field_size + if pre_field_size == 0: + raise ParamError(message=ExceptionsMessage.NumberRowsInvalid) + request.num_rows = pre_field_size + for entity in entities: field_data = entity_helper.entity_to_field_data( - entity, fields_info[location[entity.get("name")]] + entity, fields_info[location[entity.get("name")]], request.num_rows ) request.fields_data.append(field_data) except (TypeError, ValueError) as e: @@ -531,7 +710,7 @@ def batch_insert_param( partition_name: str, fields_info: Any, ): - location = cls._pre_batch_check(entities, fields_info) + location = cls._pre_insert_batch_check(entities, fields_info) tag = partition_name if isinstance(partition_name, str) else "" request = milvus_types.InsertRequest(collection_name=collection_name, partition_name=tag) @@ -545,7 +724,7 @@ def batch_upsert_param( partition_name: str, fields_info: Any, ): - location = cls._pre_batch_check(entities, fields_info) + location = cls._pre_upsert_batch_check(entities, fields_info) tag = partition_name if isinstance(partition_name, str) else "" request = milvus_types.UpsertRequest(collection_name=collection_name, partition_name=tag) 
@@ -579,6 +758,7 @@ def check_str(instr: str, prefix: str): partition_name=partition_name, expr=expr, consistency_level=get_consistency_level(consistency_level), + expr_template_values=cls.prepare_expression_template(kwargs.get("expr_params", {})), ) @classmethod @@ -613,6 +793,10 @@ def _prepare_placeholder_str(cls, data: Any): pl_type = PlaceholderType.BinaryVector pl_values = data # data is already a list of bytes + elif isinstance(data[0], str): + pl_type = PlaceholderType.VARCHAR + pl_values = (value.encode("utf-8") for value in data) + else: pl_type = PlaceholderType.FloatVector pl_values = (blob.vector_float_to_bytes(entity) for entity in data) @@ -622,11 +806,80 @@ def _prepare_placeholder_str(cls, data: Any): common_types.PlaceholderGroup(placeholders=[pl]) ) + @classmethod + def prepare_expression_template(cls, values: Dict) -> Any: + def all_elements_same_type(lst: List): + return all(isinstance(item, type(lst[0])) for item in lst) + + def add_array_data(v: List) -> schema_types.TemplateArrayValue: + data = schema_types.TemplateArrayValue() + if len(v) == 0: + return data + element_type = ( + infer_dtype_by_scalar_data(v[0]) if all_elements_same_type(v) else schema_types.JSON + ) + if element_type in (schema_types.Bool,): + data.bool_data.data.extend(v) + return data + if element_type in ( + schema_types.Int8, + schema_types.Int16, + schema_types.Int32, + schema_types.Int64, + ): + data.long_data.data.extend(v) + return data + if element_type in (schema_types.Float, schema_types.Double): + data.double_data.data.extend(v) + return data + if element_type in (schema_types.VarChar, schema_types.String): + data.string_data.data.extend(v) + return data + if element_type in (schema_types.Array,): + for e in v: + data.array_data.data.append(add_array_data(e)) + return data + if element_type in (schema_types.JSON,): + for e in v: + data.json_data.data.append(entity_helper.convert_to_json(e)) + return data + raise ParamError(message=f"Unsupported element type: {element_type}") + + def add_data(v: Any) -> schema_types.TemplateValue: + dtype = infer_dtype_by_scalar_data(v) + data = schema_types.TemplateValue() + if dtype in (schema_types.Bool,): + data.bool_val = v + return data + if dtype in ( + schema_types.Int8, + schema_types.Int16, + schema_types.Int32, + schema_types.Int64, + ): + data.int64_val = v + return data + if dtype in (schema_types.Float, schema_types.Double): + data.float_val = v + return data + if dtype in (schema_types.VarChar, schema_types.String): + data.string_val = v + return data + if dtype in (schema_types.Array,): + data.array_val.CopyFrom(add_array_data(v)) + return data + raise ParamError(message=f"Unsupported element type: {dtype}") + + expression_template_values = {} + for k, v in values.items(): + expression_template_values[k] = add_data(v) + return expression_template_values + @classmethod def search_requests_with_expr( cls, collection_name: str, - data: Union[List, entity_helper.SparseMatrixInputType], + data: Union[List, utils.SparseMatrixInputType], anns_field: str, param: Dict, limit: int, @@ -643,6 +896,20 @@ def search_requests_with_expr( if not isinstance(params, dict): raise ParamError(message=f"Search params must be a dict, got {type(params)}") + if PAGE_RETAIN_ORDER_FIELD in kwargs and PAGE_RETAIN_ORDER_FIELD in param: + raise ParamError( + message="Provide page_retain_order both in kwargs and param, expect just one" + ) + page_retain_order = kwargs.get(PAGE_RETAIN_ORDER_FIELD) or param.get( + PAGE_RETAIN_ORDER_FIELD + ) + if page_retain_order is not 
None: + if not isinstance(page_retain_order, bool): + raise ParamError( + message=f"wrong type for page_retain_order, expect bool, got {type(page_retain_order)}" + ) + params[PAGE_RETAIN_ORDER_FIELD] = page_retain_order + search_params = { "topk": limit, "params": params, @@ -668,6 +935,14 @@ def search_requests_with_expr( if group_by_field is not None: search_params[GROUP_BY_FIELD] = group_by_field + group_size = kwargs.get(GROUP_SIZE) + if group_size is not None: + search_params[GROUP_SIZE] = group_size + + strict_group_size = kwargs.get(STRICT_GROUP_SIZE) + if strict_group_size is not None: + search_params[STRICT_GROUP_SIZE] = strict_group_size + if param.get("metric_type") is not None: search_params["metric_type"] = param["metric_type"] @@ -692,6 +967,7 @@ def search_requests_with_expr( placeholder_group=plg_str, dsl_type=common_types.DslType.BoolExprV1, search_params=req_params, + expr_template_values=cls.prepare_expression_template(kwargs.get("expr_params", {})), ) if expr is not None: request.dsl = expr @@ -732,6 +1008,42 @@ def hybrid_search_request_with_ranker( ] ) + if kwargs.get(RANK_GROUP_SCORER) is not None: + request.rank_params.extend( + [ + common_types.KeyValuePair( + key=RANK_GROUP_SCORER, value=kwargs.get(RANK_GROUP_SCORER) + ) + ] + ) + + if kwargs.get(GROUP_BY_FIELD) is not None: + request.rank_params.extend( + [ + common_types.KeyValuePair( + key=GROUP_BY_FIELD, value=utils.dumps(kwargs.get(GROUP_BY_FIELD)) + ) + ] + ) + + if kwargs.get(GROUP_SIZE) is not None: + request.rank_params.extend( + [ + common_types.KeyValuePair( + key=GROUP_SIZE, value=utils.dumps(kwargs.get(GROUP_SIZE)) + ) + ] + ) + + if kwargs.get(STRICT_GROUP_SIZE) is not None: + request.rank_params.extend( + [ + common_types.KeyValuePair( + key=STRICT_GROUP_SIZE, value=utils.dumps(kwargs.get(STRICT_GROUP_SIZE)) + ) + ] + ) + return request @classmethod @@ -808,6 +1120,8 @@ def load_collection( replica_number: int, refresh: bool, resource_groups: List[str], + load_fields: List[str], + skip_load_dynamic_field: bool, ): return milvus_types.LoadCollectionRequest( db_name=db_name, @@ -815,6 +1129,8 @@ def load_collection( replica_number=replica_number, refresh=refresh, resource_groups=resource_groups, + load_fields=load_fields, + skip_load_dynamic_field=skip_load_dynamic_field, ) @classmethod @@ -832,6 +1148,8 @@ def load_partitions( replica_number: int, refresh: bool, resource_groups: List[str], + load_fields: List[str], + skip_load_dynamic_field: bool, ): return milvus_types.LoadPartitionsRequest( db_name=db_name, @@ -840,6 +1158,8 @@ def load_partitions( replica_number=replica_number, refresh=refresh, resource_groups=resource_groups, + load_fields=load_fields, + skip_load_dynamic_field=skip_load_dynamic_field, ) @classmethod @@ -925,18 +1245,25 @@ def query_request( guarantee_timestamp=kwargs.get("guarantee_timestamp", 0), use_default_consistency=use_default_consistency, consistency_level=kwargs.get("consistency_level", 0), + expr_template_values=cls.prepare_expression_template(kwargs.get("expr_params", {})), ) - limit = kwargs.get("limit", None) + limit = kwargs.get("limit") if limit is not None: req.query_params.append(common_types.KeyValuePair(key="limit", value=str(limit))) - offset = kwargs.get("offset", None) + offset = kwargs.get("offset") if offset is not None: req.query_params.append(common_types.KeyValuePair(key="offset", value=str(offset))) ignore_growing = kwargs.get("ignore_growing", False) stop_reduce_for_best = kwargs.get(REDUCE_STOP_FOR_BEST, False) + is_iterator = 
kwargs.get(ITERATOR_FIELD) + if is_iterator is not None: + req.query_params.append( + common_types.KeyValuePair(key=ITERATOR_FIELD, value=is_iterator) + ) + req.query_params.append( common_types.KeyValuePair(key="ignore_growing", value=str(ignore_growing)) ) @@ -961,12 +1288,16 @@ def load_balance_request( ) @classmethod - def manual_compaction(cls, collection_id: int): + def manual_compaction(cls, collection_id: int, is_clustering: bool): if collection_id is None or not isinstance(collection_id, int): raise ParamError(message=f"collection_id value {collection_id} is illegal") + if is_clustering is None or not isinstance(is_clustering, bool): + raise ParamError(message=f"is_clustering value {is_clustering} is illegal") + request = milvus_types.ManualCompactionRequest() request.collectionID = collection_id + request.majorCompaction = is_clustering return request @@ -1000,7 +1331,7 @@ def get_replicas(cls, collection_id: int): @classmethod def do_bulk_insert(cls, collection_name: str, partition_name: str, files: list, **kwargs): - channel_names = kwargs.get("channel_names", None) + channel_names = kwargs.get("channel_names") req = milvus_types.ImportRequest( collection_name=collection_name, partition_name=partition_name, @@ -1010,7 +1341,7 @@ def do_bulk_insert(cls, collection_name: str, partition_name: str, files: list, req.channel_names.extend(channel_names) for k, v in kwargs.items(): - if k in ("bucket",): + if k in ("bucket", "backup", "sep", "nullkey"): kv_pair = common_types.KeyValuePair(key=str(k), value=str(v)) req.options.append(kv_pair) @@ -1020,7 +1351,7 @@ def do_bulk_insert(cls, collection_name: str, partition_name: str, files: list, def get_bulk_insert_state(cls, task_id: int): if task_id is None or not isinstance(task_id, int): msg = f"task_id value {task_id} is not an integer" - raise ParamError(msg) + raise ParamError(message=msg) return milvus_types.GetImportStateRequest(task=task_id) @@ -1028,7 +1359,7 @@ def get_bulk_insert_state(cls, task_id: int): def list_bulk_insert_tasks(cls, limit: int, collection_name: str): if limit is None or not isinstance(limit, int): msg = f"limit value {limit} is not an integer" - raise ParamError(msg) + raise ParamError(message=msg) return milvus_types.ListImportTasksRequest( collection_name=collection_name, @@ -1226,9 +1557,17 @@ def register_request(cls, user: str, host: str, **kwargs): ) @classmethod - def create_database_req(cls, db_name: str): + def create_database_req(cls, db_name: str, **kwargs): check_pass_param(db_name=db_name) - return milvus_types.CreateDatabaseRequest(db_name=db_name) + + req = milvus_types.CreateDatabaseRequest(db_name=db_name) + properties = kwargs.get("properties") + if is_legal_collection_properties(properties): + properties = [ + common_types.KeyValuePair(key=str(k), value=str(v)) for k, v in properties.items() + ] + req.properties.extend(properties) + return req @classmethod def drop_database_req(cls, db_name: str): @@ -1238,3 +1577,46 @@ def drop_database_req(cls, db_name: str): @classmethod def list_database_req(cls): return milvus_types.ListDatabasesRequest() + + @classmethod + def alter_database_req(cls, db_name: str, properties: Dict): + check_pass_param(db_name=db_name) + kvs = [common_types.KeyValuePair(key=k, value=str(v)) for k, v in properties.items()] + return milvus_types.AlterDatabaseRequest(db_name=db_name, properties=kvs) + + @classmethod + def describe_database_req(cls, db_name: str): + check_pass_param(db_name=db_name) + return milvus_types.DescribeDatabaseRequest(db_name=db_name) + + 
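The alter_database_req and describe_database_req builders above back the alter_database and describe_database handler methods added earlier in this patch. A rough usage sketch at the GrpcHandler level; the endpoint, channel-readiness handling, and the specific property keys are assumptions, not part of the diff:

```python
from pymilvus.client.grpc_handler import GrpcHandler

# assumed endpoint; applications would normally connect via connections.connect()
handler = GrpcHandler(uri="http://localhost:19530")

# property keys here are illustrative only
handler.create_database("demo_db", properties={"database.replica.number": 1})
handler.alter_database("demo_db", properties={"database.max.collections": 16})

info = handler.describe_database("demo_db")  # returns the new DatabaseInfo wrapper
print(info.name, info.properties)
```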
@classmethod + def create_privilege_group_req(cls, group_name: str): + check_pass_param(group_name=group_name) + return milvus_types.CreatePrivilegeGroupRequest(group_name=group_name) + + @classmethod + def drop_privilege_group_req(cls, group_name: str): + check_pass_param(group_name=group_name) + return milvus_types.DropPrivilegeGroupRequest(group_name=group_name) + + @classmethod + def list_privilege_groups_req(cls): + return milvus_types.ListPrivilegeGroupsRequest() + + @classmethod + def operate_privilege_group_req(cls, group_name: str, privileges: List[str], operate_type: Any): + check_pass_param(group_name=group_name) + check_pass_param(operate_type=operate_type) + if not isinstance( + privileges, + (list), + ): + msg = f"Privileges {privileges} is not a list" + raise ParamError(message=msg) + for p in privileges: + check_pass_param(privilege=p) + return milvus_types.OperatePrivilegeGroupRequest( + group_name=group_name, + privileges=[milvus_types.PrivilegeEntity(name=p) for p in privileges], + type=operate_type, + ) diff --git a/pymilvus/client/stub.py b/pymilvus/client/stub.py index e5b200d0e..62d82797b 100644 --- a/pymilvus/client/stub.py +++ b/pymilvus/client/stub.py @@ -1044,13 +1044,16 @@ def load_balance( **kwargs, ) - def compact(self, collection_name, timeout=None, **kwargs) -> int: + def compact(self, collection_name, is_clustering=False, timeout=None, **kwargs) -> int: """ Do compaction for the collection. :param collection_name: The collection name to compact :type collection_name: str + :param is_clustering: trigger clustering compaction + :type is_clustering: bool + :param timeout: The timeout for this method, unit: second :type timeout: int @@ -1060,15 +1063,22 @@ def compact(self, collection_name, timeout=None, **kwargs) -> int: :raises MilvusException: If collection name not exist. 
""" with self._connection() as handler: - return handler.compact(collection_name, timeout=timeout, **kwargs) + return handler.compact( + collection_name, is_clustering=is_clustering, timeout=timeout, **kwargs + ) - def get_compaction_state(self, compaction_id: int, timeout=None, **kwargs) -> CompactionState: + def get_compaction_state( + self, compaction_id: int, is_clustering=False, timeout=None, **kwargs + ) -> CompactionState: """ Get compaction states of a targeted compaction id :param compaction_id: the id returned by compact :type compaction_id: int + :param is_clustering: get clustering compaction + :type is_clustering: bool + :param timeout: The timeout for this method, unit: second :type timeout: int @@ -1079,7 +1089,9 @@ def get_compaction_state(self, compaction_id: int, timeout=None, **kwargs) -> Co """ with self._connection() as handler: - return handler.get_compaction_state(compaction_id, timeout=timeout, **kwargs) + return handler.get_compaction_state( + compaction_id, is_clustering=is_clustering, timeout=timeout, **kwargs + ) def wait_for_compaction_completed( self, compaction_id: int, timeout=None, **kwargs diff --git a/pymilvus/client/ts_utils.py b/pymilvus/client/ts_utils.py index c260aa4a8..78fc1309e 100644 --- a/pymilvus/client/ts_utils.py +++ b/pymilvus/client/ts_utils.py @@ -4,7 +4,7 @@ from pymilvus.grpc_gen import common_pb2 -from .constants import BOUNDED_TS, EVENTUALLY_TS +from .constants import BOUNDED_TS, EVENTUALLY_TS, GUARANTEE_TIMESTAMP, ITERATOR_FIELD from .singleton_utils import Singleton from .types import get_consistency_level from .utils import hybridts_to_unixtime @@ -75,26 +75,29 @@ def get_bounded_ts(): def construct_guarantee_ts(collection_name: str, kwargs: Dict): + if kwargs.get(ITERATOR_FIELD) is not None: + return True + consistency_level = kwargs.get("consistency_level") use_default = consistency_level is None if use_default: # in case of the default consistency is Customized or Session, # we set guarantee_timestamp to the cached mutation ts or 1 - kwargs["guarantee_timestamp"] = get_collection_ts(collection_name) or get_eventually_ts() + kwargs[GUARANTEE_TIMESTAMP] = get_collection_ts(collection_name) or get_eventually_ts() return True consistency_level = get_consistency_level(consistency_level) kwargs["consistency_level"] = consistency_level if consistency_level == ConsistencyLevel.Strong: # Milvus will assign a newest ts. - kwargs["guarantee_timestamp"] = 0 + kwargs[GUARANTEE_TIMESTAMP] = 0 elif consistency_level == ConsistencyLevel.Session: # Using the last write ts of the collection. # TODO: get a timestamp from server? - kwargs["guarantee_timestamp"] = get_collection_ts(collection_name) or get_eventually_ts() + kwargs[GUARANTEE_TIMESTAMP] = get_collection_ts(collection_name) or get_eventually_ts() elif consistency_level == ConsistencyLevel.Bounded: # Milvus will assign ts according to the server timestamp and a configured time interval - kwargs["guarantee_timestamp"] = get_bounded_ts() + kwargs[GUARANTEE_TIMESTAMP] = get_bounded_ts() else: # Users customize the consistency level, no modification on `guarantee_timestamp`. 
- kwargs.setdefault("guarantee_timestamp", get_eventually_ts()) + kwargs.setdefault(GUARANTEE_TIMESTAMP, get_eventually_ts()) return use_default diff --git a/pymilvus/client/types.py b/pymilvus/client/types.py index 628e21744..509fb6a39 100644 --- a/pymilvus/client/types.py +++ b/pymilvus/client/types.py @@ -14,6 +14,20 @@ ConsistencyLevel = common_pb2.ConsistencyLevel +# OmitZeroDict: ignore the key-value pairs with value as 0 when printing +class OmitZeroDict(dict): + def omit_zero_len(self): + return len(dict(filter(lambda x: x[1], self.items()))) + + # filter the key-value pairs with value as 0 + def __str__(self): + return str(dict(filter(lambda x: x[1], self.items()))) + + # no filter + def __repr__(self): + return str(dict(self)) + + class Status: """ :attribute code: int (optional) default as ok @@ -94,6 +108,12 @@ class DataType(IntEnum): UNKNOWN = 999 +class FunctionType(IntEnum): + UNKNOWN = 0 + BM25 = 1 + TEXTEMBEDDING = 2 + + class RangeType(IntEnum): LT = 0 # less than LTE = 1 # less than or equal @@ -133,7 +153,6 @@ class MetricType(IntEnum): HAMMING = 3 JACCARD = 4 TANIMOTO = 5 - # SUBSTRUCTURE = 6 SUPERSTRUCTURE = 7 @@ -160,6 +179,7 @@ class PlaceholderType(IntEnum): FLOAT16_VECTOR = 102 BFLOAT16_VECTOR = 103 SparseFloatVector = 104 + VARCHAR = 21 class State(IntEnum): @@ -230,6 +250,10 @@ def __init__( self.in_timeout = in_timeout self.completed = completed + @property + def state_name(self): + return self.state.name + def __repr__(self) -> str: return f""" CompactionState @@ -900,11 +924,13 @@ class ExtraList(list): def __init__(self, *args, extra: Optional[Dict] = None, **kwargs) -> None: super().__init__(*args, **kwargs) - self.extra = extra or {} + self.extra = OmitZeroDict(extra or {}) def __str__(self) -> str: """Only print at most 10 query results""" - return f"data: {list(map(str, self[:10]))} {'...' if len(self) else ''}, extra_info: {self.extra}" + if self.extra and self.extra.omit_zero_len() != 0: + return f"data: {list(map(str, self[:10]))} {'...' if len(self) > 10 else ''}, extra_info: {self.extra}" + return f"data: {list(map(str, self[:10]))} {'...' if len(self) > 10 else ''}" __repr__ = __str__ @@ -920,3 +946,32 @@ def get_cost_extra(status: Optional[common_pb2.Status] = None): # Construct extra dict, the cost unit is the vcu, similar to tokenlike the def construct_cost_extra(cost: int): return {"cost": cost} + + +class DatabaseInfo: + """ + Represents the information of a database. + Atributes: + name (str): The name of the database. + properties (dict): The properties of the database. 
+ Example: + DatabaseInfo(name="test_db", id=1, properties={"key": "value"}) + """ + + @property + def name(self) -> str: + return self._name + + @property + def properties(self) -> Dict: + return self._properties + + def __init__(self, info: Any) -> None: + self._name = info.db_name + self._properties = {} + + for p in info.properties: + self.properties[p.key] = p.value + + def __str__(self) -> str: + return f"DatabaseInfo(name={self.name}, properties={self.properties})" diff --git a/pymilvus/client/utils.py b/pymilvus/client/utils.py index 7ecc35670..46bc8173f 100644 --- a/pymilvus/client/utils.py +++ b/pymilvus/client/utils.py @@ -1,6 +1,7 @@ import datetime +import importlib.util from datetime import timedelta -from typing import Any, List, Optional, Union +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union import ujson @@ -239,10 +240,6 @@ def traverse_rows_info(fields_info: Any, entities: List): message=f"dynamic field enabled, {field_name} shouldn't in entities[{j}]" ) - value = entity.get(field_name, None) - if value is None: - raise ParamError(message=f"Field {field_name} don't match in entities[{j}]") - # though impossible from sdk if primary_key_loc is None: raise ParamError(message="primary key not found") @@ -264,9 +261,117 @@ def traverse_info(fields_info: Any): return location, primary_key_loc, auto_id_loc +def traverse_upsert_info(fields_info: Any): + location, primary_key_loc = {}, None + for i, field in enumerate(fields_info): + if field.get("is_primary", False): + primary_key_loc = i + + location[field["name"]] = i + + return location, primary_key_loc + + def get_server_type(host: str): return ZILLIZ if (isinstance(host, str) and "zilliz" in host.lower()) else MILVUS def dumps(v: Union[dict, str]) -> str: return ujson.dumps(v) if isinstance(v, dict) else str(v) + + +class SciPyHelper: + _checked = False + + # whether scipy.sparse.*_matrix classes exists + _matrix_available = False + # whether scipy.sparse.*_array classes exists + _array_available = False + + @classmethod + def _init(cls): + if cls._checked: + return + scipy_spec = importlib.util.find_spec("scipy") + if scipy_spec is not None: + # when scipy is not installed, find_spec("scipy.sparse") directly + # throws exception instead of returning None. + sparse_spec = importlib.util.find_spec("scipy.sparse") + if sparse_spec is not None: + scipy_sparse = importlib.util.module_from_spec(sparse_spec) + sparse_spec.loader.exec_module(scipy_sparse) + # all scipy.sparse.*_matrix classes are introduced in the same scipy + # version, so we only need to check one of them. + cls._matrix_available = hasattr(scipy_sparse, "csr_matrix") + # all scipy.sparse.*_array classes are introduced in the same scipy + # version, so we only need to check one of them. + cls._array_available = hasattr(scipy_sparse, "csr_array") + + cls._checked = True + + @classmethod + def is_spmatrix(cls, data: Any): + cls._init() + if not cls._matrix_available: + return False + from scipy.sparse import isspmatrix + + return isspmatrix(data) + + @classmethod + def is_sparray(cls, data: Any): + cls._init() + if not cls._array_available: + return False + from scipy.sparse import issparse, isspmatrix + + return issparse(data) and not isspmatrix(data) + + @classmethod + def is_scipy_sparse(cls, data: Any): + return cls.is_spmatrix(data) or cls.is_sparray(data) + + +# in search results, if output fields includes a sparse float vector field, we +# will return a SparseRowOutputType for each entity. Using Dict for readability. 
+# TODO(SPARSE): to allow the user to specify output format. +SparseRowOutputType = Dict[int, float] + + +# this import will be called only during static type checking +if TYPE_CHECKING: + from scipy.sparse import ( + bsr_array, + coo_array, + csc_array, + csr_array, + dia_array, + dok_array, + lil_array, + spmatrix, + ) + +# we accept the following types as input for sparse matrix in user facing APIs +# such as insert, search, etc.: +# - scipy sparse array/matrix family: csr, csc, coo, bsr, dia, dok, lil +# - iterable of iterables, each element(iterable) is a sparse vector with index +# as key and value as float. +# dict example: [{2: 0.33, 98: 0.72, ...}, {4: 0.45, 198: 0.52, ...}, ...] +# list of tuple example: [[(2, 0.33), (98, 0.72), ...], [(4, 0.45), ...], ...] +# both index/value can be str numbers: {'2': '3.1'} +SparseMatrixInputType = Union[ + Iterable[ + Union[ + SparseRowOutputType, + Iterable[Tuple[int, float]], # only type hint, we accept int/float like types + ] + ], + "csc_array", + "coo_array", + "bsr_array", + "dia_array", + "dok_array", + "lil_array", + "csr_array", + "spmatrix", +] diff --git a/pymilvus/decorators.py b/pymilvus/decorators.py index e897c3405..a85132039 100644 --- a/pymilvus/decorators.py +++ b/pymilvus/decorators.py @@ -53,8 +53,8 @@ def wrapper(func: Any): def handler(*args, **kwargs): # This has to make sure every timeout parameter is passing # throught kwargs form as `timeout=10` - _timeout = kwargs.get("timeout", None) - _retry_times = kwargs.get("retry_times", None) + _timeout = kwargs.get("timeout") + _retry_times = kwargs.get("retry_times") _retry_on_rate_limit = kwargs.get("retry_on_rate_limit", True) retry_timeout = _timeout if _timeout is not None and isinstance(_timeout, int) else None @@ -96,7 +96,8 @@ def timeout(start_time: Optional[float] = None) -> bool: f"[{func.__name__}] retry:{counter}, cost: {back_off:.2f}s, " f"reason: <{e.__class__.__name__}: {e.code()}, {e.details()}>" ) - LOGGER.warning(WARNING_COLOR.format(retry_msg)) + # retry msg uses info level + LOGGER.info(retry_msg) time.sleep(back_off) back_off = min(back_off * back_off_multiplier, max_back_off) @@ -166,8 +167,8 @@ def tracing_request(): def wrapper(func: Callable): @functools.wraps(func) def handler(self: Callable, *args, **kwargs): - level = kwargs.get("log_level", None) - req_id = kwargs.get("client_request_id", None) + level = kwargs.get("log_level") + req_id = kwargs.get("client_request_id") if level: self.set_onetime_loglevel(level) if req_id: diff --git a/pymilvus/exceptions.py b/pymilvus/exceptions.py index ec91e3ae4..5505f5b7a 100644 --- a/pymilvus/exceptions.py +++ b/pymilvus/exceptions.py @@ -129,6 +129,10 @@ class FieldsTypeException(MilvusException): """Raise when fields is invalid""" +class FunctionsTypeException(MilvusException): + """Raise when functions are invalid""" + + class FieldTypeException(MilvusException): """Raise when one field is invalid""" @@ -141,10 +145,6 @@ class InvalidConsistencyLevel(MilvusException): """Raise when consistency level is invalid""" -class UpsertAutoIDTrueException(MilvusException): - """Raise when upsert autoID is true""" - - class ExceptionsMessage: NoHostPort = "connection configuration must contain 'host' and 'port'." HostType = "Type of 'host' must be str." @@ -198,6 +198,7 @@ class ExceptionsMessage: DataTypeInconsistent = ( "The Input data type is inconsistent with defined schema, please check it." 
) + FieldDataInconsistent = "The input data type is inconsistent with the defined schema: field `%s` should be a %s, but got a %s instead." DataTypeNotSupport = "Data type is not support." DataLengthsInconsistent = "Arrays must all be same length." DataFrameInvalid = "Cannot infer schema from empty dataframe." @@ -207,13 +208,42 @@ class ExceptionsMessage: IndexNotExist = "Index doesn't exist." CollectionType = "The type of collection must be pymilvus.Collection." FieldsType = "The fields of schema must be type list." + FunctionsType = "The functions of collection must be type list." + FunctionIncorrectInputOutputType = "The type of function input and output must be str." + FunctionInvalidOutputField = ( + "The output field must not be a primary key, partition key, or clustering key." + ) + FunctionDuplicateInputs = "Duplicate input field names are not allowed in function." + FunctionDuplicateOutputs = "Duplicate output field names are not allowed in function." + FunctionCommonInputOutput = "Input and output field names must be different." + BM25FunctionIncorrectInputOutputCount = ( + "BM25 function must have exactly one input field and one output field." + ) + TextEmbeddingFunctionIncorrectInputOutputCount = ( + "TextEmbedding function must have exactly one input field and one output field." + ) + TextEmbeddingFunctionIncorrectInputFieldType = ( + "TextEmbedding function input field must be VARCHAR." + ) + TextEmbeddingFunctionIncorrectOutputFieldType = ( + "TextEmbedding function output field must be FLOAT_VECTOR." + ) + BM25FunctionIncorrectInputFieldType = "BM25 function input field must be VARCHAR." + BM25FunctionIncorrectOutputFieldType = "BM25 function output field must be SPARSE_FLOAT_VECTOR." + FunctionMissingInputField = "Function input field not found in collection schema." + FunctionMissingOutputField = "Function output field not found in collection schema." + UnknownFunctionType = "Unknown function type." + FunctionIncorrectType = "The function of schema type must be Function." FieldType = "The field of schema type must be FieldSchema." FieldDtype = "Field dtype must be of DataType" ExprType = "The type of expr must be string ,but %r is given." EnvConfigErr = "Environment variable %s has a wrong format, please check it: %s" AmbiguousIndexName = "There are multiple indexes, please specify the index_name." InsertUnexpectedField = ( - "Attempt to insert an unexpected field to collection without enabling dynamic field" + "Attempt to insert an unexpected field `%s` to collection without enabling dynamic field" + ) + InsertMissedField = ( + "Insert is missing field `%s`, which is not nullable and has no default_value set" ) UpsertAutoIDTrue = "Upsert don't support autoid == true" AmbiguousDeleteFilterParam = ( @@ -223,7 +253,6 @@ class ExceptionsMessage: "Ambiguous parameter, either ids or filter should be specified, cannot support both." ) JSONKeyMustBeStr = "JSON key must be str." - ClusteringKeyNotPrimary = "Clustering key field should not be primary field" ClusteringKeyType = ( "Clustering key field type must be DataType.INT8, DataType.INT16, " "DataType.INT32, DataType.INT64, DataType.FLOAT, DataType.DOUBLE, " @@ -233,3 +262,7 @@ class ExceptionsMessage: ClusteringKeyOnlyOne = "Expected only one clustering key field, got [%s, %s, ...]." IsClusteringKeyType = "Param is_clustering_key must be bool type." ClusteringKeyFieldType = "Param clustering_key_field must be str type." + UpsertPrimaryKeyEmpty = "Upsert needs to assign the primary key."
+ DefaultValueInvalid = ( + "Default value cannot be None for a field that is defined as nullable == false." + ) diff --git a/pymilvus/grpc_gen/common_pb2.py b/pymilvus/grpc_gen/common_pb2.py index 61e3b9f4e..7f7a1b3bf 100644 --- a/pymilvus/grpc_gen/common_pb2.py +++ b/pymilvus/grpc_gen/common_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: common.proto -# Protobuf Python Version: 4.25.0 +# Protobuf Python Version: 4.25.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool @@ -15,7 +15,7 @@ from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0c\x63ommon.proto\x12\x13milvus.proto.common\x1a google/protobuf/descriptor.proto\"\xf3\x01\n\x06Status\x12\x36\n\nerror_code\x18\x01 \x01(\x0e\x32\x1e.milvus.proto.common.ErrorCodeB\x02\x18\x01\x12\x0e\n\x06reason\x18\x02 \x01(\t\x12\x0c\n\x04\x63ode\x18\x03 \x01(\x05\x12\x11\n\tretriable\x18\x04 \x01(\x08\x12\x0e\n\x06\x64\x65tail\x18\x05 \x01(\t\x12>\n\nextra_info\x18\x06 \x03(\x0b\x32*.milvus.proto.common.Status.ExtraInfoEntry\x1a\x30\n\x0e\x45xtraInfoEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"*\n\x0cKeyValuePair\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"(\n\x0bKeyDataPair\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\x15\n\x04\x42lob\x12\r\n\x05value\x18\x01 \x01(\x0c\"c\n\x10PlaceholderValue\x12\x0b\n\x03tag\x18\x01 \x01(\t\x12\x32\n\x04type\x18\x02 \x01(\x0e\x32$.milvus.proto.common.PlaceholderType\x12\x0e\n\x06values\x18\x03 \x03(\x0c\"O\n\x10PlaceholderGroup\x12;\n\x0cplaceholders\x18\x01 \x03(\x0b\x32%.milvus.proto.common.PlaceholderValue\"#\n\x07\x41\x64\x64ress\x12\n\n\x02ip\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x03\"\xaf\x02\n\x07MsgBase\x12.\n\x08msg_type\x18\x01 \x01(\x0e\x32\x1c.milvus.proto.common.MsgType\x12\r\n\x05msgID\x18\x02 \x01(\x03\x12\x11\n\ttimestamp\x18\x03 \x01(\x04\x12\x10\n\x08sourceID\x18\x04 \x01(\x03\x12\x10\n\x08targetID\x18\x05 \x01(\x03\x12@\n\nproperties\x18\x06 \x03(\x0b\x32,.milvus.proto.common.MsgBase.PropertiesEntry\x12\x39\n\rreplicateInfo\x18\x07 \x01(\x0b\x32\".milvus.proto.common.ReplicateInfo\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\":\n\rReplicateInfo\x12\x13\n\x0bisReplicate\x18\x01 \x01(\x08\x12\x14\n\x0cmsgTimestamp\x18\x02 \x01(\x04\"7\n\tMsgHeader\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\"M\n\x0c\x44MLMsgHeader\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x11\n\tshardName\x18\x02 \x01(\t\"\xbb\x01\n\x0cPrivilegeExt\x12\x34\n\x0bobject_type\x18\x01 \x01(\x0e\x32\x1f.milvus.proto.common.ObjectType\x12>\n\x10object_privilege\x18\x02 \x01(\x0e\x32$.milvus.proto.common.ObjectPrivilege\x12\x19\n\x11object_name_index\x18\x03 \x01(\x05\x12\x1a\n\x12object_name_indexs\x18\x04 \x01(\x05\"2\n\x0cSegmentStats\x12\x11\n\tSegmentID\x18\x01 \x01(\x03\x12\x0f\n\x07NumRows\x18\x02 \x01(\x03\"\xd5\x01\n\nClientInfo\x12\x10\n\x08sdk_type\x18\x01 \x01(\t\x12\x13\n\x0bsdk_version\x18\x02 \x01(\t\x12\x12\n\nlocal_time\x18\x03 \x01(\t\x12\x0c\n\x04user\x18\x04 \x01(\t\x12\x0c\n\x04host\x18\x05 \x01(\t\x12?\n\x08reserved\x18\x06 
\x03(\x0b\x32-.milvus.proto.common.ClientInfo.ReservedEntry\x1a/\n\rReservedEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe3\x01\n\nServerInfo\x12\x12\n\nbuild_tags\x18\x01 \x01(\t\x12\x12\n\nbuild_time\x18\x02 \x01(\t\x12\x12\n\ngit_commit\x18\x03 \x01(\t\x12\x12\n\ngo_version\x18\x04 \x01(\t\x12\x13\n\x0b\x64\x65ploy_mode\x18\x05 \x01(\t\x12?\n\x08reserved\x18\x06 \x03(\x0b\x32-.milvus.proto.common.ServerInfo.ReservedEntry\x1a/\n\rReservedEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\">\n\x08NodeInfo\x12\x0f\n\x07node_id\x18\x01 \x01(\x03\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x10\n\x08hostname\x18\x03 \x01(\t*\xc7\n\n\tErrorCode\x12\x0b\n\x07Success\x10\x00\x12\x13\n\x0fUnexpectedError\x10\x01\x12\x11\n\rConnectFailed\x10\x02\x12\x14\n\x10PermissionDenied\x10\x03\x12\x17\n\x13\x43ollectionNotExists\x10\x04\x12\x13\n\x0fIllegalArgument\x10\x05\x12\x14\n\x10IllegalDimension\x10\x07\x12\x14\n\x10IllegalIndexType\x10\x08\x12\x19\n\x15IllegalCollectionName\x10\t\x12\x0f\n\x0bIllegalTOPK\x10\n\x12\x14\n\x10IllegalRowRecord\x10\x0b\x12\x13\n\x0fIllegalVectorID\x10\x0c\x12\x17\n\x13IllegalSearchResult\x10\r\x12\x10\n\x0c\x46ileNotFound\x10\x0e\x12\x0e\n\nMetaFailed\x10\x0f\x12\x0f\n\x0b\x43\x61\x63heFailed\x10\x10\x12\x16\n\x12\x43\x61nnotCreateFolder\x10\x11\x12\x14\n\x10\x43\x61nnotCreateFile\x10\x12\x12\x16\n\x12\x43\x61nnotDeleteFolder\x10\x13\x12\x14\n\x10\x43\x61nnotDeleteFile\x10\x14\x12\x13\n\x0f\x42uildIndexError\x10\x15\x12\x10\n\x0cIllegalNLIST\x10\x16\x12\x15\n\x11IllegalMetricType\x10\x17\x12\x0f\n\x0bOutOfMemory\x10\x18\x12\x11\n\rIndexNotExist\x10\x19\x12\x13\n\x0f\x45mptyCollection\x10\x1a\x12\x1b\n\x17UpdateImportTaskFailure\x10\x1b\x12\x1a\n\x16\x43ollectionNameNotFound\x10\x1c\x12\x1b\n\x17\x43reateCredentialFailure\x10\x1d\x12\x1b\n\x17UpdateCredentialFailure\x10\x1e\x12\x1b\n\x17\x44\x65leteCredentialFailure\x10\x1f\x12\x18\n\x14GetCredentialFailure\x10 
\x12\x18\n\x14ListCredUsersFailure\x10!\x12\x12\n\x0eGetUserFailure\x10\"\x12\x15\n\x11\x43reateRoleFailure\x10#\x12\x13\n\x0f\x44ropRoleFailure\x10$\x12\x1a\n\x16OperateUserRoleFailure\x10%\x12\x15\n\x11SelectRoleFailure\x10&\x12\x15\n\x11SelectUserFailure\x10\'\x12\x19\n\x15SelectResourceFailure\x10(\x12\x1b\n\x17OperatePrivilegeFailure\x10)\x12\x16\n\x12SelectGrantFailure\x10*\x12!\n\x1dRefreshPolicyInfoCacheFailure\x10+\x12\x15\n\x11ListPolicyFailure\x10,\x12\x12\n\x0eNotShardLeader\x10-\x12\x16\n\x12NoReplicaAvailable\x10.\x12\x13\n\x0fSegmentNotFound\x10/\x12\r\n\tForceDeny\x10\x30\x12\r\n\tRateLimit\x10\x31\x12\x12\n\x0eNodeIDNotMatch\x10\x32\x12\x14\n\x10UpsertAutoIDTrue\x10\x33\x12\x1c\n\x18InsufficientMemoryToLoad\x10\x34\x12\x18\n\x14MemoryQuotaExhausted\x10\x35\x12\x16\n\x12\x44iskQuotaExhausted\x10\x36\x12\x15\n\x11TimeTickLongDelay\x10\x37\x12\x11\n\rNotReadyServe\x10\x38\x12\x1b\n\x17NotReadyCoordActivating\x10\x39\x12\x0f\n\x0b\x44\x61taCoordNA\x10\x64\x12\x12\n\rDDRequestRace\x10\xe8\x07\x1a\x02\x18\x01*c\n\nIndexState\x12\x12\n\x0eIndexStateNone\x10\x00\x12\x0c\n\x08Unissued\x10\x01\x12\x0e\n\nInProgress\x10\x02\x12\x0c\n\x08\x46inished\x10\x03\x12\n\n\x06\x46\x61iled\x10\x04\x12\t\n\x05Retry\x10\x05*\x82\x01\n\x0cSegmentState\x12\x14\n\x10SegmentStateNone\x10\x00\x12\x0c\n\x08NotExist\x10\x01\x12\x0b\n\x07Growing\x10\x02\x12\n\n\x06Sealed\x10\x03\x12\x0b\n\x07\x46lushed\x10\x04\x12\x0c\n\x08\x46lushing\x10\x05\x12\x0b\n\x07\x44ropped\x10\x06\x12\r\n\tImporting\x10\x07*\x94\x01\n\x0fPlaceholderType\x12\x08\n\x04None\x10\x00\x12\x10\n\x0c\x42inaryVector\x10\x64\x12\x0f\n\x0b\x46loatVector\x10\x65\x12\x11\n\rFloat16Vector\x10\x66\x12\x12\n\x0e\x42\x46loat16Vector\x10g\x12\x15\n\x11SparseFloatVector\x10h\x12\t\n\x05Int64\x10\x05\x12\x0b\n\x07VarChar\x10\x15*\xe0\x10\n\x07MsgType\x12\r\n\tUndefined\x10\x00\x12\x14\n\x10\x43reateCollection\x10\x64\x12\x12\n\x0e\x44ropCollection\x10\x65\x12\x11\n\rHasCollection\x10\x66\x12\x16\n\x12\x44\x65scribeCollection\x10g\x12\x13\n\x0fShowCollections\x10h\x12\x14\n\x10GetSystemConfigs\x10i\x12\x12\n\x0eLoadCollection\x10j\x12\x15\n\x11ReleaseCollection\x10k\x12\x0f\n\x0b\x43reateAlias\x10l\x12\r\n\tDropAlias\x10m\x12\x0e\n\nAlterAlias\x10n\x12\x13\n\x0f\x41lterCollection\x10o\x12\x14\n\x10RenameCollection\x10p\x12\x11\n\rDescribeAlias\x10q\x12\x0f\n\x0bListAliases\x10r\x12\x14\n\x0f\x43reatePartition\x10\xc8\x01\x12\x12\n\rDropPartition\x10\xc9\x01\x12\x11\n\x0cHasPartition\x10\xca\x01\x12\x16\n\x11\x44\x65scribePartition\x10\xcb\x01\x12\x13\n\x0eShowPartitions\x10\xcc\x01\x12\x13\n\x0eLoadPartitions\x10\xcd\x01\x12\x16\n\x11ReleasePartitions\x10\xce\x01\x12\x11\n\x0cShowSegments\x10\xfa\x01\x12\x14\n\x0f\x44\x65scribeSegment\x10\xfb\x01\x12\x11\n\x0cLoadSegments\x10\xfc\x01\x12\x14\n\x0fReleaseSegments\x10\xfd\x01\x12\x14\n\x0fHandoffSegments\x10\xfe\x01\x12\x18\n\x13LoadBalanceSegments\x10\xff\x01\x12\x15\n\x10\x44\x65scribeSegments\x10\x80\x02\x12\x1c\n\x17\x46\x65\x64\x65rListIndexedSegment\x10\x81\x02\x12\"\n\x1d\x46\x65\x64\x65rDescribeSegmentIndexData\x10\x82\x02\x12\x10\n\x0b\x43reateIndex\x10\xac\x02\x12\x12\n\rDescribeIndex\x10\xad\x02\x12\x0e\n\tDropIndex\x10\xae\x02\x12\x17\n\x12GetIndexStatistics\x10\xaf\x02\x12\x0f\n\nAlterIndex\x10\xb0\x02\x12\x0b\n\x06Insert\x10\x90\x03\x12\x0b\n\x06\x44\x65lete\x10\x91\x03\x12\n\n\x05\x46lush\x10\x92\x03\x12\x17\n\x12ResendSegmentStats\x10\x93\x03\x12\x0b\n\x06Upsert\x10\x94\x03\x12\x0b\n\x06Search\x10\xf4\x03\x12\x11\n\x0cSearchResult\x10\xf5\x03\x12\x12\n\rGetIndexState\x10\xf6\x03\x12\x
1a\n\x15GetIndexBuildProgress\x10\xf7\x03\x12\x1c\n\x17GetCollectionStatistics\x10\xf8\x03\x12\x1b\n\x16GetPartitionStatistics\x10\xf9\x03\x12\r\n\x08Retrieve\x10\xfa\x03\x12\x13\n\x0eRetrieveResult\x10\xfb\x03\x12\x14\n\x0fWatchDmChannels\x10\xfc\x03\x12\x15\n\x10RemoveDmChannels\x10\xfd\x03\x12\x17\n\x12WatchQueryChannels\x10\xfe\x03\x12\x18\n\x13RemoveQueryChannels\x10\xff\x03\x12\x1d\n\x18SealedSegmentsChangeInfo\x10\x80\x04\x12\x17\n\x12WatchDeltaChannels\x10\x81\x04\x12\x14\n\x0fGetShardLeaders\x10\x82\x04\x12\x10\n\x0bGetReplicas\x10\x83\x04\x12\x13\n\x0eUnsubDmChannel\x10\x84\x04\x12\x14\n\x0fGetDistribution\x10\x85\x04\x12\x15\n\x10SyncDistribution\x10\x86\x04\x12\x10\n\x0bSegmentInfo\x10\xd8\x04\x12\x0f\n\nSystemInfo\x10\xd9\x04\x12\x14\n\x0fGetRecoveryInfo\x10\xda\x04\x12\x14\n\x0fGetSegmentState\x10\xdb\x04\x12\r\n\x08TimeTick\x10\xb0\t\x12\x13\n\x0eQueryNodeStats\x10\xb1\t\x12\x0e\n\tLoadIndex\x10\xb2\t\x12\x0e\n\tRequestID\x10\xb3\t\x12\x0f\n\nRequestTSO\x10\xb4\t\x12\x14\n\x0f\x41llocateSegment\x10\xb5\t\x12\x16\n\x11SegmentStatistics\x10\xb6\t\x12\x15\n\x10SegmentFlushDone\x10\xb7\t\x12\x0f\n\nDataNodeTt\x10\xb8\t\x12\x0c\n\x07\x43onnect\x10\xb9\t\x12\x14\n\x0fListClientInfos\x10\xba\t\x12\x13\n\x0e\x41llocTimestamp\x10\xbb\t\x12\x15\n\x10\x43reateCredential\x10\xdc\x0b\x12\x12\n\rGetCredential\x10\xdd\x0b\x12\x15\n\x10\x44\x65leteCredential\x10\xde\x0b\x12\x15\n\x10UpdateCredential\x10\xdf\x0b\x12\x16\n\x11ListCredUsernames\x10\xe0\x0b\x12\x0f\n\nCreateRole\x10\xc0\x0c\x12\r\n\x08\x44ropRole\x10\xc1\x0c\x12\x14\n\x0fOperateUserRole\x10\xc2\x0c\x12\x0f\n\nSelectRole\x10\xc3\x0c\x12\x0f\n\nSelectUser\x10\xc4\x0c\x12\x13\n\x0eSelectResource\x10\xc5\x0c\x12\x15\n\x10OperatePrivilege\x10\xc6\x0c\x12\x10\n\x0bSelectGrant\x10\xc7\x0c\x12\x1b\n\x16RefreshPolicyInfoCache\x10\xc8\x0c\x12\x0f\n\nListPolicy\x10\xc9\x0c\x12\x18\n\x13\x43reateResourceGroup\x10\xa4\r\x12\x16\n\x11\x44ropResourceGroup\x10\xa5\r\x12\x17\n\x12ListResourceGroups\x10\xa6\r\x12\x1a\n\x15\x44\x65scribeResourceGroup\x10\xa7\r\x12\x11\n\x0cTransferNode\x10\xa8\r\x12\x14\n\x0fTransferReplica\x10\xa9\r\x12\x19\n\x14UpdateResourceGroups\x10\xaa\r\x12\x13\n\x0e\x43reateDatabase\x10\x89\x0e\x12\x11\n\x0c\x44ropDatabase\x10\x8a\x0e\x12\x12\n\rListDatabases\x10\x8b\x0e*\"\n\x07\x44slType\x12\x07\n\x03\x44sl\x10\x00\x12\x0e\n\nBoolExprV1\x10\x01*B\n\x0f\x43ompactionState\x12\x11\n\rUndefiedState\x10\x00\x12\r\n\tExecuting\x10\x01\x12\r\n\tCompleted\x10\x02*X\n\x10\x43onsistencyLevel\x12\n\n\x06Strong\x10\x00\x12\x0b\n\x07Session\x10\x01\x12\x0b\n\x07\x42ounded\x10\x02\x12\x0e\n\nEventually\x10\x03\x12\x0e\n\nCustomized\x10\x04*\x9e\x01\n\x0bImportState\x12\x11\n\rImportPending\x10\x00\x12\x10\n\x0cImportFailed\x10\x01\x12\x11\n\rImportStarted\x10\x02\x12\x13\n\x0fImportPersisted\x10\x05\x12\x11\n\rImportFlushed\x10\x08\x12\x13\n\x0fImportCompleted\x10\x06\x12\x1a\n\x16ImportFailedAndCleaned\x10\x07*2\n\nObjectType\x12\x0e\n\nCollection\x10\x00\x12\n\n\x06Global\x10\x01\x12\x08\n\x04User\x10\x02*\xd6\n\n\x0fObjectPrivilege\x12\x10\n\x0cPrivilegeAll\x10\x00\x12\x1d\n\x19PrivilegeCreateCollection\x10\x01\x12\x1b\n\x17PrivilegeDropCollection\x10\x02\x12\x1f\n\x1bPrivilegeDescribeCollection\x10\x03\x12\x1c\n\x18PrivilegeShowCollections\x10\x04\x12\x11\n\rPrivilegeLoad\x10\x05\x12\x14\n\x10PrivilegeRelease\x10\x06\x12\x17\n\x13PrivilegeCompaction\x10\x07\x12\x13\n\x0fPrivilegeInsert\x10\x08\x12\x13\n\x0fPrivilegeDelete\x10\t\x12\x1a\n\x16PrivilegeGetStatistics\x10\n\x12\x18\n\x14PrivilegeCreateIndex\x10\x0b\x12\x18\n\x14Privile
geIndexDetail\x10\x0c\x12\x16\n\x12PrivilegeDropIndex\x10\r\x12\x13\n\x0fPrivilegeSearch\x10\x0e\x12\x12\n\x0ePrivilegeFlush\x10\x0f\x12\x12\n\x0ePrivilegeQuery\x10\x10\x12\x18\n\x14PrivilegeLoadBalance\x10\x11\x12\x13\n\x0fPrivilegeImport\x10\x12\x12\x1c\n\x18PrivilegeCreateOwnership\x10\x13\x12\x17\n\x13PrivilegeUpdateUser\x10\x14\x12\x1a\n\x16PrivilegeDropOwnership\x10\x15\x12\x1c\n\x18PrivilegeSelectOwnership\x10\x16\x12\x1c\n\x18PrivilegeManageOwnership\x10\x17\x12\x17\n\x13PrivilegeSelectUser\x10\x18\x12\x13\n\x0fPrivilegeUpsert\x10\x19\x12 \n\x1cPrivilegeCreateResourceGroup\x10\x1a\x12\x1e\n\x1aPrivilegeDropResourceGroup\x10\x1b\x12\"\n\x1ePrivilegeDescribeResourceGroup\x10\x1c\x12\x1f\n\x1bPrivilegeListResourceGroups\x10\x1d\x12\x19\n\x15PrivilegeTransferNode\x10\x1e\x12\x1c\n\x18PrivilegeTransferReplica\x10\x1f\x12\x1f\n\x1bPrivilegeGetLoadingProgress\x10 \x12\x19\n\x15PrivilegeGetLoadState\x10!\x12\x1d\n\x19PrivilegeRenameCollection\x10\"\x12\x1b\n\x17PrivilegeCreateDatabase\x10#\x12\x19\n\x15PrivilegeDropDatabase\x10$\x12\x1a\n\x16PrivilegeListDatabases\x10%\x12\x15\n\x11PrivilegeFlushAll\x10&\x12\x1c\n\x18PrivilegeCreatePartition\x10\'\x12\x1a\n\x16PrivilegeDropPartition\x10(\x12\x1b\n\x17PrivilegeShowPartitions\x10)\x12\x19\n\x15PrivilegeHasPartition\x10*\x12\x1a\n\x16PrivilegeGetFlushState\x10+\x12\x18\n\x14PrivilegeCreateAlias\x10,\x12\x16\n\x12PrivilegeDropAlias\x10-\x12\x1a\n\x16PrivilegeDescribeAlias\x10.\x12\x18\n\x14PrivilegeListAliases\x10/\x12!\n\x1dPrivilegeUpdateResourceGroups\x10\x30\x12\x1a\n\x16PrivilegeAlterDatabase\x10\x31*S\n\tStateCode\x12\x10\n\x0cInitializing\x10\x00\x12\x0b\n\x07Healthy\x10\x01\x12\x0c\n\x08\x41\x62normal\x10\x02\x12\x0b\n\x07StandBy\x10\x03\x12\x0c\n\x08Stopping\x10\x04*c\n\tLoadState\x12\x15\n\x11LoadStateNotExist\x10\x00\x12\x14\n\x10LoadStateNotLoad\x10\x01\x12\x14\n\x10LoadStateLoading\x10\x02\x12\x13\n\x0fLoadStateLoaded\x10\x03:^\n\x11privilege_ext_obj\x12\x1f.google.protobuf.MessageOptions\x18\xe9\x07 \x01(\x0b\x32!.milvus.proto.common.PrivilegeExtBm\n\x0eio.milvus.grpcB\x0b\x43ommonProtoP\x01Z4github.com/milvus-io/milvus-proto/go-api/v2/commonpb\xa0\x01\x01\xaa\x02\x12Milvus.Client.Grpcb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0c\x63ommon.proto\x12\x13milvus.proto.common\x1a google/protobuf/descriptor.proto\"\xf3\x01\n\x06Status\x12\x36\n\nerror_code\x18\x01 \x01(\x0e\x32\x1e.milvus.proto.common.ErrorCodeB\x02\x18\x01\x12\x0e\n\x06reason\x18\x02 \x01(\t\x12\x0c\n\x04\x63ode\x18\x03 \x01(\x05\x12\x11\n\tretriable\x18\x04 \x01(\x08\x12\x0e\n\x06\x64\x65tail\x18\x05 \x01(\t\x12>\n\nextra_info\x18\x06 \x03(\x0b\x32*.milvus.proto.common.Status.ExtraInfoEntry\x1a\x30\n\x0e\x45xtraInfoEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"*\n\x0cKeyValuePair\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"(\n\x0bKeyDataPair\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\x15\n\x04\x42lob\x12\r\n\x05value\x18\x01 \x01(\x0c\"c\n\x10PlaceholderValue\x12\x0b\n\x03tag\x18\x01 \x01(\t\x12\x32\n\x04type\x18\x02 \x01(\x0e\x32$.milvus.proto.common.PlaceholderType\x12\x0e\n\x06values\x18\x03 \x03(\x0c\"O\n\x10PlaceholderGroup\x12;\n\x0cplaceholders\x18\x01 \x03(\x0b\x32%.milvus.proto.common.PlaceholderValue\"#\n\x07\x41\x64\x64ress\x12\n\n\x02ip\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x03\"\xaf\x02\n\x07MsgBase\x12.\n\x08msg_type\x18\x01 \x01(\x0e\x32\x1c.milvus.proto.common.MsgType\x12\r\n\x05msgID\x18\x02 
\x01(\x03\x12\x11\n\ttimestamp\x18\x03 \x01(\x04\x12\x10\n\x08sourceID\x18\x04 \x01(\x03\x12\x10\n\x08targetID\x18\x05 \x01(\x03\x12@\n\nproperties\x18\x06 \x03(\x0b\x32,.milvus.proto.common.MsgBase.PropertiesEntry\x12\x39\n\rreplicateInfo\x18\x07 \x01(\x0b\x32\".milvus.proto.common.ReplicateInfo\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\":\n\rReplicateInfo\x12\x13\n\x0bisReplicate\x18\x01 \x01(\x08\x12\x14\n\x0cmsgTimestamp\x18\x02 \x01(\x04\"7\n\tMsgHeader\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\"M\n\x0c\x44MLMsgHeader\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x11\n\tshardName\x18\x02 \x01(\t\"\xbb\x01\n\x0cPrivilegeExt\x12\x34\n\x0bobject_type\x18\x01 \x01(\x0e\x32\x1f.milvus.proto.common.ObjectType\x12>\n\x10object_privilege\x18\x02 \x01(\x0e\x32$.milvus.proto.common.ObjectPrivilege\x12\x19\n\x11object_name_index\x18\x03 \x01(\x05\x12\x1a\n\x12object_name_indexs\x18\x04 \x01(\x05\"2\n\x0cSegmentStats\x12\x11\n\tSegmentID\x18\x01 \x01(\x03\x12\x0f\n\x07NumRows\x18\x02 \x01(\x03\"\xd5\x01\n\nClientInfo\x12\x10\n\x08sdk_type\x18\x01 \x01(\t\x12\x13\n\x0bsdk_version\x18\x02 \x01(\t\x12\x12\n\nlocal_time\x18\x03 \x01(\t\x12\x0c\n\x04user\x18\x04 \x01(\t\x12\x0c\n\x04host\x18\x05 \x01(\t\x12?\n\x08reserved\x18\x06 \x03(\x0b\x32-.milvus.proto.common.ClientInfo.ReservedEntry\x1a/\n\rReservedEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe3\x01\n\nServerInfo\x12\x12\n\nbuild_tags\x18\x01 \x01(\t\x12\x12\n\nbuild_time\x18\x02 \x01(\t\x12\x12\n\ngit_commit\x18\x03 \x01(\t\x12\x12\n\ngo_version\x18\x04 \x01(\t\x12\x13\n\x0b\x64\x65ploy_mode\x18\x05 \x01(\t\x12?\n\x08reserved\x18\x06 \x03(\x0b\x32-.milvus.proto.common.ServerInfo.ReservedEntry\x1a/\n\rReservedEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\">\n\x08NodeInfo\x12\x0f\n\x07node_id\x18\x01 \x01(\x03\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x10\n\x08hostname\x18\x03 \x01(\t*\xc9\x0b\n\tErrorCode\x12\x0b\n\x07Success\x10\x00\x12\x13\n\x0fUnexpectedError\x10\x01\x12\x11\n\rConnectFailed\x10\x02\x12\x14\n\x10PermissionDenied\x10\x03\x12\x17\n\x13\x43ollectionNotExists\x10\x04\x12\x13\n\x0fIllegalArgument\x10\x05\x12\x14\n\x10IllegalDimension\x10\x07\x12\x14\n\x10IllegalIndexType\x10\x08\x12\x19\n\x15IllegalCollectionName\x10\t\x12\x0f\n\x0bIllegalTOPK\x10\n\x12\x14\n\x10IllegalRowRecord\x10\x0b\x12\x13\n\x0fIllegalVectorID\x10\x0c\x12\x17\n\x13IllegalSearchResult\x10\r\x12\x10\n\x0c\x46ileNotFound\x10\x0e\x12\x0e\n\nMetaFailed\x10\x0f\x12\x0f\n\x0b\x43\x61\x63heFailed\x10\x10\x12\x16\n\x12\x43\x61nnotCreateFolder\x10\x11\x12\x14\n\x10\x43\x61nnotCreateFile\x10\x12\x12\x16\n\x12\x43\x61nnotDeleteFolder\x10\x13\x12\x14\n\x10\x43\x61nnotDeleteFile\x10\x14\x12\x13\n\x0f\x42uildIndexError\x10\x15\x12\x10\n\x0cIllegalNLIST\x10\x16\x12\x15\n\x11IllegalMetricType\x10\x17\x12\x0f\n\x0bOutOfMemory\x10\x18\x12\x11\n\rIndexNotExist\x10\x19\x12\x13\n\x0f\x45mptyCollection\x10\x1a\x12\x1b\n\x17UpdateImportTaskFailure\x10\x1b\x12\x1a\n\x16\x43ollectionNameNotFound\x10\x1c\x12\x1b\n\x17\x43reateCredentialFailure\x10\x1d\x12\x1b\n\x17UpdateCredentialFailure\x10\x1e\x12\x1b\n\x17\x44\x65leteCredentialFailure\x10\x1f\x12\x18\n\x14GetCredentialFailure\x10 
\x12\x18\n\x14ListCredUsersFailure\x10!\x12\x12\n\x0eGetUserFailure\x10\"\x12\x15\n\x11\x43reateRoleFailure\x10#\x12\x13\n\x0f\x44ropRoleFailure\x10$\x12\x1a\n\x16OperateUserRoleFailure\x10%\x12\x15\n\x11SelectRoleFailure\x10&\x12\x15\n\x11SelectUserFailure\x10\'\x12\x19\n\x15SelectResourceFailure\x10(\x12\x1b\n\x17OperatePrivilegeFailure\x10)\x12\x16\n\x12SelectGrantFailure\x10*\x12!\n\x1dRefreshPolicyInfoCacheFailure\x10+\x12\x15\n\x11ListPolicyFailure\x10,\x12\x12\n\x0eNotShardLeader\x10-\x12\x16\n\x12NoReplicaAvailable\x10.\x12\x13\n\x0fSegmentNotFound\x10/\x12\r\n\tForceDeny\x10\x30\x12\r\n\tRateLimit\x10\x31\x12\x12\n\x0eNodeIDNotMatch\x10\x32\x12\x14\n\x10UpsertAutoIDTrue\x10\x33\x12\x1c\n\x18InsufficientMemoryToLoad\x10\x34\x12\x18\n\x14MemoryQuotaExhausted\x10\x35\x12\x16\n\x12\x44iskQuotaExhausted\x10\x36\x12\x15\n\x11TimeTickLongDelay\x10\x37\x12\x11\n\rNotReadyServe\x10\x38\x12\x1b\n\x17NotReadyCoordActivating\x10\x39\x12\x1f\n\x1b\x43reatePrivilegeGroupFailure\x10:\x12\x1d\n\x19\x44ropPrivilegeGroupFailure\x10;\x12\x1e\n\x1aListPrivilegeGroupsFailure\x10<\x12 \n\x1cOperatePrivilegeGroupFailure\x10=\x12\x0f\n\x0b\x44\x61taCoordNA\x10\x64\x12\x12\n\rDDRequestRace\x10\xe8\x07\x1a\x02\x18\x01*c\n\nIndexState\x12\x12\n\x0eIndexStateNone\x10\x00\x12\x0c\n\x08Unissued\x10\x01\x12\x0e\n\nInProgress\x10\x02\x12\x0c\n\x08\x46inished\x10\x03\x12\n\n\x06\x46\x61iled\x10\x04\x12\t\n\x05Retry\x10\x05*\x82\x01\n\x0cSegmentState\x12\x14\n\x10SegmentStateNone\x10\x00\x12\x0c\n\x08NotExist\x10\x01\x12\x0b\n\x07Growing\x10\x02\x12\n\n\x06Sealed\x10\x03\x12\x0b\n\x07\x46lushed\x10\x04\x12\x0c\n\x08\x46lushing\x10\x05\x12\x0b\n\x07\x44ropped\x10\x06\x12\r\n\tImporting\x10\x07*2\n\x0cSegmentLevel\x12\n\n\x06Legacy\x10\x00\x12\x06\n\x02L0\x10\x01\x12\x06\n\x02L1\x10\x02\x12\x06\n\x02L2\x10\x03*\x94\x01\n\x0fPlaceholderType\x12\x08\n\x04None\x10\x00\x12\x10\n\x0c\x42inaryVector\x10\x64\x12\x0f\n\x0b\x46loatVector\x10\x65\x12\x11\n\rFloat16Vector\x10\x66\x12\x12\n\x0e\x42\x46loat16Vector\x10g\x12\x15\n\x11SparseFloatVector\x10h\x12\t\n\x05Int64\x10\x05\x12\x0b\n\x07VarChar\x10\x15*\xae\x12\n\x07MsgType\x12\r\n\tUndefined\x10\x00\x12\x14\n\x10\x43reateCollection\x10\x64\x12\x12\n\x0e\x44ropCollection\x10\x65\x12\x11\n\rHasCollection\x10\x66\x12\x16\n\x12\x44\x65scribeCollection\x10g\x12\x13\n\x0fShowCollections\x10h\x12\x14\n\x10GetSystemConfigs\x10i\x12\x12\n\x0eLoadCollection\x10j\x12\x15\n\x11ReleaseCollection\x10k\x12\x0f\n\x0b\x43reateAlias\x10l\x12\r\n\tDropAlias\x10m\x12\x0e\n\nAlterAlias\x10n\x12\x13\n\x0f\x41lterCollection\x10o\x12\x14\n\x10RenameCollection\x10p\x12\x11\n\rDescribeAlias\x10q\x12\x0f\n\x0bListAliases\x10r\x12\x14\n\x0f\x43reatePartition\x10\xc8\x01\x12\x12\n\rDropPartition\x10\xc9\x01\x12\x11\n\x0cHasPartition\x10\xca\x01\x12\x16\n\x11\x44\x65scribePartition\x10\xcb\x01\x12\x13\n\x0eShowPartitions\x10\xcc\x01\x12\x13\n\x0eLoadPartitions\x10\xcd\x01\x12\x16\n\x11ReleasePartitions\x10\xce\x01\x12\x11\n\x0cShowSegments\x10\xfa\x01\x12\x14\n\x0f\x44\x65scribeSegment\x10\xfb\x01\x12\x11\n\x0cLoadSegments\x10\xfc\x01\x12\x14\n\x0fReleaseSegments\x10\xfd\x01\x12\x14\n\x0fHandoffSegments\x10\xfe\x01\x12\x18\n\x13LoadBalanceSegments\x10\xff\x01\x12\x15\n\x10\x44\x65scribeSegments\x10\x80\x02\x12\x1c\n\x17\x46\x65\x64\x65rListIndexedSegment\x10\x81\x02\x12\"\n\x1d\x46\x65\x64\x65rDescribeSegmentIndexData\x10\x82\x02\x12\x10\n\x0b\x43reateIndex\x10\xac\x02\x12\x12\n\rDescribeIndex\x10\xad\x02\x12\x0e\n\tDropIndex\x10\xae\x02\x12\x17\n\x12GetIndexStatistics\x10\xaf\x02\x12\x0f\n\nAlterIndex
\x10\xb0\x02\x12\x0b\n\x06Insert\x10\x90\x03\x12\x0b\n\x06\x44\x65lete\x10\x91\x03\x12\n\n\x05\x46lush\x10\x92\x03\x12\x17\n\x12ResendSegmentStats\x10\x93\x03\x12\x0b\n\x06Upsert\x10\x94\x03\x12\x10\n\x0bManualFlush\x10\x95\x03\x12\x11\n\x0c\x46lushSegment\x10\x96\x03\x12\x12\n\rCreateSegment\x10\x97\x03\x12\x0b\n\x06Search\x10\xf4\x03\x12\x11\n\x0cSearchResult\x10\xf5\x03\x12\x12\n\rGetIndexState\x10\xf6\x03\x12\x1a\n\x15GetIndexBuildProgress\x10\xf7\x03\x12\x1c\n\x17GetCollectionStatistics\x10\xf8\x03\x12\x1b\n\x16GetPartitionStatistics\x10\xf9\x03\x12\r\n\x08Retrieve\x10\xfa\x03\x12\x13\n\x0eRetrieveResult\x10\xfb\x03\x12\x14\n\x0fWatchDmChannels\x10\xfc\x03\x12\x15\n\x10RemoveDmChannels\x10\xfd\x03\x12\x17\n\x12WatchQueryChannels\x10\xfe\x03\x12\x18\n\x13RemoveQueryChannels\x10\xff\x03\x12\x1d\n\x18SealedSegmentsChangeInfo\x10\x80\x04\x12\x17\n\x12WatchDeltaChannels\x10\x81\x04\x12\x14\n\x0fGetShardLeaders\x10\x82\x04\x12\x10\n\x0bGetReplicas\x10\x83\x04\x12\x13\n\x0eUnsubDmChannel\x10\x84\x04\x12\x14\n\x0fGetDistribution\x10\x85\x04\x12\x15\n\x10SyncDistribution\x10\x86\x04\x12\x10\n\x0bSegmentInfo\x10\xd8\x04\x12\x0f\n\nSystemInfo\x10\xd9\x04\x12\x14\n\x0fGetRecoveryInfo\x10\xda\x04\x12\x14\n\x0fGetSegmentState\x10\xdb\x04\x12\r\n\x08TimeTick\x10\xb0\t\x12\x13\n\x0eQueryNodeStats\x10\xb1\t\x12\x0e\n\tLoadIndex\x10\xb2\t\x12\x0e\n\tRequestID\x10\xb3\t\x12\x0f\n\nRequestTSO\x10\xb4\t\x12\x14\n\x0f\x41llocateSegment\x10\xb5\t\x12\x16\n\x11SegmentStatistics\x10\xb6\t\x12\x15\n\x10SegmentFlushDone\x10\xb7\t\x12\x0f\n\nDataNodeTt\x10\xb8\t\x12\x0c\n\x07\x43onnect\x10\xb9\t\x12\x14\n\x0fListClientInfos\x10\xba\t\x12\x13\n\x0e\x41llocTimestamp\x10\xbb\t\x12\x15\n\x10\x43reateCredential\x10\xdc\x0b\x12\x12\n\rGetCredential\x10\xdd\x0b\x12\x15\n\x10\x44\x65leteCredential\x10\xde\x0b\x12\x15\n\x10UpdateCredential\x10\xdf\x0b\x12\x16\n\x11ListCredUsernames\x10\xe0\x0b\x12\x0f\n\nCreateRole\x10\xc0\x0c\x12\r\n\x08\x44ropRole\x10\xc1\x0c\x12\x14\n\x0fOperateUserRole\x10\xc2\x0c\x12\x0f\n\nSelectRole\x10\xc3\x0c\x12\x0f\n\nSelectUser\x10\xc4\x0c\x12\x13\n\x0eSelectResource\x10\xc5\x0c\x12\x15\n\x10OperatePrivilege\x10\xc6\x0c\x12\x10\n\x0bSelectGrant\x10\xc7\x0c\x12\x1b\n\x16RefreshPolicyInfoCache\x10\xc8\x0c\x12\x0f\n\nListPolicy\x10\xc9\x0c\x12\x19\n\x14\x43reatePrivilegeGroup\x10\xca\x0c\x12\x17\n\x12\x44ropPrivilegeGroup\x10\xcb\x0c\x12\x18\n\x13ListPrivilegeGroups\x10\xcc\x0c\x12\x1a\n\x15OperatePrivilegeGroup\x10\xcd\x0c\x12\x18\n\x13\x43reateResourceGroup\x10\xa4\r\x12\x16\n\x11\x44ropResourceGroup\x10\xa5\r\x12\x17\n\x12ListResourceGroups\x10\xa6\r\x12\x1a\n\x15\x44\x65scribeResourceGroup\x10\xa7\r\x12\x11\n\x0cTransferNode\x10\xa8\r\x12\x14\n\x0fTransferReplica\x10\xa9\r\x12\x19\n\x14UpdateResourceGroups\x10\xaa\r\x12\x13\n\x0e\x43reateDatabase\x10\x89\x0e\x12\x11\n\x0c\x44ropDatabase\x10\x8a\x0e\x12\x12\n\rListDatabases\x10\x8b\x0e\x12\x12\n\rAlterDatabase\x10\x8c\x0e\x12\x15\n\x10\x44\x65scribeDatabase\x10\x8d\x0e*\"\n\x07\x44slType\x12\x07\n\x03\x44sl\x10\x00\x12\x0e\n\nBoolExprV1\x10\x01*B\n\x0f\x43ompactionState\x12\x11\n\rUndefiedState\x10\x00\x12\r\n\tExecuting\x10\x01\x12\r\n\tCompleted\x10\x02*X\n\x10\x43onsistencyLevel\x12\n\n\x06Strong\x10\x00\x12\x0b\n\x07Session\x10\x01\x12\x0b\n\x07\x42ounded\x10\x02\x12\x0e\n\nEventually\x10\x03\x12\x0e\n\nCustomized\x10\x04*\x9e\x01\n\x0bImportState\x12\x11\n\rImportPending\x10\x00\x12\x10\n\x0cImportFailed\x10\x01\x12\x11\n\rImportStarted\x10\x02\x12\x13\n\x0fImportPersisted\x10\x05\x12\x11\n\rImportFlushed\x10\x08\x12\x13\n\x0fImportComple
ted\x10\x06\x12\x1a\n\x16ImportFailedAndCleaned\x10\x07*2\n\nObjectType\x12\x0e\n\nCollection\x10\x00\x12\n\n\x06Global\x10\x01\x12\x08\n\x04User\x10\x02*\x84\r\n\x0fObjectPrivilege\x12\x10\n\x0cPrivilegeAll\x10\x00\x12\x1d\n\x19PrivilegeCreateCollection\x10\x01\x12\x1b\n\x17PrivilegeDropCollection\x10\x02\x12\x1f\n\x1bPrivilegeDescribeCollection\x10\x03\x12\x1c\n\x18PrivilegeShowCollections\x10\x04\x12\x11\n\rPrivilegeLoad\x10\x05\x12\x14\n\x10PrivilegeRelease\x10\x06\x12\x17\n\x13PrivilegeCompaction\x10\x07\x12\x13\n\x0fPrivilegeInsert\x10\x08\x12\x13\n\x0fPrivilegeDelete\x10\t\x12\x1a\n\x16PrivilegeGetStatistics\x10\n\x12\x18\n\x14PrivilegeCreateIndex\x10\x0b\x12\x18\n\x14PrivilegeIndexDetail\x10\x0c\x12\x16\n\x12PrivilegeDropIndex\x10\r\x12\x13\n\x0fPrivilegeSearch\x10\x0e\x12\x12\n\x0ePrivilegeFlush\x10\x0f\x12\x12\n\x0ePrivilegeQuery\x10\x10\x12\x18\n\x14PrivilegeLoadBalance\x10\x11\x12\x13\n\x0fPrivilegeImport\x10\x12\x12\x1c\n\x18PrivilegeCreateOwnership\x10\x13\x12\x17\n\x13PrivilegeUpdateUser\x10\x14\x12\x1a\n\x16PrivilegeDropOwnership\x10\x15\x12\x1c\n\x18PrivilegeSelectOwnership\x10\x16\x12\x1c\n\x18PrivilegeManageOwnership\x10\x17\x12\x17\n\x13PrivilegeSelectUser\x10\x18\x12\x13\n\x0fPrivilegeUpsert\x10\x19\x12 \n\x1cPrivilegeCreateResourceGroup\x10\x1a\x12\x1e\n\x1aPrivilegeDropResourceGroup\x10\x1b\x12\"\n\x1ePrivilegeDescribeResourceGroup\x10\x1c\x12\x1f\n\x1bPrivilegeListResourceGroups\x10\x1d\x12\x19\n\x15PrivilegeTransferNode\x10\x1e\x12\x1c\n\x18PrivilegeTransferReplica\x10\x1f\x12\x1f\n\x1bPrivilegeGetLoadingProgress\x10 \x12\x19\n\x15PrivilegeGetLoadState\x10!\x12\x1d\n\x19PrivilegeRenameCollection\x10\"\x12\x1b\n\x17PrivilegeCreateDatabase\x10#\x12\x19\n\x15PrivilegeDropDatabase\x10$\x12\x1a\n\x16PrivilegeListDatabases\x10%\x12\x15\n\x11PrivilegeFlushAll\x10&\x12\x1c\n\x18PrivilegeCreatePartition\x10\'\x12\x1a\n\x16PrivilegeDropPartition\x10(\x12\x1b\n\x17PrivilegeShowPartitions\x10)\x12\x19\n\x15PrivilegeHasPartition\x10*\x12\x1a\n\x16PrivilegeGetFlushState\x10+\x12\x18\n\x14PrivilegeCreateAlias\x10,\x12\x16\n\x12PrivilegeDropAlias\x10-\x12\x1a\n\x16PrivilegeDescribeAlias\x10.\x12\x18\n\x14PrivilegeListAliases\x10/\x12!\n\x1dPrivilegeUpdateResourceGroups\x10\x30\x12\x1a\n\x16PrivilegeAlterDatabase\x10\x31\x12\x1d\n\x19PrivilegeDescribeDatabase\x10\x32\x12\x17\n\x13PrivilegeBackupRBAC\x10\x33\x12\x18\n\x14PrivilegeRestoreRBAC\x10\x34\x12\x1a\n\x16PrivilegeGroupReadOnly\x10\x35\x12\x1b\n\x17PrivilegeGroupReadWrite\x10\x36\x12\x17\n\x13PrivilegeGroupAdmin\x10\x37\x12!\n\x1dPrivilegeCreatePrivilegeGroup\x10\x38\x12\x1f\n\x1bPrivilegeDropPrivilegeGroup\x10\x39\x12 \n\x1cPrivilegeListPrivilegeGroups\x10:\x12\"\n\x1ePrivilegeOperatePrivilegeGroup\x10;*S\n\tStateCode\x12\x10\n\x0cInitializing\x10\x00\x12\x0b\n\x07Healthy\x10\x01\x12\x0c\n\x08\x41\x62normal\x10\x02\x12\x0b\n\x07StandBy\x10\x03\x12\x0c\n\x08Stopping\x10\x04*c\n\tLoadState\x12\x15\n\x11LoadStateNotExist\x10\x00\x12\x14\n\x10LoadStateNotLoad\x10\x01\x12\x14\n\x10LoadStateLoading\x10\x02\x12\x13\n\x0fLoadStateLoaded\x10\x03:^\n\x11privilege_ext_obj\x12\x1f.google.protobuf.MessageOptions\x18\xe9\x07 \x01(\x0b\x32!.milvus.proto.common.PrivilegeExtBm\n\x0eio.milvus.grpcB\x0b\x43ommonProtoP\x01Z4github.com/milvus-io/milvus-proto/go-api/v2/commonpb\xa0\x01\x01\xaa\x02\x12Milvus.Client.Grpcb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -36,31 +36,33 @@ _globals['_SERVERINFO_RESERVEDENTRY']._options = None _globals['_SERVERINFO_RESERVEDENTRY']._serialized_options = 
b'8\001' _globals['_ERRORCODE']._serialized_start=1900 - _globals['_ERRORCODE']._serialized_end=3251 - _globals['_INDEXSTATE']._serialized_start=3253 - _globals['_INDEXSTATE']._serialized_end=3352 - _globals['_SEGMENTSTATE']._serialized_start=3355 - _globals['_SEGMENTSTATE']._serialized_end=3485 - _globals['_PLACEHOLDERTYPE']._serialized_start=3488 - _globals['_PLACEHOLDERTYPE']._serialized_end=3636 - _globals['_MSGTYPE']._serialized_start=3639 - _globals['_MSGTYPE']._serialized_end=5783 - _globals['_DSLTYPE']._serialized_start=5785 - _globals['_DSLTYPE']._serialized_end=5819 - _globals['_COMPACTIONSTATE']._serialized_start=5821 - _globals['_COMPACTIONSTATE']._serialized_end=5887 - _globals['_CONSISTENCYLEVEL']._serialized_start=5889 - _globals['_CONSISTENCYLEVEL']._serialized_end=5977 - _globals['_IMPORTSTATE']._serialized_start=5980 - _globals['_IMPORTSTATE']._serialized_end=6138 - _globals['_OBJECTTYPE']._serialized_start=6140 - _globals['_OBJECTTYPE']._serialized_end=6190 - _globals['_OBJECTPRIVILEGE']._serialized_start=6193 - _globals['_OBJECTPRIVILEGE']._serialized_end=7559 - _globals['_STATECODE']._serialized_start=7561 - _globals['_STATECODE']._serialized_end=7644 - _globals['_LOADSTATE']._serialized_start=7646 - _globals['_LOADSTATE']._serialized_end=7745 + _globals['_ERRORCODE']._serialized_end=3381 + _globals['_INDEXSTATE']._serialized_start=3383 + _globals['_INDEXSTATE']._serialized_end=3482 + _globals['_SEGMENTSTATE']._serialized_start=3485 + _globals['_SEGMENTSTATE']._serialized_end=3615 + _globals['_SEGMENTLEVEL']._serialized_start=3617 + _globals['_SEGMENTLEVEL']._serialized_end=3667 + _globals['_PLACEHOLDERTYPE']._serialized_start=3670 + _globals['_PLACEHOLDERTYPE']._serialized_end=3818 + _globals['_MSGTYPE']._serialized_start=3821 + _globals['_MSGTYPE']._serialized_end=6171 + _globals['_DSLTYPE']._serialized_start=6173 + _globals['_DSLTYPE']._serialized_end=6207 + _globals['_COMPACTIONSTATE']._serialized_start=6209 + _globals['_COMPACTIONSTATE']._serialized_end=6275 + _globals['_CONSISTENCYLEVEL']._serialized_start=6277 + _globals['_CONSISTENCYLEVEL']._serialized_end=6365 + _globals['_IMPORTSTATE']._serialized_start=6368 + _globals['_IMPORTSTATE']._serialized_end=6526 + _globals['_OBJECTTYPE']._serialized_start=6528 + _globals['_OBJECTTYPE']._serialized_end=6578 + _globals['_OBJECTPRIVILEGE']._serialized_start=6581 + _globals['_OBJECTPRIVILEGE']._serialized_end=8249 + _globals['_STATECODE']._serialized_start=8251 + _globals['_STATECODE']._serialized_end=8334 + _globals['_LOADSTATE']._serialized_start=8336 + _globals['_LOADSTATE']._serialized_end=8435 _globals['_STATUS']._serialized_start=72 _globals['_STATUS']._serialized_end=315 _globals['_STATUS_EXTRAINFOENTRY']._serialized_start=267 diff --git a/pymilvus/grpc_gen/common_pb2.pyi b/pymilvus/grpc_gen/common_pb2.pyi index 54c0f366a..f82c6b219 100644 --- a/pymilvus/grpc_gen/common_pb2.pyi +++ b/pymilvus/grpc_gen/common_pb2.pyi @@ -66,6 +66,10 @@ class ErrorCode(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): TimeTickLongDelay: _ClassVar[ErrorCode] NotReadyServe: _ClassVar[ErrorCode] NotReadyCoordActivating: _ClassVar[ErrorCode] + CreatePrivilegeGroupFailure: _ClassVar[ErrorCode] + DropPrivilegeGroupFailure: _ClassVar[ErrorCode] + ListPrivilegeGroupsFailure: _ClassVar[ErrorCode] + OperatePrivilegeGroupFailure: _ClassVar[ErrorCode] DataCoordNA: _ClassVar[ErrorCode] DDRequestRace: _ClassVar[ErrorCode] @@ -89,6 +93,13 @@ class SegmentState(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): Dropped: _ClassVar[SegmentState] 
Importing: _ClassVar[SegmentState] +class SegmentLevel(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + Legacy: _ClassVar[SegmentLevel] + L0: _ClassVar[SegmentLevel] + L1: _ClassVar[SegmentLevel] + L2: _ClassVar[SegmentLevel] + class PlaceholderType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = () None: _ClassVar[PlaceholderType] @@ -144,6 +155,9 @@ class MsgType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): Flush: _ClassVar[MsgType] ResendSegmentStats: _ClassVar[MsgType] Upsert: _ClassVar[MsgType] + ManualFlush: _ClassVar[MsgType] + FlushSegment: _ClassVar[MsgType] + CreateSegment: _ClassVar[MsgType] Search: _ClassVar[MsgType] SearchResult: _ClassVar[MsgType] GetIndexState: _ClassVar[MsgType] @@ -194,6 +208,10 @@ class MsgType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): SelectGrant: _ClassVar[MsgType] RefreshPolicyInfoCache: _ClassVar[MsgType] ListPolicy: _ClassVar[MsgType] + CreatePrivilegeGroup: _ClassVar[MsgType] + DropPrivilegeGroup: _ClassVar[MsgType] + ListPrivilegeGroups: _ClassVar[MsgType] + OperatePrivilegeGroup: _ClassVar[MsgType] CreateResourceGroup: _ClassVar[MsgType] DropResourceGroup: _ClassVar[MsgType] ListResourceGroups: _ClassVar[MsgType] @@ -204,6 +222,8 @@ class MsgType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): CreateDatabase: _ClassVar[MsgType] DropDatabase: _ClassVar[MsgType] ListDatabases: _ClassVar[MsgType] + AlterDatabase: _ClassVar[MsgType] + DescribeDatabase: _ClassVar[MsgType] class DslType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = () @@ -292,6 +312,16 @@ class ObjectPrivilege(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): PrivilegeListAliases: _ClassVar[ObjectPrivilege] PrivilegeUpdateResourceGroups: _ClassVar[ObjectPrivilege] PrivilegeAlterDatabase: _ClassVar[ObjectPrivilege] + PrivilegeDescribeDatabase: _ClassVar[ObjectPrivilege] + PrivilegeBackupRBAC: _ClassVar[ObjectPrivilege] + PrivilegeRestoreRBAC: _ClassVar[ObjectPrivilege] + PrivilegeGroupReadOnly: _ClassVar[ObjectPrivilege] + PrivilegeGroupReadWrite: _ClassVar[ObjectPrivilege] + PrivilegeGroupAdmin: _ClassVar[ObjectPrivilege] + PrivilegeCreatePrivilegeGroup: _ClassVar[ObjectPrivilege] + PrivilegeDropPrivilegeGroup: _ClassVar[ObjectPrivilege] + PrivilegeListPrivilegeGroups: _ClassVar[ObjectPrivilege] + PrivilegeOperatePrivilegeGroup: _ClassVar[ObjectPrivilege] class StateCode(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = () @@ -364,6 +394,10 @@ DiskQuotaExhausted: ErrorCode TimeTickLongDelay: ErrorCode NotReadyServe: ErrorCode NotReadyCoordActivating: ErrorCode +CreatePrivilegeGroupFailure: ErrorCode +DropPrivilegeGroupFailure: ErrorCode +ListPrivilegeGroupsFailure: ErrorCode +OperatePrivilegeGroupFailure: ErrorCode DataCoordNA: ErrorCode DDRequestRace: ErrorCode IndexStateNone: IndexState @@ -380,6 +414,10 @@ Flushed: SegmentState Flushing: SegmentState Dropped: SegmentState Importing: SegmentState +Legacy: SegmentLevel +L0: SegmentLevel +L1: SegmentLevel +L2: SegmentLevel None: PlaceholderType BinaryVector: PlaceholderType FloatVector: PlaceholderType @@ -430,6 +468,9 @@ Delete: MsgType Flush: MsgType ResendSegmentStats: MsgType Upsert: MsgType +ManualFlush: MsgType +FlushSegment: MsgType +CreateSegment: MsgType Search: MsgType SearchResult: MsgType GetIndexState: MsgType @@ -480,6 +521,10 @@ OperatePrivilege: MsgType SelectGrant: MsgType RefreshPolicyInfoCache: MsgType ListPolicy: MsgType +CreatePrivilegeGroup: MsgType +DropPrivilegeGroup: MsgType +ListPrivilegeGroups: MsgType 
+OperatePrivilegeGroup: MsgType CreateResourceGroup: MsgType DropResourceGroup: MsgType ListResourceGroups: MsgType @@ -490,6 +535,8 @@ UpdateResourceGroups: MsgType CreateDatabase: MsgType DropDatabase: MsgType ListDatabases: MsgType +AlterDatabase: MsgType +DescribeDatabase: MsgType Dsl: DslType BoolExprV1: DslType UndefiedState: CompactionState @@ -560,6 +607,16 @@ PrivilegeDescribeAlias: ObjectPrivilege PrivilegeListAliases: ObjectPrivilege PrivilegeUpdateResourceGroups: ObjectPrivilege PrivilegeAlterDatabase: ObjectPrivilege +PrivilegeDescribeDatabase: ObjectPrivilege +PrivilegeBackupRBAC: ObjectPrivilege +PrivilegeRestoreRBAC: ObjectPrivilege +PrivilegeGroupReadOnly: ObjectPrivilege +PrivilegeGroupReadWrite: ObjectPrivilege +PrivilegeGroupAdmin: ObjectPrivilege +PrivilegeCreatePrivilegeGroup: ObjectPrivilege +PrivilegeDropPrivilegeGroup: ObjectPrivilege +PrivilegeListPrivilegeGroups: ObjectPrivilege +PrivilegeOperatePrivilegeGroup: ObjectPrivilege Initializing: StateCode Healthy: StateCode Abnormal: StateCode diff --git a/pymilvus/grpc_gen/feder_pb2.py b/pymilvus/grpc_gen/feder_pb2.py index 992594e4a..2d24371b4 100644 --- a/pymilvus/grpc_gen/feder_pb2.py +++ b/pymilvus/grpc_gen/feder_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: feder.proto -# Protobuf Python Version: 4.25.0 +# Protobuf Python Version: 4.25.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool diff --git a/pymilvus/grpc_gen/milvus-proto b/pymilvus/grpc_gen/milvus-proto index dfd489935..6de3d96f6 160000 --- a/pymilvus/grpc_gen/milvus-proto +++ b/pymilvus/grpc_gen/milvus-proto @@ -1 +1 @@ -Subproject commit dfd48993512166cf276ecdd56d243a6f43b2a357 +Subproject commit 6de3d96f664f3866796c9e8afe7894e40fcc5029 diff --git a/pymilvus/grpc_gen/milvus_pb2.py b/pymilvus/grpc_gen/milvus_pb2.py index a6bca24d0..7549a2bc4 100644 --- a/pymilvus/grpc_gen/milvus_pb2.py +++ b/pymilvus/grpc_gen/milvus_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: milvus.proto -# Protobuf Python Version: 4.25.0 +# Protobuf Python Version: 4.25.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool @@ -20,7 +20,7 @@ from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0cmilvus.proto\x12\x13milvus.proto.milvus\x1a\x0c\x63ommon.proto\x1a\x08rg.proto\x1a\x0cschema.proto\x1a\x0b\x66\x65\x64\x65r.proto\x1a\tmsg.proto\x1a google/protobuf/descriptor.proto\"\x8d\x01\n\x12\x43reateAliasRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\r\n\x05\x61lias\x18\x04 \x01(\t:\x12\xca>\x0f\x08\x01\x10,\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"r\n\x10\x44ropAliasRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\r\n\x05\x61lias\x18\x03 \x01(\t:\x12\xca>\x0f\x08\x01\x10-\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x8c\x01\n\x11\x41lterAliasRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\r\n\x05\x61lias\x18\x04 \x01(\t:\x12\xca>\x0f\x08\x01\x10,\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"v\n\x14\x44\x65scribeAliasRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\r\n\x05\x61lias\x18\x03 \x01(\t:\x12\xca>\x0f\x08\x01\x10.\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"x\n\x15\x44\x65scribeAliasResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\r\n\x05\x61lias\x18\x03 \x01(\t\x12\x12\n\ncollection\x18\x04 \x01(\t\"~\n\x12ListAliasesRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t:\x12\xca>\x0f\x08\x01\x10/\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"}\n\x13ListAliasesResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x0f\n\x07\x61liases\x18\x04 \x03(\t\"\xb8\x02\n\x17\x43reateCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x0e\n\x06schema\x18\x04 \x01(\x0c\x12\x12\n\nshards_num\x18\x05 \x01(\x05\x12@\n\x11\x63onsistency_level\x18\x06 \x01(\x0e\x32%.milvus.proto.common.ConsistencyLevel\x12\x35\n\nproperties\x18\x07 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x16\n\x0enum_partitions\x18\x08 \x01(\x03:\x12\xca>\x0f\x08\x01\x10\x01\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x81\x01\n\x15\x44ropCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t:\x12\xca>\x0f\x08\x01\x10\x02\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xcf\x01\n\x16\x41lterCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x14\n\x0c\x63ollectionID\x18\x04 
\x01(\x03\x12\x35\n\nproperties\x18\x05 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair:\x12\xca>\x0f\x08\x01\x10\x01\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x80\x01\n\x14HasCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\ntime_stamp\x18\x04 \x01(\x04\"J\n\x0c\x42oolResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\r\n\x05value\x18\x02 \x01(\x08\"L\n\x0eStringResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\r\n\x05value\x18\x02 \x01(\t\"\xaf\x01\n\x19\x44\x65scribeCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x14\n\x0c\x63ollectionID\x18\x04 \x01(\x03\x12\x12\n\ntime_stamp\x18\x05 \x01(\x04:\x12\xca>\x0f\x08\x01\x10\x03\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xb9\x04\n\x1a\x44\x65scribeCollectionResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x35\n\x06schema\x18\x02 \x01(\x0b\x32%.milvus.proto.schema.CollectionSchema\x12\x14\n\x0c\x63ollectionID\x18\x03 \x01(\x03\x12\x1d\n\x15virtual_channel_names\x18\x04 \x03(\t\x12\x1e\n\x16physical_channel_names\x18\x05 \x03(\t\x12\x19\n\x11\x63reated_timestamp\x18\x06 \x01(\x04\x12\x1d\n\x15\x63reated_utc_timestamp\x18\x07 \x01(\x04\x12\x12\n\nshards_num\x18\x08 \x01(\x05\x12\x0f\n\x07\x61liases\x18\t \x03(\t\x12\x39\n\x0fstart_positions\x18\n \x03(\x0b\x32 .milvus.proto.common.KeyDataPair\x12@\n\x11\x63onsistency_level\x18\x0b \x01(\x0e\x32%.milvus.proto.common.ConsistencyLevel\x12\x17\n\x0f\x63ollection_name\x18\x0c \x01(\t\x12\x35\n\nproperties\x18\r \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x0f\n\x07\x64\x62_name\x18\x0e \x01(\t\x12\x16\n\x0enum_partitions\x18\x0f \x01(\x03\x12\r\n\x05\x64\x62_id\x18\x10 \x01(\x03\"\xb8\x01\n\x15LoadCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0ereplica_number\x18\x04 \x01(\x05\x12\x17\n\x0fresource_groups\x18\x05 \x03(\t\x12\x0f\n\x07refresh\x18\x06 \x01(\x08:\x07\xca>\x04\x10\x05\x18\x03\"y\n\x18ReleaseCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t:\x07\xca>\x04\x10\x06\x18\x03\"\xab\x01\n\x14GetStatisticsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x17\n\x0fpartition_names\x18\x04 \x03(\t\x12\x1b\n\x13guarantee_timestamp\x18\x05 \x01(\x04:\x07\xca>\x04\x10\n\x18\x03\"v\n\x15GetStatisticsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x30\n\x05stats\x18\x02 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\"\x7f\n\x1eGetCollectionStatisticsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t:\x07\xca>\x04\x10\n\x18\x03\"\x80\x01\n\x1fGetCollectionStatisticsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x30\n\x05stats\x18\x02 
\x03(\x0b\x32!.milvus.proto.common.KeyValuePair\"\xb4\x01\n\x16ShowCollectionsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x12\n\ntime_stamp\x18\x03 \x01(\x04\x12+\n\x04type\x18\x04 \x01(\x0e\x32\x1d.milvus.proto.milvus.ShowType\x12\x1c\n\x10\x63ollection_names\x18\x05 \x03(\tB\x02\x18\x01\"\xf7\x01\n\x17ShowCollectionsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x18\n\x10\x63ollection_names\x18\x02 \x03(\t\x12\x16\n\x0e\x63ollection_ids\x18\x03 \x03(\x03\x12\x1a\n\x12\x63reated_timestamps\x18\x04 \x03(\x04\x12\x1e\n\x16\x63reated_utc_timestamps\x18\x05 \x03(\x04\x12 \n\x14inMemory_percentages\x18\x06 \x03(\x03\x42\x02\x18\x01\x12\x1f\n\x17query_service_available\x18\x07 \x03(\x08\"\x8f\x01\n\x16\x43reatePartitionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t:\x07\xca>\x04\x10\'\x18\x03\"\x8d\x01\n\x14\x44ropPartitionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t:\x07\xca>\x04\x10(\x18\x03\"\x8c\x01\n\x13HasPartitionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t:\x07\xca>\x04\x10*\x18\x03\"\xd1\x01\n\x15LoadPartitionsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x17\n\x0fpartition_names\x18\x04 \x03(\t\x12\x16\n\x0ereplica_number\x18\x05 \x01(\x05\x12\x17\n\x0fresource_groups\x18\x06 \x03(\t\x12\x0f\n\x07refresh\x18\x07 \x01(\x08:\x07\xca>\x04\x10\x05\x18\x03\"\x92\x01\n\x18ReleasePartitionsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x17\n\x0fpartition_names\x18\x04 \x03(\t:\x07\xca>\x04\x10\x06\x18\x03\"\x8d\x01\n\x1dGetPartitionStatisticsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t\"\x7f\n\x1eGetPartitionStatisticsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x30\n\x05stats\x18\x02 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\"\xd6\x01\n\x15ShowPartitionsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x14\n\x0c\x63ollectionID\x18\x04 \x01(\x03\x12\x17\n\x0fpartition_names\x18\x05 \x03(\t\x12/\n\x04type\x18\x06 \x01(\x0e\x32\x1d.milvus.proto.milvus.ShowTypeB\x02\x18\x01:\x07\xca>\x04\x10)\x18\x03\"\xd2\x01\n\x16ShowPartitionsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x17\n\x0fpartition_names\x18\x02 \x03(\t\x12\x14\n\x0cpartitionIDs\x18\x03 \x03(\x03\x12\x1a\n\x12\x63reated_timestamps\x18\x04 \x03(\x04\x12\x1e\n\x16\x63reated_utc_timestamps\x18\x05 \x03(\x04\x12 \n\x14inMemory_percentages\x18\x06 
\x03(\x03\x42\x02\x18\x01\"m\n\x16\x44\x65scribeSegmentRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x14\n\x0c\x63ollectionID\x18\x02 \x01(\x03\x12\x11\n\tsegmentID\x18\x03 \x01(\x03\"\x8f\x01\n\x17\x44\x65scribeSegmentResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x0f\n\x07indexID\x18\x02 \x01(\x03\x12\x0f\n\x07\x62uildID\x18\x03 \x01(\x03\x12\x14\n\x0c\x65nable_index\x18\x04 \x01(\x08\x12\x0f\n\x07\x66ieldID\x18\x05 \x01(\x03\"l\n\x13ShowSegmentsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x14\n\x0c\x63ollectionID\x18\x02 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x03 \x01(\x03\"W\n\x14ShowSegmentsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x12\n\nsegmentIDs\x18\x02 \x03(\x03\"\xd4\x01\n\x12\x43reateIndexRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nfield_name\x18\x04 \x01(\t\x12\x37\n\x0c\x65xtra_params\x18\x05 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x12\n\nindex_name\x18\x06 \x01(\t:\x07\xca>\x04\x10\x0b\x18\x03\"\xbf\x01\n\x11\x41lterIndexRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nindex_name\x18\x04 \x01(\t\x12\x37\n\x0c\x65xtra_params\x18\x05 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair:\x07\xca>\x04\x10\x0b\x18\x03\"\xb0\x01\n\x14\x44\x65scribeIndexRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nfield_name\x18\x04 \x01(\t\x12\x12\n\nindex_name\x18\x05 \x01(\t\x12\x11\n\ttimestamp\x18\x06 \x01(\x04:\x07\xca>\x04\x10\x0c\x18\x03\"\x95\x02\n\x10IndexDescription\x12\x12\n\nindex_name\x18\x01 \x01(\t\x12\x0f\n\x07indexID\x18\x02 \x01(\x03\x12\x31\n\x06params\x18\x03 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x12\n\nfield_name\x18\x04 \x01(\t\x12\x14\n\x0cindexed_rows\x18\x05 \x01(\x03\x12\x12\n\ntotal_rows\x18\x06 \x01(\x03\x12.\n\x05state\x18\x07 \x01(\x0e\x32\x1f.milvus.proto.common.IndexState\x12\x1f\n\x17index_state_fail_reason\x18\x08 \x01(\t\x12\x1a\n\x12pending_index_rows\x18\t \x01(\x03\"\x87\x01\n\x15\x44\x65scribeIndexResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x41\n\x12index_descriptions\x18\x02 \x03(\x0b\x32%.milvus.proto.milvus.IndexDescription\"\xa5\x01\n\x1cGetIndexBuildProgressRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nfield_name\x18\x04 \x01(\t\x12\x12\n\nindex_name\x18\x05 \x01(\t:\x07\xca>\x04\x10\x0c\x18\x03\"v\n\x1dGetIndexBuildProgressResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x14\n\x0cindexed_rows\x18\x02 \x01(\x03\x12\x12\n\ntotal_rows\x18\x03 \x01(\x03\"\x9d\x01\n\x14GetIndexStateRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nfield_name\x18\x04 \x01(\t\x12\x12\n\nindex_name\x18\x05 \x01(\t:\x07\xca>\x04\x10\x0c\x18\x03\"\x89\x01\n\x15GetIndexStateResponse\x12+\n\x06status\x18\x01 
\x01(\x0b\x32\x1b.milvus.proto.common.Status\x12.\n\x05state\x18\x02 \x01(\x0e\x32\x1f.milvus.proto.common.IndexState\x12\x13\n\x0b\x66\x61il_reason\x18\x03 \x01(\t\"\x99\x01\n\x10\x44ropIndexRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nfield_name\x18\x04 \x01(\t\x12\x12\n\nindex_name\x18\x05 \x01(\t:\x07\xca>\x04\x10\r\x18\x03\"\xe0\x01\n\rInsertRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t\x12\x33\n\x0b\x66ields_data\x18\x05 \x03(\x0b\x32\x1e.milvus.proto.schema.FieldData\x12\x11\n\thash_keys\x18\x06 \x03(\r\x12\x10\n\x08num_rows\x18\x07 \x01(\r:\x07\xca>\x04\x10\x08\x18\x03\"\xe0\x01\n\rUpsertRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t\x12\x33\n\x0b\x66ields_data\x18\x05 \x03(\x0b\x32\x1e.milvus.proto.schema.FieldData\x12\x11\n\thash_keys\x18\x06 \x03(\r\x12\x10\n\x08num_rows\x18\x07 \x01(\r:\x07\xca>\x04\x10\x19\x18\x03\"\xf0\x01\n\x0eMutationResult\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12%\n\x03IDs\x18\x02 \x01(\x0b\x32\x18.milvus.proto.schema.IDs\x12\x12\n\nsucc_index\x18\x03 \x03(\r\x12\x11\n\terr_index\x18\x04 \x03(\r\x12\x14\n\x0c\x61\x63knowledged\x18\x05 \x01(\x08\x12\x12\n\ninsert_cnt\x18\x06 \x01(\x03\x12\x12\n\ndelete_cnt\x18\x07 \x01(\x03\x12\x12\n\nupsert_cnt\x18\x08 \x01(\x03\x12\x11\n\ttimestamp\x18\t \x01(\x04\"\xe9\x01\n\rDeleteRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t\x12\x0c\n\x04\x65xpr\x18\x05 \x01(\t\x12\x11\n\thash_keys\x18\x06 \x03(\r\x12@\n\x11\x63onsistency_level\x18\x07 \x01(\x0e\x32%.milvus.proto.common.ConsistencyLevel:\x07\xca>\x04\x10\t\x18\x03\"\xb0\x01\n\x10SubSearchRequest\x12\x0b\n\x03\x64sl\x18\x01 \x01(\t\x12\x19\n\x11placeholder_group\x18\x02 \x01(\x0c\x12.\n\x08\x64sl_type\x18\x03 \x01(\x0e\x32\x1c.milvus.proto.common.DslType\x12\x38\n\rsearch_params\x18\x04 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\n\n\x02nq\x18\x05 \x01(\x03\"\xcc\x04\n\rSearchRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x17\n\x0fpartition_names\x18\x04 \x03(\t\x12\x0b\n\x03\x64sl\x18\x05 \x01(\t\x12\x19\n\x11placeholder_group\x18\x06 \x01(\x0c\x12.\n\x08\x64sl_type\x18\x07 \x01(\x0e\x32\x1c.milvus.proto.common.DslType\x12\x15\n\routput_fields\x18\x08 \x03(\t\x12\x38\n\rsearch_params\x18\t \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x18\n\x10travel_timestamp\x18\n \x01(\x04\x12\x1b\n\x13guarantee_timestamp\x18\x0b \x01(\x04\x12\n\n\x02nq\x18\x0c \x01(\x03\x12\x1b\n\x13not_return_all_meta\x18\r \x01(\x08\x12@\n\x11\x63onsistency_level\x18\x0e \x01(\x0e\x32%.milvus.proto.common.ConsistencyLevel\x12\x1f\n\x17use_default_consistency\x18\x0f \x01(\x08\x12\x1e\n\x16search_by_primary_keys\x18\x10 \x01(\x08\x12\x37\n\x08sub_reqs\x18\x11 \x03(\x0b\x32%.milvus.proto.milvus.SubSearchRequest:\x07\xca>\x04\x10\x0e\x18\x03\"5\n\x04Hits\x12\x0b\n\x03IDs\x18\x01 
\x03(\x03\x12\x10\n\x08row_data\x18\x02 \x03(\x0c\x12\x0e\n\x06scores\x18\x03 \x03(\x02\"\x8d\x01\n\rSearchResults\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x36\n\x07results\x18\x02 \x01(\x0b\x32%.milvus.proto.schema.SearchResultData\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\"\xc9\x03\n\x13HybridSearchRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x17\n\x0fpartition_names\x18\x04 \x03(\t\x12\x34\n\x08requests\x18\x05 \x03(\x0b\x32\".milvus.proto.milvus.SearchRequest\x12\x36\n\x0brank_params\x18\x06 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x18\n\x10travel_timestamp\x18\x07 \x01(\x04\x12\x1b\n\x13guarantee_timestamp\x18\x08 \x01(\x04\x12\x1b\n\x13not_return_all_meta\x18\t \x01(\x08\x12\x15\n\routput_fields\x18\n \x03(\t\x12@\n\x11\x63onsistency_level\x18\x0b \x01(\x0e\x32%.milvus.proto.common.ConsistencyLevel\x12\x1f\n\x17use_default_consistency\x18\x0c \x01(\x08:\x07\xca>\x04\x10\x0e\x18\x03\"n\n\x0c\x46lushRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x18\n\x10\x63ollection_names\x18\x03 \x03(\t:\x07\xca>\x04\x10\x0f \x03\"\x9b\x05\n\rFlushResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12G\n\x0b\x63oll_segIDs\x18\x03 \x03(\x0b\x32\x32.milvus.proto.milvus.FlushResponse.CollSegIDsEntry\x12R\n\x11\x66lush_coll_segIDs\x18\x04 \x03(\x0b\x32\x37.milvus.proto.milvus.FlushResponse.FlushCollSegIDsEntry\x12N\n\x0f\x63oll_seal_times\x18\x05 \x03(\x0b\x32\x35.milvus.proto.milvus.FlushResponse.CollSealTimesEntry\x12J\n\rcoll_flush_ts\x18\x06 \x03(\x0b\x32\x33.milvus.proto.milvus.FlushResponse.CollFlushTsEntry\x1aQ\n\x0f\x43ollSegIDsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.milvus.proto.schema.LongArray:\x02\x38\x01\x1aV\n\x14\x46lushCollSegIDsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.milvus.proto.schema.LongArray:\x02\x38\x01\x1a\x34\n\x12\x43ollSealTimesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1a\x32\n\x10\x43ollFlushTsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x04:\x02\x38\x01\"\x9b\x03\n\x0cQueryRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x0c\n\x04\x65xpr\x18\x04 \x01(\t\x12\x15\n\routput_fields\x18\x05 \x03(\t\x12\x17\n\x0fpartition_names\x18\x06 \x03(\t\x12\x18\n\x10travel_timestamp\x18\x07 \x01(\x04\x12\x1b\n\x13guarantee_timestamp\x18\x08 \x01(\x04\x12\x37\n\x0cquery_params\x18\t \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x1b\n\x13not_return_all_meta\x18\n \x01(\x08\x12@\n\x11\x63onsistency_level\x18\x0b \x01(\x0e\x32%.milvus.proto.common.ConsistencyLevel\x12\x1f\n\x17use_default_consistency\x18\x0c \x01(\x08:\x07\xca>\x04\x10\x10\x18\x03\"\xa0\x01\n\x0cQueryResults\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x33\n\x0b\x66ields_data\x18\x02 \x03(\x0b\x32\x1e.milvus.proto.schema.FieldData\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x15\n\routput_fields\x18\x04 \x03(\t\"}\n\tVectorIDs\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x12\n\nfield_name\x18\x02 \x01(\t\x12*\n\x08id_array\x18\x03 
\x01(\x0b\x32\x18.milvus.proto.schema.IDs\x12\x17\n\x0fpartition_names\x18\x04 \x03(\t\"\x83\x01\n\x0cVectorsArray\x12\x32\n\x08id_array\x18\x01 \x01(\x0b\x32\x1e.milvus.proto.milvus.VectorIDsH\x00\x12\x36\n\ndata_array\x18\x02 \x01(\x0b\x32 .milvus.proto.schema.VectorFieldH\x00\x42\x07\n\x05\x61rray\"\xdd\x01\n\x13\x43\x61lcDistanceRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x32\n\x07op_left\x18\x02 \x01(\x0b\x32!.milvus.proto.milvus.VectorsArray\x12\x33\n\x08op_right\x18\x03 \x01(\x0b\x32!.milvus.proto.milvus.VectorsArray\x12\x31\n\x06params\x18\x04 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\"\xb5\x01\n\x13\x43\x61lcDistanceResults\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x31\n\x08int_dist\x18\x02 \x01(\x0b\x32\x1d.milvus.proto.schema.IntArrayH\x00\x12\x35\n\nfloat_dist\x18\x03 \x01(\x0b\x32\x1f.milvus.proto.schema.FloatArrayH\x00\x42\x07\n\x05\x61rray\"b\n\x0f\x46lushAllRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t:\x12\xca>\x0f\x08\x01\x10&\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"U\n\x10\x46lushAllResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x14\n\x0c\x66lush_all_ts\x18\x02 \x01(\x04\"\x99\x01\n\x15PersistentSegmentInfo\x12\x11\n\tsegmentID\x18\x01 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x02 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x03 \x01(\x03\x12\x10\n\x08num_rows\x18\x04 \x01(\x03\x12\x30\n\x05state\x18\x05 \x01(\x0e\x32!.milvus.proto.common.SegmentState\"u\n\x1fGetPersistentSegmentInfoRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0e\n\x06\x64\x62Name\x18\x02 \x01(\t\x12\x16\n\x0e\x63ollectionName\x18\x03 \x01(\t\"\x8a\x01\n GetPersistentSegmentInfoResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x39\n\x05infos\x18\x02 \x03(\x0b\x32*.milvus.proto.milvus.PersistentSegmentInfo\"\xf0\x01\n\x10QuerySegmentInfo\x12\x11\n\tsegmentID\x18\x01 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x02 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x03 \x01(\x03\x12\x10\n\x08mem_size\x18\x04 \x01(\x03\x12\x10\n\x08num_rows\x18\x05 \x01(\x03\x12\x12\n\nindex_name\x18\x06 \x01(\t\x12\x0f\n\x07indexID\x18\x07 \x01(\x03\x12\x12\n\x06nodeID\x18\x08 \x01(\x03\x42\x02\x18\x01\x12\x30\n\x05state\x18\t \x01(\x0e\x32!.milvus.proto.common.SegmentState\x12\x0f\n\x07nodeIds\x18\n \x03(\x03\"p\n\x1aGetQuerySegmentInfoRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0e\n\x06\x64\x62Name\x18\x02 \x01(\t\x12\x16\n\x0e\x63ollectionName\x18\x03 \x01(\t\"\x80\x01\n\x1bGetQuerySegmentInfoResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x34\n\x05infos\x18\x02 \x03(\x0b\x32%.milvus.proto.milvus.QuerySegmentInfo\"$\n\x0c\x44ummyRequest\x12\x14\n\x0crequest_type\x18\x01 \x01(\t\"!\n\rDummyResponse\x12\x10\n\x08response\x18\x01 \x01(\t\"\x15\n\x13RegisterLinkRequest\"r\n\x14RegisterLinkResponse\x12-\n\x07\x61\x64\x64ress\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.Address\x12+\n\x06status\x18\x02 \x01(\x0b\x32\x1b.milvus.proto.common.Status\"P\n\x11GetMetricsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07request\x18\x02 \x01(\t\"k\n\x12GetMetricsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x10\n\x08response\x18\x02 \x01(\t\x12\x16\n\x0e\x63omponent_name\x18\x03 
\x01(\t\"\x98\x01\n\rComponentInfo\x12\x0e\n\x06nodeID\x18\x01 \x01(\x03\x12\x0c\n\x04role\x18\x02 \x01(\t\x12\x32\n\nstate_code\x18\x03 \x01(\x0e\x32\x1e.milvus.proto.common.StateCode\x12\x35\n\nextra_info\x18\x04 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\"\xb2\x01\n\x0f\x43omponentStates\x12\x31\n\x05state\x18\x01 \x01(\x0b\x32\".milvus.proto.milvus.ComponentInfo\x12?\n\x13subcomponent_states\x18\x02 \x03(\x0b\x32\".milvus.proto.milvus.ComponentInfo\x12+\n\x06status\x18\x03 \x01(\x0b\x32\x1b.milvus.proto.common.Status\"\x1b\n\x19GetComponentStatesRequest\"\xb6\x01\n\x12LoadBalanceRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x12\n\nsrc_nodeID\x18\x02 \x01(\x03\x12\x13\n\x0b\x64st_nodeIDs\x18\x03 \x03(\x03\x12\x19\n\x11sealed_segmentIDs\x18\x04 \x03(\x03\x12\x16\n\x0e\x63ollectionName\x18\x05 \x01(\t\x12\x0f\n\x07\x64\x62_name\x18\x06 \x01(\t:\x07\xca>\x04\x10\x11\x18\x05\"e\n\x17ManualCompactionRequest\x12\x14\n\x0c\x63ollectionID\x18\x01 \x01(\x03\x12\x12\n\ntimetravel\x18\x02 \x01(\x04\x12\x17\n\x0fmajorCompaction\x18\x03 \x01(\x08:\x07\xca>\x04\x10\x07\x18\x01\"z\n\x18ManualCompactionResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x14\n\x0c\x63ompactionID\x18\x02 \x01(\x03\x12\x1b\n\x13\x63ompactionPlanCount\x18\x03 \x01(\x05\"1\n\x19GetCompactionStateRequest\x12\x14\n\x0c\x63ompactionID\x18\x01 \x01(\x03\"\xdd\x01\n\x1aGetCompactionStateResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x33\n\x05state\x18\x02 \x01(\x0e\x32$.milvus.proto.common.CompactionState\x12\x17\n\x0f\x65xecutingPlanNo\x18\x03 \x01(\x03\x12\x15\n\rtimeoutPlanNo\x18\x04 \x01(\x03\x12\x17\n\x0f\x63ompletedPlanNo\x18\x05 \x01(\x03\x12\x14\n\x0c\x66\x61iledPlanNo\x18\x06 \x01(\x03\"1\n\x19GetCompactionPlansRequest\x12\x14\n\x0c\x63ompactionID\x18\x01 \x01(\x03\"\xbc\x01\n\x1aGetCompactionPlansResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x33\n\x05state\x18\x02 \x01(\x0e\x32$.milvus.proto.common.CompactionState\x12<\n\nmergeInfos\x18\x03 \x03(\x0b\x32(.milvus.proto.milvus.CompactionMergeInfo\"6\n\x13\x43ompactionMergeInfo\x12\x0f\n\x07sources\x18\x01 \x03(\x03\x12\x0e\n\x06target\x18\x02 \x01(\x03\"o\n\x14GetFlushStateRequest\x12\x12\n\nsegmentIDs\x18\x01 \x03(\x03\x12\x10\n\x08\x66lush_ts\x18\x02 \x01(\x04\x12\x0f\n\x07\x64\x62_name\x18\x03 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x04 \x01(\t:\x07\xca>\x04\x10+\x18\x04\"U\n\x15GetFlushStateResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x0f\n\x07\x66lushed\x18\x02 \x01(\x08\"l\n\x17GetFlushAllStateRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x14\n\x0c\x66lush_all_ts\x18\x02 \x01(\x04\x12\x0f\n\x07\x64\x62_name\x18\x03 \x01(\t\"X\n\x18GetFlushAllStateResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x0f\n\x07\x66lushed\x18\x02 \x01(\x08\"\xe0\x01\n\rImportRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x16\n\x0epartition_name\x18\x02 \x01(\t\x12\x15\n\rchannel_names\x18\x03 \x03(\t\x12\x11\n\trow_based\x18\x04 \x01(\x08\x12\r\n\x05\x66iles\x18\x05 \x03(\t\x12\x32\n\x07options\x18\x06 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x0f\n\x07\x64\x62_name\x18\x07 \x01(\t\x12\x17\n\x0f\x63lustering_info\x18\x08 \x01(\x0c:\x07\xca>\x04\x10\x12\x18\x01\"L\n\x0eImportResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\r\n\x05tasks\x18\x02 
\x03(\x03\"%\n\x15GetImportStateRequest\x12\x0c\n\x04task\x18\x01 \x01(\x03\"\x97\x02\n\x16GetImportStateResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12/\n\x05state\x18\x02 \x01(\x0e\x32 .milvus.proto.common.ImportState\x12\x11\n\trow_count\x18\x03 \x01(\x03\x12\x0f\n\x07id_list\x18\x04 \x03(\x03\x12\x30\n\x05infos\x18\x05 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\n\n\x02id\x18\x06 \x01(\x03\x12\x15\n\rcollection_id\x18\x07 \x01(\x03\x12\x13\n\x0bsegment_ids\x18\x08 \x03(\x03\x12\x11\n\tcreate_ts\x18\t \x01(\x03\"Q\n\x16ListImportTasksRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\r\n\x05limit\x18\x02 \x01(\x03\x12\x0f\n\x07\x64\x62_name\x18\x03 \x01(\t\"\x82\x01\n\x17ListImportTasksResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12:\n\x05tasks\x18\x02 \x03(\x0b\x32+.milvus.proto.milvus.GetImportStateResponse\"\x9a\x01\n\x12GetReplicasRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x14\n\x0c\x63ollectionID\x18\x02 \x01(\x03\x12\x18\n\x10with_shard_nodes\x18\x03 \x01(\x08\x12\x17\n\x0f\x63ollection_name\x18\x04 \x01(\t\x12\x0f\n\x07\x64\x62_name\x18\x05 \x01(\t\"v\n\x13GetReplicasResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x32\n\x08replicas\x18\x02 \x03(\x0b\x32 .milvus.proto.milvus.ReplicaInfo\"\xc1\x02\n\x0bReplicaInfo\x12\x11\n\treplicaID\x18\x01 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x02 \x01(\x03\x12\x15\n\rpartition_ids\x18\x03 \x03(\x03\x12\x39\n\x0eshard_replicas\x18\x04 \x03(\x0b\x32!.milvus.proto.milvus.ShardReplica\x12\x10\n\x08node_ids\x18\x05 \x03(\x03\x12\x1b\n\x13resource_group_name\x18\x06 \x01(\t\x12P\n\x11num_outbound_node\x18\x07 \x03(\x0b\x32\x35.milvus.proto.milvus.ReplicaInfo.NumOutboundNodeEntry\x1a\x36\n\x14NumOutboundNodeEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"`\n\x0cShardReplica\x12\x10\n\x08leaderID\x18\x01 \x01(\x03\x12\x13\n\x0bleader_addr\x18\x02 \x01(\t\x12\x17\n\x0f\x64m_channel_name\x18\x03 \x01(\t\x12\x10\n\x08node_ids\x18\x04 \x03(\x03\"\xbe\x01\n\x17\x43reateCredentialRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x10\n\x08password\x18\x03 \x01(\t\x12\x1e\n\x16\x63reated_utc_timestamps\x18\x04 \x01(\x04\x12\x1f\n\x17modified_utc_timestamps\x18\x05 \x01(\x04:\x12\xca>\x0f\x08\x01\x10\x13\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xcd\x01\n\x17UpdateCredentialRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x13\n\x0boldPassword\x18\x03 \x01(\t\x12\x13\n\x0bnewPassword\x18\x04 \x01(\t\x12\x1e\n\x16\x63reated_utc_timestamps\x18\x05 \x01(\x04\x12\x1f\n\x17modified_utc_timestamps\x18\x06 \x01(\x04:\t\xca>\x06\x08\x02\x10\x14\x18\x02\"k\n\x17\x44\x65leteCredentialRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x10\n\x08username\x18\x02 \x01(\t:\x12\xca>\x0f\x08\x01\x10\x15\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"W\n\x15ListCredUsersResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x11\n\tusernames\x18\x02 \x03(\t\"V\n\x14ListCredUsersRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase:\x12\xca>\x0f\x08\x01\x10\x16\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x1a\n\nRoleEntity\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1a\n\nUserEntity\x12\x0c\n\x04name\x18\x01 
\x01(\t\"\x84\x01\n\x11\x43reateRoleRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12/\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x1f.milvus.proto.milvus.RoleEntity:\x12\xca>\x0f\x08\x01\x10\x13\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"d\n\x0f\x44ropRoleRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x11\n\trole_name\x18\x02 \x01(\t:\x12\xca>\x0f\x08\x01\x10\x15\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xb5\x01\n\x16OperateUserRoleRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x11\n\trole_name\x18\x03 \x01(\t\x12\x36\n\x04type\x18\x04 \x01(\x0e\x32(.milvus.proto.milvus.OperateUserRoleType:\x12\xca>\x0f\x08\x01\x10\x17\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x9d\x01\n\x11SelectRoleRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12-\n\x04role\x18\x02 \x01(\x0b\x32\x1f.milvus.proto.milvus.RoleEntity\x12\x19\n\x11include_user_info\x18\x03 \x01(\x08:\x12\xca>\x0f\x08\x01\x10\x16\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"k\n\nRoleResult\x12-\n\x04role\x18\x01 \x01(\x0b\x32\x1f.milvus.proto.milvus.RoleEntity\x12.\n\x05users\x18\x02 \x03(\x0b\x32\x1f.milvus.proto.milvus.UserEntity\"s\n\x12SelectRoleResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x30\n\x07results\x18\x02 \x03(\x0b\x32\x1f.milvus.proto.milvus.RoleResult\"\x94\x01\n\x11SelectUserRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12-\n\x04user\x18\x02 \x01(\x0b\x32\x1f.milvus.proto.milvus.UserEntity\x12\x19\n\x11include_role_info\x18\x03 \x01(\x08:\t\xca>\x06\x08\x02\x10\x18\x18\x02\"k\n\nUserResult\x12-\n\x04user\x18\x01 \x01(\x0b\x32\x1f.milvus.proto.milvus.UserEntity\x12.\n\x05roles\x18\x02 \x03(\x0b\x32\x1f.milvus.proto.milvus.RoleEntity\"s\n\x12SelectUserResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x30\n\x07results\x18\x02 \x03(\x0b\x32\x1f.milvus.proto.milvus.UserResult\"\x1c\n\x0cObjectEntity\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1f\n\x0fPrivilegeEntity\x12\x0c\n\x04name\x18\x01 \x01(\t\"w\n\rGrantorEntity\x12-\n\x04user\x18\x01 \x01(\x0b\x32\x1f.milvus.proto.milvus.UserEntity\x12\x37\n\tprivilege\x18\x02 \x01(\x0b\x32$.milvus.proto.milvus.PrivilegeEntity\"L\n\x14GrantPrivilegeEntity\x12\x34\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\".milvus.proto.milvus.GrantorEntity\"\xca\x01\n\x0bGrantEntity\x12-\n\x04role\x18\x01 \x01(\x0b\x32\x1f.milvus.proto.milvus.RoleEntity\x12\x31\n\x06object\x18\x02 \x01(\x0b\x32!.milvus.proto.milvus.ObjectEntity\x12\x13\n\x0bobject_name\x18\x03 \x01(\t\x12\x33\n\x07grantor\x18\x04 \x01(\x0b\x32\".milvus.proto.milvus.GrantorEntity\x12\x0f\n\x07\x64\x62_name\x18\x05 \x01(\t\"\x86\x01\n\x12SelectGrantRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x30\n\x06\x65ntity\x18\x02 \x01(\x0b\x32 .milvus.proto.milvus.GrantEntity:\x12\xca>\x0f\x08\x01\x10\x16\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"v\n\x13SelectGrantResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x32\n\x08\x65ntities\x18\x02 \x03(\x0b\x32 .milvus.proto.milvus.GrantEntity\"\xc4\x01\n\x17OperatePrivilegeRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x30\n\x06\x65ntity\x18\x02 \x01(\x0b\x32 .milvus.proto.milvus.GrantEntity\x12\x37\n\x04type\x18\x03 
\x01(\x0e\x32).milvus.proto.milvus.OperatePrivilegeType:\x12\xca>\x0f\x08\x01\x10\x17\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x93\x01\n\x19GetLoadingProgressRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x17\n\x0f\x63ollection_name\x18\x02 \x01(\t\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\x12\x0f\n\x07\x64\x62_name\x18\x04 \x01(\t:\x07\xca>\x04\x10\x05\x18\x02\"u\n\x1aGetLoadingProgressResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x10\n\x08progress\x18\x02 \x01(\x03\x12\x18\n\x10refresh_progress\x18\x03 \x01(\x03\"\x8d\x01\n\x13GetLoadStateRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x17\n\x0f\x63ollection_name\x18\x02 \x01(\t\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\x12\x0f\n\x07\x64\x62_name\x18\x04 \x01(\t:\x07\xca>\x04\x10\x05\x18\x02\"r\n\x14GetLoadStateResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12-\n\x05state\x18\x02 \x01(\x0e\x32\x1e.milvus.proto.common.LoadState\"\x1c\n\tMilvusExt\x12\x0f\n\x07version\x18\x01 \x01(\t\"\x13\n\x11GetVersionRequest\"R\n\x12GetVersionResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x0f\n\x07version\x18\x02 \x01(\t\"\x14\n\x12\x43heckHealthRequest\"\x9d\x01\n\x13\x43heckHealthResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x11\n\tisHealthy\x18\x02 \x01(\x08\x12\x0f\n\x07reasons\x18\x03 \x03(\t\x12\x35\n\x0cquota_states\x18\x04 \x03(\x0e\x32\x1f.milvus.proto.milvus.QuotaState\"\xaa\x01\n\x1a\x43reateResourceGroupRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x16\n\x0eresource_group\x18\x02 \x01(\t\x12\x34\n\x06\x63onfig\x18\x03 \x01(\x0b\x32$.milvus.proto.rg.ResourceGroupConfig:\x12\xca>\x0f\x08\x01\x10\x1a\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x99\x02\n\x1bUpdateResourceGroupsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12]\n\x0fresource_groups\x18\x02 \x03(\x0b\x32\x44.milvus.proto.milvus.UpdateResourceGroupsRequest.ResourceGroupsEntry\x1a[\n\x13ResourceGroupsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32$.milvus.proto.rg.ResourceGroupConfig:\x02\x38\x01:\x12\xca>\x0f\x08\x01\x10\x30\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"r\n\x18\x44ropResourceGroupRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x16\n\x0eresource_group\x18\x02 \x01(\t:\x12\xca>\x0f\x08\x01\x10\x1b\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xa5\x01\n\x13TransferNodeRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x1d\n\x15source_resource_group\x18\x02 \x01(\t\x12\x1d\n\x15target_resource_group\x18\x03 \x01(\t\x12\x10\n\x08num_node\x18\x04 \x01(\x05:\x12\xca>\x0f\x08\x01\x10\x1e\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xd5\x01\n\x16TransferReplicaRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x1d\n\x15source_resource_group\x18\x02 \x01(\t\x12\x1d\n\x15target_resource_group\x18\x03 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x04 \x01(\t\x12\x13\n\x0bnum_replica\x18\x05 \x01(\x03\x12\x0f\n\x07\x64\x62_name\x18\x06 \x01(\t:\x12\xca>\x0f\x08\x01\x10\x1f\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"[\n\x19ListResourceGroupsRequest\x12*\n\x04\x62\x61se\x18\x01 
\x01(\x0b\x32\x1c.milvus.proto.common.MsgBase:\x12\xca>\x0f\x08\x01\x10\x1d\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"b\n\x1aListResourceGroupsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x17\n\x0fresource_groups\x18\x02 \x03(\t\"v\n\x1c\x44\x65scribeResourceGroupRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x16\n\x0eresource_group\x18\x02 \x01(\t:\x12\xca>\x0f\x08\x01\x10\x1c\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x88\x01\n\x1d\x44\x65scribeResourceGroupResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12:\n\x0eresource_group\x18\x02 \x01(\x0b\x32\".milvus.proto.milvus.ResourceGroup\"\xd6\x04\n\rResourceGroup\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08\x63\x61pacity\x18\x02 \x01(\x05\x12\x1a\n\x12num_available_node\x18\x03 \x01(\x05\x12T\n\x12num_loaded_replica\x18\x04 \x03(\x0b\x32\x38.milvus.proto.milvus.ResourceGroup.NumLoadedReplicaEntry\x12R\n\x11num_outgoing_node\x18\x05 \x03(\x0b\x32\x37.milvus.proto.milvus.ResourceGroup.NumOutgoingNodeEntry\x12R\n\x11num_incoming_node\x18\x06 \x03(\x0b\x32\x37.milvus.proto.milvus.ResourceGroup.NumIncomingNodeEntry\x12\x34\n\x06\x63onfig\x18\x07 \x01(\x0b\x32$.milvus.proto.rg.ResourceGroupConfig\x12,\n\x05nodes\x18\x08 \x03(\x0b\x32\x1d.milvus.proto.common.NodeInfo\x1a\x37\n\x15NumLoadedReplicaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x36\n\x14NumOutgoingNodeEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x36\n\x14NumIncomingNodeEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"\x9f\x01\n\x17RenameCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x0f\n\x07oldName\x18\x03 \x01(\t\x12\x0f\n\x07newName\x18\x04 \x01(\t\x12\x11\n\tnewDBName\x18\x05 \x01(\t:\x12\xca>\x0f\x08\x01\x10\"\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xa1\x01\n\x19GetIndexStatisticsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nindex_name\x18\x04 \x01(\t\x12\x11\n\ttimestamp\x18\x05 \x01(\x04:\x07\xca>\x04\x10\x0c\x18\x03\"\x8c\x01\n\x1aGetIndexStatisticsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x41\n\x12index_descriptions\x18\x02 \x03(\x0b\x32%.milvus.proto.milvus.IndexDescription\"r\n\x0e\x43onnectRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x34\n\x0b\x63lient_info\x18\x02 \x01(\x0b\x32\x1f.milvus.proto.common.ClientInfo\"\x88\x01\n\x0f\x43onnectResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x34\n\x0bserver_info\x18\x02 \x01(\x0b\x32\x1f.milvus.proto.common.ServerInfo\x12\x12\n\nidentifier\x18\x03 \x01(\x03\"C\n\x15\x41llocTimestampRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\"X\n\x16\x41llocTimestampResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x11\n\ttimestamp\x18\x02 \x01(\x04\"h\n\x15\x43reateDatabaseRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t:\x12\xca>\x0f\x08\x01\x10#\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"f\n\x13\x44ropDatabaseRequest\x12*\n\x04\x62\x61se\x18\x01 
\x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t:\x12\xca>\x0f\x08\x01\x10$\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"B\n\x14ListDatabasesRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\"q\n\x15ListDatabasesResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x10\n\x08\x64\x62_names\x18\x02 \x03(\t\x12\x19\n\x11\x63reated_timestamp\x18\x03 \x03(\x04\"\xad\x01\n\x14\x41lterDatabaseRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\r\n\x05\x64\x62_id\x18\x03 \x01(\t\x12\x35\n\nproperties\x18\x04 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair:\x12\xca>\x0f\x08\x01\x10\x31\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xf5\x01\n\x17ReplicateMessageRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x14\n\x0c\x63hannel_name\x18\x02 \x01(\t\x12\x0f\n\x07\x42\x65ginTs\x18\x03 \x01(\x04\x12\r\n\x05\x45ndTs\x18\x04 \x01(\x04\x12\x0c\n\x04Msgs\x18\x05 \x03(\x0c\x12\x35\n\x0eStartPositions\x18\x06 \x03(\x0b\x32\x1d.milvus.proto.msg.MsgPosition\x12\x33\n\x0c\x45ndPositions\x18\x07 \x03(\x0b\x32\x1d.milvus.proto.msg.MsgPosition\"Y\n\x18ReplicateMessageResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x10\n\x08position\x18\x02 \x01(\t\"b\n\x15ImportAuthPlaceholder\x12\x0f\n\x07\x64\x62_name\x18\x01 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x02 \x01(\t\x12\x16\n\x0epartition_name\x18\x03 \x01(\t:\x07\xca>\x04\x10\x12\x18\x01\"<\n GetImportProgressAuthPlaceholder\x12\x0f\n\x07\x64\x62_name\x18\x01 \x01(\t:\x07\xca>\x04\x10\x12\x18\x01\"O\n\x1aListImportsAuthPlaceholder\x12\x0f\n\x07\x64\x62_name\x18\x03 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x01 
\x01(\t:\x07\xca>\x04\x10\x12\x18\x01*%\n\x08ShowType\x12\x07\n\x03\x41ll\x10\x00\x12\x0c\n\x08InMemory\x10\x01\x1a\x02\x18\x01*@\n\x13OperateUserRoleType\x12\x11\n\rAddUserToRole\x10\x00\x12\x16\n\x12RemoveUserFromRole\x10\x01*-\n\x14OperatePrivilegeType\x12\t\n\x05Grant\x10\x00\x12\n\n\x06Revoke\x10\x01*]\n\nQuotaState\x12\x0b\n\x07Unknown\x10\x00\x12\x0f\n\x0bReadLimited\x10\x02\x12\x10\n\x0cWriteLimited\x10\x03\x12\x0e\n\nDenyToRead\x10\x04\x12\x0f\n\x0b\x44\x65nyToWrite\x10\x05\x32\xc7\x43\n\rMilvusService\x12_\n\x10\x43reateCollection\x12,.milvus.proto.milvus.CreateCollectionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12[\n\x0e\x44ropCollection\x12*.milvus.proto.milvus.DropCollectionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12_\n\rHasCollection\x12).milvus.proto.milvus.HasCollectionRequest\x1a!.milvus.proto.milvus.BoolResponse\"\x00\x12[\n\x0eLoadCollection\x12*.milvus.proto.milvus.LoadCollectionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12\x61\n\x11ReleaseCollection\x12-.milvus.proto.milvus.ReleaseCollectionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12w\n\x12\x44\x65scribeCollection\x12..milvus.proto.milvus.DescribeCollectionRequest\x1a/.milvus.proto.milvus.DescribeCollectionResponse\"\x00\x12\x86\x01\n\x17GetCollectionStatistics\x12\x33.milvus.proto.milvus.GetCollectionStatisticsRequest\x1a\x34.milvus.proto.milvus.GetCollectionStatisticsResponse\"\x00\x12n\n\x0fShowCollections\x12+.milvus.proto.milvus.ShowCollectionsRequest\x1a,.milvus.proto.milvus.ShowCollectionsResponse\"\x00\x12]\n\x0f\x41lterCollection\x12+.milvus.proto.milvus.AlterCollectionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12]\n\x0f\x43reatePartition\x12+.milvus.proto.milvus.CreatePartitionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12Y\n\rDropPartition\x12).milvus.proto.milvus.DropPartitionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12]\n\x0cHasPartition\x12(.milvus.proto.milvus.HasPartitionRequest\x1a!.milvus.proto.milvus.BoolResponse\"\x00\x12[\n\x0eLoadPartitions\x12*.milvus.proto.milvus.LoadPartitionsRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12\x61\n\x11ReleasePartitions\x12-.milvus.proto.milvus.ReleasePartitionsRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12\x83\x01\n\x16GetPartitionStatistics\x12\x32.milvus.proto.milvus.GetPartitionStatisticsRequest\x1a\x33.milvus.proto.milvus.GetPartitionStatisticsResponse\"\x00\x12k\n\x0eShowPartitions\x12*.milvus.proto.milvus.ShowPartitionsRequest\x1a+.milvus.proto.milvus.ShowPartitionsResponse\"\x00\x12w\n\x12GetLoadingProgress\x12..milvus.proto.milvus.GetLoadingProgressRequest\x1a/.milvus.proto.milvus.GetLoadingProgressResponse\"\x00\x12\x65\n\x0cGetLoadState\x12(.milvus.proto.milvus.GetLoadStateRequest\x1a).milvus.proto.milvus.GetLoadStateResponse\"\x00\x12U\n\x0b\x43reateAlias\x12\'.milvus.proto.milvus.CreateAliasRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12Q\n\tDropAlias\x12%.milvus.proto.milvus.DropAliasRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12S\n\nAlterAlias\x12&.milvus.proto.milvus.AlterAliasRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12h\n\rDescribeAlias\x12).milvus.proto.milvus.DescribeAliasRequest\x1a*.milvus.proto.milvus.DescribeAliasResponse\"\x00\x12\x62\n\x0bListAliases\x12\'.milvus.proto.milvus.ListAliasesRequest\x1a(.milvus.proto.milvus.ListAliasesResponse\"\x00\x12U\n\x0b\x43reateIndex\x12\'.milvus.proto.milvus.CreateIndexRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12S\n\nAlterIndex\x12&.milvus.proto.milvus.AlterIndexRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12
h\n\rDescribeIndex\x12).milvus.proto.milvus.DescribeIndexRequest\x1a*.milvus.proto.milvus.DescribeIndexResponse\"\x00\x12w\n\x12GetIndexStatistics\x12..milvus.proto.milvus.GetIndexStatisticsRequest\x1a/.milvus.proto.milvus.GetIndexStatisticsResponse\"\x00\x12k\n\rGetIndexState\x12).milvus.proto.milvus.GetIndexStateRequest\x1a*.milvus.proto.milvus.GetIndexStateResponse\"\x03\x88\x02\x01\x12\x83\x01\n\x15GetIndexBuildProgress\x12\x31.milvus.proto.milvus.GetIndexBuildProgressRequest\x1a\x32.milvus.proto.milvus.GetIndexBuildProgressResponse\"\x03\x88\x02\x01\x12Q\n\tDropIndex\x12%.milvus.proto.milvus.DropIndexRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12S\n\x06Insert\x12\".milvus.proto.milvus.InsertRequest\x1a#.milvus.proto.milvus.MutationResult\"\x00\x12S\n\x06\x44\x65lete\x12\".milvus.proto.milvus.DeleteRequest\x1a#.milvus.proto.milvus.MutationResult\"\x00\x12S\n\x06Upsert\x12\".milvus.proto.milvus.UpsertRequest\x1a#.milvus.proto.milvus.MutationResult\"\x00\x12R\n\x06Search\x12\".milvus.proto.milvus.SearchRequest\x1a\".milvus.proto.milvus.SearchResults\"\x00\x12^\n\x0cHybridSearch\x12(.milvus.proto.milvus.HybridSearchRequest\x1a\".milvus.proto.milvus.SearchResults\"\x00\x12P\n\x05\x46lush\x12!.milvus.proto.milvus.FlushRequest\x1a\".milvus.proto.milvus.FlushResponse\"\x00\x12O\n\x05Query\x12!.milvus.proto.milvus.QueryRequest\x1a!.milvus.proto.milvus.QueryResults\"\x00\x12\x64\n\x0c\x43\x61lcDistance\x12(.milvus.proto.milvus.CalcDistanceRequest\x1a(.milvus.proto.milvus.CalcDistanceResults\"\x00\x12Y\n\x08\x46lushAll\x12$.milvus.proto.milvus.FlushAllRequest\x1a%.milvus.proto.milvus.FlushAllResponse\"\x00\x12h\n\rGetFlushState\x12).milvus.proto.milvus.GetFlushStateRequest\x1a*.milvus.proto.milvus.GetFlushStateResponse\"\x00\x12q\n\x10GetFlushAllState\x12,.milvus.proto.milvus.GetFlushAllStateRequest\x1a-.milvus.proto.milvus.GetFlushAllStateResponse\"\x00\x12\x89\x01\n\x18GetPersistentSegmentInfo\x12\x34.milvus.proto.milvus.GetPersistentSegmentInfoRequest\x1a\x35.milvus.proto.milvus.GetPersistentSegmentInfoResponse\"\x00\x12z\n\x13GetQuerySegmentInfo\x12/.milvus.proto.milvus.GetQuerySegmentInfoRequest\x1a\x30.milvus.proto.milvus.GetQuerySegmentInfoResponse\"\x00\x12\x62\n\x0bGetReplicas\x12\'.milvus.proto.milvus.GetReplicasRequest\x1a(.milvus.proto.milvus.GetReplicasResponse\"\x00\x12P\n\x05\x44ummy\x12!.milvus.proto.milvus.DummyRequest\x1a\".milvus.proto.milvus.DummyResponse\"\x00\x12\x65\n\x0cRegisterLink\x12(.milvus.proto.milvus.RegisterLinkRequest\x1a).milvus.proto.milvus.RegisterLinkResponse\"\x00\x12_\n\nGetMetrics\x12&.milvus.proto.milvus.GetMetricsRequest\x1a\'.milvus.proto.milvus.GetMetricsResponse\"\x00\x12l\n\x12GetComponentStates\x12..milvus.proto.milvus.GetComponentStatesRequest\x1a$.milvus.proto.milvus.ComponentStates\"\x00\x12U\n\x0bLoadBalance\x12\'.milvus.proto.milvus.LoadBalanceRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12w\n\x12GetCompactionState\x12..milvus.proto.milvus.GetCompactionStateRequest\x1a/.milvus.proto.milvus.GetCompactionStateResponse\"\x00\x12q\n\x10ManualCompaction\x12,.milvus.proto.milvus.ManualCompactionRequest\x1a-.milvus.proto.milvus.ManualCompactionResponse\"\x00\x12\x80\x01\n\x1bGetCompactionStateWithPlans\x12..milvus.proto.milvus.GetCompactionPlansRequest\x1a/.milvus.proto.milvus.GetCompactionPlansResponse\"\x00\x12S\n\x06Import\x12\".milvus.proto.milvus.ImportRequest\x1a#.milvus.proto.milvus.ImportResponse\"\x00\x12k\n\x0eGetImportState\x12*.milvus.proto.milvus.GetImportStateRequest\x1a+.milvus.proto.milvus.GetImportStateResponse\"\x00\x12n\n\
x0fListImportTasks\x12+.milvus.proto.milvus.ListImportTasksRequest\x1a,.milvus.proto.milvus.ListImportTasksResponse\"\x00\x12_\n\x10\x43reateCredential\x12,.milvus.proto.milvus.CreateCredentialRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12_\n\x10UpdateCredential\x12,.milvus.proto.milvus.UpdateCredentialRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12_\n\x10\x44\x65leteCredential\x12,.milvus.proto.milvus.DeleteCredentialRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12h\n\rListCredUsers\x12).milvus.proto.milvus.ListCredUsersRequest\x1a*.milvus.proto.milvus.ListCredUsersResponse\"\x00\x12S\n\nCreateRole\x12&.milvus.proto.milvus.CreateRoleRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12O\n\x08\x44ropRole\x12$.milvus.proto.milvus.DropRoleRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12]\n\x0fOperateUserRole\x12+.milvus.proto.milvus.OperateUserRoleRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12_\n\nSelectRole\x12&.milvus.proto.milvus.SelectRoleRequest\x1a\'.milvus.proto.milvus.SelectRoleResponse\"\x00\x12_\n\nSelectUser\x12&.milvus.proto.milvus.SelectUserRequest\x1a\'.milvus.proto.milvus.SelectUserResponse\"\x00\x12_\n\x10OperatePrivilege\x12,.milvus.proto.milvus.OperatePrivilegeRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12\x62\n\x0bSelectGrant\x12\'.milvus.proto.milvus.SelectGrantRequest\x1a(.milvus.proto.milvus.SelectGrantResponse\"\x00\x12_\n\nGetVersion\x12&.milvus.proto.milvus.GetVersionRequest\x1a\'.milvus.proto.milvus.GetVersionResponse\"\x00\x12\x62\n\x0b\x43heckHealth\x12\'.milvus.proto.milvus.CheckHealthRequest\x1a(.milvus.proto.milvus.CheckHealthResponse\"\x00\x12\x65\n\x13\x43reateResourceGroup\x12/.milvus.proto.milvus.CreateResourceGroupRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12\x61\n\x11\x44ropResourceGroup\x12-.milvus.proto.milvus.DropResourceGroupRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12g\n\x14UpdateResourceGroups\x12\x30.milvus.proto.milvus.UpdateResourceGroupsRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12W\n\x0cTransferNode\x12(.milvus.proto.milvus.TransferNodeRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12]\n\x0fTransferReplica\x12+.milvus.proto.milvus.TransferReplicaRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12w\n\x12ListResourceGroups\x12..milvus.proto.milvus.ListResourceGroupsRequest\x1a/.milvus.proto.milvus.ListResourceGroupsResponse\"\x00\x12\x80\x01\n\x15\x44\x65scribeResourceGroup\x12\x31.milvus.proto.milvus.DescribeResourceGroupRequest\x1a\x32.milvus.proto.milvus.DescribeResourceGroupResponse\"\x00\x12_\n\x10RenameCollection\x12,.milvus.proto.milvus.RenameCollectionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12u\n\x12ListIndexedSegment\x12-.milvus.proto.feder.ListIndexedSegmentRequest\x1a..milvus.proto.feder.ListIndexedSegmentResponse\"\x00\x12\x87\x01\n\x18\x44\x65scribeSegmentIndexData\x12\x33.milvus.proto.feder.DescribeSegmentIndexDataRequest\x1a\x34.milvus.proto.feder.DescribeSegmentIndexDataResponse\"\x00\x12V\n\x07\x43onnect\x12#.milvus.proto.milvus.ConnectRequest\x1a$.milvus.proto.milvus.ConnectResponse\"\x00\x12k\n\x0e\x41llocTimestamp\x12*.milvus.proto.milvus.AllocTimestampRequest\x1a+.milvus.proto.milvus.AllocTimestampResponse\"\x00\x12[\n\x0e\x43reateDatabase\x12*.milvus.proto.milvus.CreateDatabaseRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12W\n\x0c\x44ropDatabase\x12(.milvus.proto.milvus.DropDatabaseRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12h\n\rListDatabases\x12).milvus.proto.milvus.ListDatabasesRequest\x1a*.milvus.proto.milvus.ListDatabasesResponse\"\x00\x12Y\n\
rAlterDatabase\x12).milvus.proto.milvus.AlterDatabaseRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12q\n\x10ReplicateMessage\x12,.milvus.proto.milvus.ReplicateMessageRequest\x1a-.milvus.proto.milvus.ReplicateMessageResponse\"\x00\x32u\n\x0cProxyService\x12\x65\n\x0cRegisterLink\x12(.milvus.proto.milvus.RegisterLinkRequest\x1a).milvus.proto.milvus.RegisterLinkResponse\"\x00:U\n\x0emilvus_ext_obj\x12\x1c.google.protobuf.FileOptions\x18\xe9\x07 \x01(\x0b\x32\x1e.milvus.proto.milvus.MilvusExtBm\n\x0eio.milvus.grpcB\x0bMilvusProtoP\x01Z4github.com/milvus-io/milvus-proto/go-api/v2/milvuspb\xa0\x01\x01\xaa\x02\x12Milvus.Client.Grpcb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0cmilvus.proto\x12\x13milvus.proto.milvus\x1a\x0c\x63ommon.proto\x1a\x08rg.proto\x1a\x0cschema.proto\x1a\x0b\x66\x65\x64\x65r.proto\x1a\tmsg.proto\x1a google/protobuf/descriptor.proto\"\x8d\x01\n\x12\x43reateAliasRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\r\n\x05\x61lias\x18\x04 \x01(\t:\x12\xca>\x0f\x08\x01\x10,\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"r\n\x10\x44ropAliasRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\r\n\x05\x61lias\x18\x03 \x01(\t:\x12\xca>\x0f\x08\x01\x10-\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x8c\x01\n\x11\x41lterAliasRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\r\n\x05\x61lias\x18\x04 \x01(\t:\x12\xca>\x0f\x08\x01\x10,\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"v\n\x14\x44\x65scribeAliasRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\r\n\x05\x61lias\x18\x03 \x01(\t:\x12\xca>\x0f\x08\x01\x10.\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"x\n\x15\x44\x65scribeAliasResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\r\n\x05\x61lias\x18\x03 \x01(\t\x12\x12\n\ncollection\x18\x04 \x01(\t\"~\n\x12ListAliasesRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t:\x12\xca>\x0f\x08\x01\x10/\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"}\n\x13ListAliasesResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x0f\n\x07\x61liases\x18\x04 \x03(\t\"\xb8\x02\n\x17\x43reateCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x0e\n\x06schema\x18\x04 \x01(\x0c\x12\x12\n\nshards_num\x18\x05 \x01(\x05\x12@\n\x11\x63onsistency_level\x18\x06 \x01(\x0e\x32%.milvus.proto.common.ConsistencyLevel\x12\x35\n\nproperties\x18\x07 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x16\n\x0enum_partitions\x18\x08 \x01(\x03:\x12\xca>\x0f\x08\x01\x10\x01\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x81\x01\n\x15\x44ropCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 
\x01(\t:\x12\xca>\x0f\x08\x01\x10\x02\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xe4\x01\n\x16\x41lterCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x14\n\x0c\x63ollectionID\x18\x04 \x01(\x03\x12\x35\n\nproperties\x18\x05 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x13\n\x0b\x64\x65lete_keys\x18\x06 \x03(\t:\x12\xca>\x0f\x08\x01\x10\x01\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xd2\x01\n\x1b\x41lterCollectionFieldRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nfield_name\x18\x04 \x01(\t\x12\x35\n\nproperties\x18\x05 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair:\x12\xca>\x0f\x08\x01\x10\x01\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x80\x01\n\x14HasCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\ntime_stamp\x18\x04 \x01(\x04\"J\n\x0c\x42oolResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\r\n\x05value\x18\x02 \x01(\x08\"L\n\x0eStringResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\r\n\x05value\x18\x02 \x01(\t\"\xaf\x01\n\x19\x44\x65scribeCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x14\n\x0c\x63ollectionID\x18\x04 \x01(\x03\x12\x12\n\ntime_stamp\x18\x05 \x01(\x04:\x12\xca>\x0f\x08\x01\x10\x03\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xb9\x04\n\x1a\x44\x65scribeCollectionResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x35\n\x06schema\x18\x02 \x01(\x0b\x32%.milvus.proto.schema.CollectionSchema\x12\x14\n\x0c\x63ollectionID\x18\x03 \x01(\x03\x12\x1d\n\x15virtual_channel_names\x18\x04 \x03(\t\x12\x1e\n\x16physical_channel_names\x18\x05 \x03(\t\x12\x19\n\x11\x63reated_timestamp\x18\x06 \x01(\x04\x12\x1d\n\x15\x63reated_utc_timestamp\x18\x07 \x01(\x04\x12\x12\n\nshards_num\x18\x08 \x01(\x05\x12\x0f\n\x07\x61liases\x18\t \x03(\t\x12\x39\n\x0fstart_positions\x18\n \x03(\x0b\x32 .milvus.proto.common.KeyDataPair\x12@\n\x11\x63onsistency_level\x18\x0b \x01(\x0e\x32%.milvus.proto.common.ConsistencyLevel\x12\x17\n\x0f\x63ollection_name\x18\x0c \x01(\t\x12\x35\n\nproperties\x18\r \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x0f\n\x07\x64\x62_name\x18\x0e \x01(\t\x12\x16\n\x0enum_partitions\x18\x0f \x01(\x03\x12\r\n\x05\x64\x62_id\x18\x10 \x01(\x03\"\xee\x01\n\x15LoadCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0ereplica_number\x18\x04 \x01(\x05\x12\x17\n\x0fresource_groups\x18\x05 \x03(\t\x12\x0f\n\x07refresh\x18\x06 \x01(\x08\x12\x13\n\x0bload_fields\x18\x07 \x03(\t\x12\x1f\n\x17skip_load_dynamic_field\x18\x08 \x01(\x08:\x07\xca>\x04\x10\x05\x18\x03\"y\n\x18ReleaseCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t:\x07\xca>\x04\x10\x06\x18\x03\"\xab\x01\n\x14GetStatisticsRequest\x12*\n\x04\x62\x61se\x18\x01 
\x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x17\n\x0fpartition_names\x18\x04 \x03(\t\x12\x1b\n\x13guarantee_timestamp\x18\x05 \x01(\x04:\x07\xca>\x04\x10\n\x18\x03\"v\n\x15GetStatisticsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x30\n\x05stats\x18\x02 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\"\x7f\n\x1eGetCollectionStatisticsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t:\x07\xca>\x04\x10\n\x18\x03\"\x80\x01\n\x1fGetCollectionStatisticsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x30\n\x05stats\x18\x02 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\"\xb4\x01\n\x16ShowCollectionsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x12\n\ntime_stamp\x18\x03 \x01(\x04\x12+\n\x04type\x18\x04 \x01(\x0e\x32\x1d.milvus.proto.milvus.ShowType\x12\x1c\n\x10\x63ollection_names\x18\x05 \x03(\tB\x02\x18\x01\"\xf7\x01\n\x17ShowCollectionsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x18\n\x10\x63ollection_names\x18\x02 \x03(\t\x12\x16\n\x0e\x63ollection_ids\x18\x03 \x03(\x03\x12\x1a\n\x12\x63reated_timestamps\x18\x04 \x03(\x04\x12\x1e\n\x16\x63reated_utc_timestamps\x18\x05 \x03(\x04\x12 \n\x14inMemory_percentages\x18\x06 \x03(\x03\x42\x02\x18\x01\x12\x1f\n\x17query_service_available\x18\x07 \x03(\x08\"\x8f\x01\n\x16\x43reatePartitionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t:\x07\xca>\x04\x10\'\x18\x03\"\x8d\x01\n\x14\x44ropPartitionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t:\x07\xca>\x04\x10(\x18\x03\"\x8c\x01\n\x13HasPartitionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t:\x07\xca>\x04\x10*\x18\x03\"\x87\x02\n\x15LoadPartitionsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x17\n\x0fpartition_names\x18\x04 \x03(\t\x12\x16\n\x0ereplica_number\x18\x05 \x01(\x05\x12\x17\n\x0fresource_groups\x18\x06 \x03(\t\x12\x0f\n\x07refresh\x18\x07 \x01(\x08\x12\x13\n\x0bload_fields\x18\x08 \x03(\t\x12\x1f\n\x17skip_load_dynamic_field\x18\t \x01(\x08:\x07\xca>\x04\x10\x05\x18\x03\"\x92\x01\n\x18ReleasePartitionsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x17\n\x0fpartition_names\x18\x04 \x03(\t:\x07\xca>\x04\x10\x06\x18\x03\"\x8d\x01\n\x1dGetPartitionStatisticsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 
\x01(\t\"\x7f\n\x1eGetPartitionStatisticsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x30\n\x05stats\x18\x02 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\"\xd6\x01\n\x15ShowPartitionsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x14\n\x0c\x63ollectionID\x18\x04 \x01(\x03\x12\x17\n\x0fpartition_names\x18\x05 \x03(\t\x12/\n\x04type\x18\x06 \x01(\x0e\x32\x1d.milvus.proto.milvus.ShowTypeB\x02\x18\x01:\x07\xca>\x04\x10)\x18\x03\"\xd2\x01\n\x16ShowPartitionsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x17\n\x0fpartition_names\x18\x02 \x03(\t\x12\x14\n\x0cpartitionIDs\x18\x03 \x03(\x03\x12\x1a\n\x12\x63reated_timestamps\x18\x04 \x03(\x04\x12\x1e\n\x16\x63reated_utc_timestamps\x18\x05 \x03(\x04\x12 \n\x14inMemory_percentages\x18\x06 \x03(\x03\x42\x02\x18\x01\"m\n\x16\x44\x65scribeSegmentRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x14\n\x0c\x63ollectionID\x18\x02 \x01(\x03\x12\x11\n\tsegmentID\x18\x03 \x01(\x03\"\x8f\x01\n\x17\x44\x65scribeSegmentResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x0f\n\x07indexID\x18\x02 \x01(\x03\x12\x0f\n\x07\x62uildID\x18\x03 \x01(\x03\x12\x14\n\x0c\x65nable_index\x18\x04 \x01(\x08\x12\x0f\n\x07\x66ieldID\x18\x05 \x01(\x03\"l\n\x13ShowSegmentsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x14\n\x0c\x63ollectionID\x18\x02 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x03 \x01(\x03\"W\n\x14ShowSegmentsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x12\n\nsegmentIDs\x18\x02 \x03(\x03\"\xd4\x01\n\x12\x43reateIndexRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nfield_name\x18\x04 \x01(\t\x12\x37\n\x0c\x65xtra_params\x18\x05 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x12\n\nindex_name\x18\x06 \x01(\t:\x07\xca>\x04\x10\x0b\x18\x03\"\xd4\x01\n\x11\x41lterIndexRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nindex_name\x18\x04 \x01(\t\x12\x37\n\x0c\x65xtra_params\x18\x05 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x13\n\x0b\x64\x65lete_keys\x18\x06 \x03(\t:\x07\xca>\x04\x10\x0b\x18\x03\"\xb0\x01\n\x14\x44\x65scribeIndexRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nfield_name\x18\x04 \x01(\t\x12\x12\n\nindex_name\x18\x05 \x01(\t\x12\x11\n\ttimestamp\x18\x06 \x01(\x04:\x07\xca>\x04\x10\x0c\x18\x03\"\x95\x02\n\x10IndexDescription\x12\x12\n\nindex_name\x18\x01 \x01(\t\x12\x0f\n\x07indexID\x18\x02 \x01(\x03\x12\x31\n\x06params\x18\x03 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x12\n\nfield_name\x18\x04 \x01(\t\x12\x14\n\x0cindexed_rows\x18\x05 \x01(\x03\x12\x12\n\ntotal_rows\x18\x06 \x01(\x03\x12.\n\x05state\x18\x07 \x01(\x0e\x32\x1f.milvus.proto.common.IndexState\x12\x1f\n\x17index_state_fail_reason\x18\x08 \x01(\t\x12\x1a\n\x12pending_index_rows\x18\t \x01(\x03\"\x87\x01\n\x15\x44\x65scribeIndexResponse\x12+\n\x06status\x18\x01 
\x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x41\n\x12index_descriptions\x18\x02 \x03(\x0b\x32%.milvus.proto.milvus.IndexDescription\"\xa5\x01\n\x1cGetIndexBuildProgressRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nfield_name\x18\x04 \x01(\t\x12\x12\n\nindex_name\x18\x05 \x01(\t:\x07\xca>\x04\x10\x0c\x18\x03\"v\n\x1dGetIndexBuildProgressResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x14\n\x0cindexed_rows\x18\x02 \x01(\x03\x12\x12\n\ntotal_rows\x18\x03 \x01(\x03\"\x9d\x01\n\x14GetIndexStateRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nfield_name\x18\x04 \x01(\t\x12\x12\n\nindex_name\x18\x05 \x01(\t:\x07\xca>\x04\x10\x0c\x18\x03\"\x89\x01\n\x15GetIndexStateResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12.\n\x05state\x18\x02 \x01(\x0e\x32\x1f.milvus.proto.common.IndexState\x12\x13\n\x0b\x66\x61il_reason\x18\x03 \x01(\t\"\x99\x01\n\x10\x44ropIndexRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nfield_name\x18\x04 \x01(\t\x12\x12\n\nindex_name\x18\x05 \x01(\t:\x07\xca>\x04\x10\r\x18\x03\"\xe0\x01\n\rInsertRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t\x12\x33\n\x0b\x66ields_data\x18\x05 \x03(\x0b\x32\x1e.milvus.proto.schema.FieldData\x12\x11\n\thash_keys\x18\x06 \x03(\r\x12\x10\n\x08num_rows\x18\x07 \x01(\r:\x07\xca>\x04\x10\x08\x18\x03\"\xe0\x01\n\rUpsertRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t\x12\x33\n\x0b\x66ields_data\x18\x05 \x03(\x0b\x32\x1e.milvus.proto.schema.FieldData\x12\x11\n\thash_keys\x18\x06 \x03(\r\x12\x10\n\x08num_rows\x18\x07 \x01(\r:\x07\xca>\x04\x10\x19\x18\x03\"\xf0\x01\n\x0eMutationResult\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12%\n\x03IDs\x18\x02 \x01(\x0b\x32\x18.milvus.proto.schema.IDs\x12\x12\n\nsucc_index\x18\x03 \x03(\r\x12\x11\n\terr_index\x18\x04 \x03(\r\x12\x14\n\x0c\x61\x63knowledged\x18\x05 \x01(\x08\x12\x12\n\ninsert_cnt\x18\x06 \x01(\x03\x12\x12\n\ndelete_cnt\x18\x07 \x01(\x03\x12\x12\n\nupsert_cnt\x18\x08 \x01(\x03\x12\x11\n\ttimestamp\x18\t \x01(\x04\"\xa2\x03\n\rDeleteRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t\x12\x0c\n\x04\x65xpr\x18\x05 \x01(\t\x12\x11\n\thash_keys\x18\x06 \x03(\r\x12@\n\x11\x63onsistency_level\x18\x07 \x01(\x0e\x32%.milvus.proto.common.ConsistencyLevel\x12X\n\x14\x65xpr_template_values\x18\x08 \x03(\x0b\x32:.milvus.proto.milvus.DeleteRequest.ExprTemplateValuesEntry\x1a]\n\x17\x45xprTemplateValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".milvus.proto.schema.TemplateValue:\x02\x38\x01:\x07\xca>\x04\x10\t\x18\x03\"\xec\x02\n\x10SubSearchRequest\x12\x0b\n\x03\x64sl\x18\x01 
\x01(\t\x12\x19\n\x11placeholder_group\x18\x02 \x01(\x0c\x12.\n\x08\x64sl_type\x18\x03 \x01(\x0e\x32\x1c.milvus.proto.common.DslType\x12\x38\n\rsearch_params\x18\x04 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\n\n\x02nq\x18\x05 \x01(\x03\x12[\n\x14\x65xpr_template_values\x18\x06 \x03(\x0b\x32=.milvus.proto.milvus.SubSearchRequest.ExprTemplateValuesEntry\x1a]\n\x17\x45xprTemplateValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".milvus.proto.schema.TemplateValue:\x02\x38\x01\"\x85\x06\n\rSearchRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x17\n\x0fpartition_names\x18\x04 \x03(\t\x12\x0b\n\x03\x64sl\x18\x05 \x01(\t\x12\x19\n\x11placeholder_group\x18\x06 \x01(\x0c\x12.\n\x08\x64sl_type\x18\x07 \x01(\x0e\x32\x1c.milvus.proto.common.DslType\x12\x15\n\routput_fields\x18\x08 \x03(\t\x12\x38\n\rsearch_params\x18\t \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x18\n\x10travel_timestamp\x18\n \x01(\x04\x12\x1b\n\x13guarantee_timestamp\x18\x0b \x01(\x04\x12\n\n\x02nq\x18\x0c \x01(\x03\x12\x1b\n\x13not_return_all_meta\x18\r \x01(\x08\x12@\n\x11\x63onsistency_level\x18\x0e \x01(\x0e\x32%.milvus.proto.common.ConsistencyLevel\x12\x1f\n\x17use_default_consistency\x18\x0f \x01(\x08\x12\x1e\n\x16search_by_primary_keys\x18\x10 \x01(\x08\x12\x37\n\x08sub_reqs\x18\x11 \x03(\x0b\x32%.milvus.proto.milvus.SubSearchRequest\x12X\n\x14\x65xpr_template_values\x18\x12 \x03(\x0b\x32:.milvus.proto.milvus.SearchRequest.ExprTemplateValuesEntry\x1a]\n\x17\x45xprTemplateValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".milvus.proto.schema.TemplateValue:\x02\x38\x01:\x07\xca>\x04\x10\x0e\x18\x03\"5\n\x04Hits\x12\x0b\n\x03IDs\x18\x01 \x03(\x03\x12\x10\n\x08row_data\x18\x02 \x03(\x0c\x12\x0e\n\x06scores\x18\x03 \x03(\x02\"\xa1\x01\n\rSearchResults\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x36\n\x07results\x18\x02 \x01(\x0b\x32%.milvus.proto.schema.SearchResultData\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nsession_ts\x18\x04 \x01(\x04\"\xc9\x03\n\x13HybridSearchRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x17\n\x0fpartition_names\x18\x04 \x03(\t\x12\x34\n\x08requests\x18\x05 \x03(\x0b\x32\".milvus.proto.milvus.SearchRequest\x12\x36\n\x0brank_params\x18\x06 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x18\n\x10travel_timestamp\x18\x07 \x01(\x04\x12\x1b\n\x13guarantee_timestamp\x18\x08 \x01(\x04\x12\x1b\n\x13not_return_all_meta\x18\t \x01(\x08\x12\x15\n\routput_fields\x18\n \x03(\t\x12@\n\x11\x63onsistency_level\x18\x0b \x01(\x0e\x32%.milvus.proto.common.ConsistencyLevel\x12\x1f\n\x17use_default_consistency\x18\x0c \x01(\x08:\x07\xca>\x04\x10\x0e\x18\x03\"n\n\x0c\x46lushRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x18\n\x10\x63ollection_names\x18\x03 \x03(\t:\x07\xca>\x04\x10\x0f \x03\"\xb6\x06\n\rFlushResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12G\n\x0b\x63oll_segIDs\x18\x03 \x03(\x0b\x32\x32.milvus.proto.milvus.FlushResponse.CollSegIDsEntry\x12R\n\x11\x66lush_coll_segIDs\x18\x04 
\x03(\x0b\x32\x37.milvus.proto.milvus.FlushResponse.FlushCollSegIDsEntry\x12N\n\x0f\x63oll_seal_times\x18\x05 \x03(\x0b\x32\x35.milvus.proto.milvus.FlushResponse.CollSealTimesEntry\x12J\n\rcoll_flush_ts\x18\x06 \x03(\x0b\x32\x33.milvus.proto.milvus.FlushResponse.CollFlushTsEntry\x12G\n\x0b\x63hannel_cps\x18\x07 \x03(\x0b\x32\x32.milvus.proto.milvus.FlushResponse.ChannelCpsEntry\x1aQ\n\x0f\x43ollSegIDsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.milvus.proto.schema.LongArray:\x02\x38\x01\x1aV\n\x14\x46lushCollSegIDsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.milvus.proto.schema.LongArray:\x02\x38\x01\x1a\x34\n\x12\x43ollSealTimesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1a\x32\n\x10\x43ollFlushTsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x04:\x02\x38\x01\x1aP\n\x0f\x43hannelCpsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12,\n\x05value\x18\x02 \x01(\x0b\x32\x1d.milvus.proto.msg.MsgPosition:\x02\x38\x01\"\xd3\x04\n\x0cQueryRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x0c\n\x04\x65xpr\x18\x04 \x01(\t\x12\x15\n\routput_fields\x18\x05 \x03(\t\x12\x17\n\x0fpartition_names\x18\x06 \x03(\t\x12\x18\n\x10travel_timestamp\x18\x07 \x01(\x04\x12\x1b\n\x13guarantee_timestamp\x18\x08 \x01(\x04\x12\x37\n\x0cquery_params\x18\t \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x1b\n\x13not_return_all_meta\x18\n \x01(\x08\x12@\n\x11\x63onsistency_level\x18\x0b \x01(\x0e\x32%.milvus.proto.common.ConsistencyLevel\x12\x1f\n\x17use_default_consistency\x18\x0c \x01(\x08\x12W\n\x14\x65xpr_template_values\x18\r \x03(\x0b\x32\x39.milvus.proto.milvus.QueryRequest.ExprTemplateValuesEntry\x1a]\n\x17\x45xprTemplateValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".milvus.proto.schema.TemplateValue:\x02\x38\x01:\x07\xca>\x04\x10\x10\x18\x03\"\xb4\x01\n\x0cQueryResults\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x33\n\x0b\x66ields_data\x18\x02 \x03(\x0b\x32\x1e.milvus.proto.schema.FieldData\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x15\n\routput_fields\x18\x04 \x03(\t\x12\x12\n\nsession_ts\x18\x05 \x01(\x04\"}\n\tVectorIDs\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x12\n\nfield_name\x18\x02 \x01(\t\x12*\n\x08id_array\x18\x03 \x01(\x0b\x32\x18.milvus.proto.schema.IDs\x12\x17\n\x0fpartition_names\x18\x04 \x03(\t\"\x83\x01\n\x0cVectorsArray\x12\x32\n\x08id_array\x18\x01 \x01(\x0b\x32\x1e.milvus.proto.milvus.VectorIDsH\x00\x12\x36\n\ndata_array\x18\x02 \x01(\x0b\x32 .milvus.proto.schema.VectorFieldH\x00\x42\x07\n\x05\x61rray\"\xdd\x01\n\x13\x43\x61lcDistanceRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x32\n\x07op_left\x18\x02 \x01(\x0b\x32!.milvus.proto.milvus.VectorsArray\x12\x33\n\x08op_right\x18\x03 \x01(\x0b\x32!.milvus.proto.milvus.VectorsArray\x12\x31\n\x06params\x18\x04 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\"\xb5\x01\n\x13\x43\x61lcDistanceResults\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x31\n\x08int_dist\x18\x02 \x01(\x0b\x32\x1d.milvus.proto.schema.IntArrayH\x00\x12\x35\n\nfloat_dist\x18\x03 \x01(\x0b\x32\x1f.milvus.proto.schema.FloatArrayH\x00\x42\x07\n\x05\x61rray\"b\n\x0f\x46lushAllRequest\x12*\n\x04\x62\x61se\x18\x01 
\x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t:\x12\xca>\x0f\x08\x01\x10&\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"U\n\x10\x46lushAllResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x14\n\x0c\x66lush_all_ts\x18\x02 \x01(\x04\"\xde\x01\n\x15PersistentSegmentInfo\x12\x11\n\tsegmentID\x18\x01 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x02 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x03 \x01(\x03\x12\x10\n\x08num_rows\x18\x04 \x01(\x03\x12\x30\n\x05state\x18\x05 \x01(\x0e\x32!.milvus.proto.common.SegmentState\x12\x30\n\x05level\x18\x06 \x01(\x0e\x32!.milvus.proto.common.SegmentLevel\x12\x11\n\tis_sorted\x18\x07 \x01(\x08\"u\n\x1fGetPersistentSegmentInfoRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0e\n\x06\x64\x62Name\x18\x02 \x01(\t\x12\x16\n\x0e\x63ollectionName\x18\x03 \x01(\t\"\x8a\x01\n GetPersistentSegmentInfoResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x39\n\x05infos\x18\x02 \x03(\x0b\x32*.milvus.proto.milvus.PersistentSegmentInfo\"\xb5\x02\n\x10QuerySegmentInfo\x12\x11\n\tsegmentID\x18\x01 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x02 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x03 \x01(\x03\x12\x10\n\x08mem_size\x18\x04 \x01(\x03\x12\x10\n\x08num_rows\x18\x05 \x01(\x03\x12\x12\n\nindex_name\x18\x06 \x01(\t\x12\x0f\n\x07indexID\x18\x07 \x01(\x03\x12\x12\n\x06nodeID\x18\x08 \x01(\x03\x42\x02\x18\x01\x12\x30\n\x05state\x18\t \x01(\x0e\x32!.milvus.proto.common.SegmentState\x12\x0f\n\x07nodeIds\x18\n \x03(\x03\x12\x30\n\x05level\x18\x0b \x01(\x0e\x32!.milvus.proto.common.SegmentLevel\x12\x11\n\tis_sorted\x18\x0c \x01(\x08\"p\n\x1aGetQuerySegmentInfoRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0e\n\x06\x64\x62Name\x18\x02 \x01(\t\x12\x16\n\x0e\x63ollectionName\x18\x03 \x01(\t\"\x80\x01\n\x1bGetQuerySegmentInfoResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x34\n\x05infos\x18\x02 \x03(\x0b\x32%.milvus.proto.milvus.QuerySegmentInfo\"$\n\x0c\x44ummyRequest\x12\x14\n\x0crequest_type\x18\x01 \x01(\t\"!\n\rDummyResponse\x12\x10\n\x08response\x18\x01 \x01(\t\"\x15\n\x13RegisterLinkRequest\"r\n\x14RegisterLinkResponse\x12-\n\x07\x61\x64\x64ress\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.Address\x12+\n\x06status\x18\x02 \x01(\x0b\x32\x1b.milvus.proto.common.Status\"P\n\x11GetMetricsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07request\x18\x02 \x01(\t\"k\n\x12GetMetricsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x10\n\x08response\x18\x02 \x01(\t\x12\x16\n\x0e\x63omponent_name\x18\x03 \x01(\t\"\x98\x01\n\rComponentInfo\x12\x0e\n\x06nodeID\x18\x01 \x01(\x03\x12\x0c\n\x04role\x18\x02 \x01(\t\x12\x32\n\nstate_code\x18\x03 \x01(\x0e\x32\x1e.milvus.proto.common.StateCode\x12\x35\n\nextra_info\x18\x04 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\"\xb2\x01\n\x0f\x43omponentStates\x12\x31\n\x05state\x18\x01 \x01(\x0b\x32\".milvus.proto.milvus.ComponentInfo\x12?\n\x13subcomponent_states\x18\x02 \x03(\x0b\x32\".milvus.proto.milvus.ComponentInfo\x12+\n\x06status\x18\x03 \x01(\x0b\x32\x1b.milvus.proto.common.Status\"\x1b\n\x19GetComponentStatesRequest\"\xb6\x01\n\x12LoadBalanceRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x12\n\nsrc_nodeID\x18\x02 \x01(\x03\x12\x13\n\x0b\x64st_nodeIDs\x18\x03 \x03(\x03\x12\x19\n\x11sealed_segmentIDs\x18\x04 
\x03(\x03\x12\x16\n\x0e\x63ollectionName\x18\x05 \x01(\t\x12\x0f\n\x07\x64\x62_name\x18\x06 \x01(\t:\x07\xca>\x04\x10\x11\x18\x05\"e\n\x17ManualCompactionRequest\x12\x14\n\x0c\x63ollectionID\x18\x01 \x01(\x03\x12\x12\n\ntimetravel\x18\x02 \x01(\x04\x12\x17\n\x0fmajorCompaction\x18\x03 \x01(\x08:\x07\xca>\x04\x10\x07\x18\x01\"z\n\x18ManualCompactionResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x14\n\x0c\x63ompactionID\x18\x02 \x01(\x03\x12\x1b\n\x13\x63ompactionPlanCount\x18\x03 \x01(\x05\"1\n\x19GetCompactionStateRequest\x12\x14\n\x0c\x63ompactionID\x18\x01 \x01(\x03\"\xdd\x01\n\x1aGetCompactionStateResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x33\n\x05state\x18\x02 \x01(\x0e\x32$.milvus.proto.common.CompactionState\x12\x17\n\x0f\x65xecutingPlanNo\x18\x03 \x01(\x03\x12\x15\n\rtimeoutPlanNo\x18\x04 \x01(\x03\x12\x17\n\x0f\x63ompletedPlanNo\x18\x05 \x01(\x03\x12\x14\n\x0c\x66\x61iledPlanNo\x18\x06 \x01(\x03\"1\n\x19GetCompactionPlansRequest\x12\x14\n\x0c\x63ompactionID\x18\x01 \x01(\x03\"\xbc\x01\n\x1aGetCompactionPlansResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x33\n\x05state\x18\x02 \x01(\x0e\x32$.milvus.proto.common.CompactionState\x12<\n\nmergeInfos\x18\x03 \x03(\x0b\x32(.milvus.proto.milvus.CompactionMergeInfo\"6\n\x13\x43ompactionMergeInfo\x12\x0f\n\x07sources\x18\x01 \x03(\x03\x12\x0e\n\x06target\x18\x02 \x01(\x03\"o\n\x14GetFlushStateRequest\x12\x12\n\nsegmentIDs\x18\x01 \x03(\x03\x12\x10\n\x08\x66lush_ts\x18\x02 \x01(\x04\x12\x0f\n\x07\x64\x62_name\x18\x03 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x04 \x01(\t:\x07\xca>\x04\x10+\x18\x04\"U\n\x15GetFlushStateResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x0f\n\x07\x66lushed\x18\x02 \x01(\x08\"l\n\x17GetFlushAllStateRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x14\n\x0c\x66lush_all_ts\x18\x02 \x01(\x04\x12\x0f\n\x07\x64\x62_name\x18\x03 \x01(\t\"X\n\x18GetFlushAllStateResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x0f\n\x07\x66lushed\x18\x02 \x01(\x08\"\xe0\x01\n\rImportRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x16\n\x0epartition_name\x18\x02 \x01(\t\x12\x15\n\rchannel_names\x18\x03 \x03(\t\x12\x11\n\trow_based\x18\x04 \x01(\x08\x12\r\n\x05\x66iles\x18\x05 \x03(\t\x12\x32\n\x07options\x18\x06 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x0f\n\x07\x64\x62_name\x18\x07 \x01(\t\x12\x17\n\x0f\x63lustering_info\x18\x08 \x01(\x0c:\x07\xca>\x04\x10\x12\x18\x01\"L\n\x0eImportResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\r\n\x05tasks\x18\x02 \x03(\x03\"%\n\x15GetImportStateRequest\x12\x0c\n\x04task\x18\x01 \x01(\x03\"\x97\x02\n\x16GetImportStateResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12/\n\x05state\x18\x02 \x01(\x0e\x32 .milvus.proto.common.ImportState\x12\x11\n\trow_count\x18\x03 \x01(\x03\x12\x0f\n\x07id_list\x18\x04 \x03(\x03\x12\x30\n\x05infos\x18\x05 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\n\n\x02id\x18\x06 \x01(\x03\x12\x15\n\rcollection_id\x18\x07 \x01(\x03\x12\x13\n\x0bsegment_ids\x18\x08 \x03(\x03\x12\x11\n\tcreate_ts\x18\t \x01(\x03\"Q\n\x16ListImportTasksRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\r\n\x05limit\x18\x02 \x01(\x03\x12\x0f\n\x07\x64\x62_name\x18\x03 \x01(\t\"\x82\x01\n\x17ListImportTasksResponse\x12+\n\x06status\x18\x01 
\x01(\x0b\x32\x1b.milvus.proto.common.Status\x12:\n\x05tasks\x18\x02 \x03(\x0b\x32+.milvus.proto.milvus.GetImportStateResponse\"\x9a\x01\n\x12GetReplicasRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x14\n\x0c\x63ollectionID\x18\x02 \x01(\x03\x12\x18\n\x10with_shard_nodes\x18\x03 \x01(\x08\x12\x17\n\x0f\x63ollection_name\x18\x04 \x01(\t\x12\x0f\n\x07\x64\x62_name\x18\x05 \x01(\t\"v\n\x13GetReplicasResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x32\n\x08replicas\x18\x02 \x03(\x0b\x32 .milvus.proto.milvus.ReplicaInfo\"\xc1\x02\n\x0bReplicaInfo\x12\x11\n\treplicaID\x18\x01 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x02 \x01(\x03\x12\x15\n\rpartition_ids\x18\x03 \x03(\x03\x12\x39\n\x0eshard_replicas\x18\x04 \x03(\x0b\x32!.milvus.proto.milvus.ShardReplica\x12\x10\n\x08node_ids\x18\x05 \x03(\x03\x12\x1b\n\x13resource_group_name\x18\x06 \x01(\t\x12P\n\x11num_outbound_node\x18\x07 \x03(\x0b\x32\x35.milvus.proto.milvus.ReplicaInfo.NumOutboundNodeEntry\x1a\x36\n\x14NumOutboundNodeEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"`\n\x0cShardReplica\x12\x10\n\x08leaderID\x18\x01 \x01(\x03\x12\x13\n\x0bleader_addr\x18\x02 \x01(\t\x12\x17\n\x0f\x64m_channel_name\x18\x03 \x01(\t\x12\x10\n\x08node_ids\x18\x04 \x03(\x03\"\xbe\x01\n\x17\x43reateCredentialRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x10\n\x08password\x18\x03 \x01(\t\x12\x1e\n\x16\x63reated_utc_timestamps\x18\x04 \x01(\x04\x12\x1f\n\x17modified_utc_timestamps\x18\x05 \x01(\x04:\x12\xca>\x0f\x08\x01\x10\x13\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xcd\x01\n\x17UpdateCredentialRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x13\n\x0boldPassword\x18\x03 \x01(\t\x12\x13\n\x0bnewPassword\x18\x04 \x01(\t\x12\x1e\n\x16\x63reated_utc_timestamps\x18\x05 \x01(\x04\x12\x1f\n\x17modified_utc_timestamps\x18\x06 \x01(\x04:\t\xca>\x06\x08\x02\x10\x14\x18\x02\"k\n\x17\x44\x65leteCredentialRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x10\n\x08username\x18\x02 \x01(\t:\x12\xca>\x0f\x08\x01\x10\x15\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"W\n\x15ListCredUsersResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x11\n\tusernames\x18\x02 \x03(\t\"V\n\x14ListCredUsersRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase:\x12\xca>\x0f\x08\x01\x10\x16\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x1a\n\nRoleEntity\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1a\n\nUserEntity\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x84\x01\n\x11\x43reateRoleRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12/\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x1f.milvus.proto.milvus.RoleEntity:\x12\xca>\x0f\x08\x01\x10\x13\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"x\n\x0f\x44ropRoleRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x11\n\trole_name\x18\x02 \x01(\t\x12\x12\n\nforce_drop\x18\x03 \x01(\x08:\x12\xca>\x0f\x08\x01\x10\x15\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"q\n\x1b\x43reatePrivilegeGroupRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x12\n\ngroup_name\x18\x02 \x01(\t:\x12\xca>\x0f\x08\x01\x10\x38\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"o\n\x19\x44ropPrivilegeGroupRequest\x12*\n\x04\x62\x61se\x18\x01 
\x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x12\n\ngroup_name\x18\x02 \x01(\t:\x12\xca>\x0f\x08\x01\x10\x39\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\\\n\x1aListPrivilegeGroupsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase:\x12\xca>\x0f\x08\x01\x10:\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x8d\x01\n\x1bListPrivilegeGroupsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x41\n\x10privilege_groups\x18\x02 \x03(\x0b\x32\'.milvus.proto.milvus.PrivilegeGroupInfo\"\xea\x01\n\x1cOperatePrivilegeGroupRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x12\n\ngroup_name\x18\x02 \x01(\t\x12\x38\n\nprivileges\x18\x03 \x03(\x0b\x32$.milvus.proto.milvus.PrivilegeEntity\x12<\n\x04type\x18\x04 \x01(\x0e\x32..milvus.proto.milvus.OperatePrivilegeGroupType:\x12\xca>\x0f\x08\x01\x10;\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xb5\x01\n\x16OperateUserRoleRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x11\n\trole_name\x18\x03 \x01(\t\x12\x36\n\x04type\x18\x04 \x01(\x0e\x32(.milvus.proto.milvus.OperateUserRoleType:\x12\xca>\x0f\x08\x01\x10\x17\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"b\n\x12PrivilegeGroupInfo\x12\x12\n\ngroup_name\x18\x01 \x01(\t\x12\x38\n\nprivileges\x18\x02 \x03(\x0b\x32$.milvus.proto.milvus.PrivilegeEntity\"\x9d\x01\n\x11SelectRoleRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12-\n\x04role\x18\x02 \x01(\x0b\x32\x1f.milvus.proto.milvus.RoleEntity\x12\x19\n\x11include_user_info\x18\x03 \x01(\x08:\x12\xca>\x0f\x08\x01\x10\x16\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"k\n\nRoleResult\x12-\n\x04role\x18\x01 \x01(\x0b\x32\x1f.milvus.proto.milvus.RoleEntity\x12.\n\x05users\x18\x02 \x03(\x0b\x32\x1f.milvus.proto.milvus.UserEntity\"s\n\x12SelectRoleResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x30\n\x07results\x18\x02 \x03(\x0b\x32\x1f.milvus.proto.milvus.RoleResult\"\x94\x01\n\x11SelectUserRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12-\n\x04user\x18\x02 \x01(\x0b\x32\x1f.milvus.proto.milvus.UserEntity\x12\x19\n\x11include_role_info\x18\x03 \x01(\x08:\t\xca>\x06\x08\x02\x10\x18\x18\x02\"k\n\nUserResult\x12-\n\x04user\x18\x01 \x01(\x0b\x32\x1f.milvus.proto.milvus.UserEntity\x12.\n\x05roles\x18\x02 \x03(\x0b\x32\x1f.milvus.proto.milvus.RoleEntity\"s\n\x12SelectUserResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x30\n\x07results\x18\x02 \x03(\x0b\x32\x1f.milvus.proto.milvus.UserResult\"\x1c\n\x0cObjectEntity\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1f\n\x0fPrivilegeEntity\x12\x0c\n\x04name\x18\x01 \x01(\t\"w\n\rGrantorEntity\x12-\n\x04user\x18\x01 \x01(\x0b\x32\x1f.milvus.proto.milvus.UserEntity\x12\x37\n\tprivilege\x18\x02 \x01(\x0b\x32$.milvus.proto.milvus.PrivilegeEntity\"L\n\x14GrantPrivilegeEntity\x12\x34\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\".milvus.proto.milvus.GrantorEntity\"\xca\x01\n\x0bGrantEntity\x12-\n\x04role\x18\x01 \x01(\x0b\x32\x1f.milvus.proto.milvus.RoleEntity\x12\x31\n\x06object\x18\x02 \x01(\x0b\x32!.milvus.proto.milvus.ObjectEntity\x12\x13\n\x0bobject_name\x18\x03 \x01(\t\x12\x33\n\x07grantor\x18\x04 \x01(\x0b\x32\".milvus.proto.milvus.GrantorEntity\x12\x0f\n\x07\x64\x62_name\x18\x05 \x01(\t\"\x86\x01\n\x12SelectGrantRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x30\n\x06\x65ntity\x18\x02 
\x01(\x0b\x32 .milvus.proto.milvus.GrantEntity:\x12\xca>\x0f\x08\x01\x10\x16\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"v\n\x13SelectGrantResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x32\n\x08\x65ntities\x18\x02 \x03(\x0b\x32 .milvus.proto.milvus.GrantEntity\"\xc4\x01\n\x17OperatePrivilegeRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x30\n\x06\x65ntity\x18\x02 \x01(\x0b\x32 .milvus.proto.milvus.GrantEntity\x12\x37\n\x04type\x18\x03 \x01(\x0e\x32).milvus.proto.milvus.OperatePrivilegeType:\x12\xca>\x0f\x08\x01\x10\x17\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"Z\n\x08UserInfo\x12\x0c\n\x04user\x18\x01 \x01(\t\x12\x10\n\x08password\x18\x02 \x01(\t\x12.\n\x05roles\x18\x03 \x03(\x0b\x32\x1f.milvus.proto.milvus.RoleEntity\"\xdd\x01\n\x08RBACMeta\x12,\n\x05users\x18\x01 \x03(\x0b\x32\x1d.milvus.proto.milvus.UserInfo\x12.\n\x05roles\x18\x02 \x03(\x0b\x32\x1f.milvus.proto.milvus.RoleEntity\x12\x30\n\x06grants\x18\x03 \x03(\x0b\x32 .milvus.proto.milvus.GrantEntity\x12\x41\n\x10privilege_groups\x18\x04 \x03(\x0b\x32\'.milvus.proto.milvus.PrivilegeGroupInfo\"W\n\x15\x42\x61\x63kupRBACMetaRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase:\x12\xca>\x0f\x08\x01\x10\x33\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"w\n\x16\x42\x61\x63kupRBACMetaResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x30\n\tRBAC_meta\x18\x02 \x01(\x0b\x32\x1d.milvus.proto.milvus.RBACMeta\"\x8a\x01\n\x16RestoreRBACMetaRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x30\n\tRBAC_meta\x18\x02 \x01(\x0b\x32\x1d.milvus.proto.milvus.RBACMeta:\x12\xca>\x0f\x08\x01\x10\x34\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x93\x01\n\x19GetLoadingProgressRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x17\n\x0f\x63ollection_name\x18\x02 \x01(\t\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\x12\x0f\n\x07\x64\x62_name\x18\x04 \x01(\t:\x07\xca>\x04\x10!\x18\x02\"u\n\x1aGetLoadingProgressResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x10\n\x08progress\x18\x02 \x01(\x03\x12\x18\n\x10refresh_progress\x18\x03 \x01(\x03\"\x8d\x01\n\x13GetLoadStateRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x17\n\x0f\x63ollection_name\x18\x02 \x01(\t\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\x12\x0f\n\x07\x64\x62_name\x18\x04 \x01(\t:\x07\xca>\x04\x10!\x18\x02\"r\n\x14GetLoadStateResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12-\n\x05state\x18\x02 \x01(\x0e\x32\x1e.milvus.proto.common.LoadState\"\x1c\n\tMilvusExt\x12\x0f\n\x07version\x18\x01 \x01(\t\"\x13\n\x11GetVersionRequest\"R\n\x12GetVersionResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x0f\n\x07version\x18\x02 \x01(\t\"\x14\n\x12\x43heckHealthRequest\"\x9d\x01\n\x13\x43heckHealthResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x11\n\tisHealthy\x18\x02 \x01(\x08\x12\x0f\n\x07reasons\x18\x03 \x03(\t\x12\x35\n\x0cquota_states\x18\x04 \x03(\x0e\x32\x1f.milvus.proto.milvus.QuotaState\"\xaa\x01\n\x1a\x43reateResourceGroupRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x16\n\x0eresource_group\x18\x02 \x01(\t\x12\x34\n\x06\x63onfig\x18\x03 
\x01(\x0b\x32$.milvus.proto.rg.ResourceGroupConfig:\x12\xca>\x0f\x08\x01\x10\x1a\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x99\x02\n\x1bUpdateResourceGroupsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12]\n\x0fresource_groups\x18\x02 \x03(\x0b\x32\x44.milvus.proto.milvus.UpdateResourceGroupsRequest.ResourceGroupsEntry\x1a[\n\x13ResourceGroupsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32$.milvus.proto.rg.ResourceGroupConfig:\x02\x38\x01:\x12\xca>\x0f\x08\x01\x10\x30\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"r\n\x18\x44ropResourceGroupRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x16\n\x0eresource_group\x18\x02 \x01(\t:\x12\xca>\x0f\x08\x01\x10\x1b\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xa5\x01\n\x13TransferNodeRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x1d\n\x15source_resource_group\x18\x02 \x01(\t\x12\x1d\n\x15target_resource_group\x18\x03 \x01(\t\x12\x10\n\x08num_node\x18\x04 \x01(\x05:\x12\xca>\x0f\x08\x01\x10\x1e\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xd5\x01\n\x16TransferReplicaRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x1d\n\x15source_resource_group\x18\x02 \x01(\t\x12\x1d\n\x15target_resource_group\x18\x03 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x04 \x01(\t\x12\x13\n\x0bnum_replica\x18\x05 \x01(\x03\x12\x0f\n\x07\x64\x62_name\x18\x06 \x01(\t:\x12\xca>\x0f\x08\x01\x10\x1f\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"[\n\x19ListResourceGroupsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase:\x12\xca>\x0f\x08\x01\x10\x1d\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"b\n\x1aListResourceGroupsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x17\n\x0fresource_groups\x18\x02 \x03(\t\"v\n\x1c\x44\x65scribeResourceGroupRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x16\n\x0eresource_group\x18\x02 \x01(\t:\x12\xca>\x0f\x08\x01\x10\x1c\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x88\x01\n\x1d\x44\x65scribeResourceGroupResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12:\n\x0eresource_group\x18\x02 \x01(\x0b\x32\".milvus.proto.milvus.ResourceGroup\"\xd6\x04\n\rResourceGroup\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08\x63\x61pacity\x18\x02 \x01(\x05\x12\x1a\n\x12num_available_node\x18\x03 \x01(\x05\x12T\n\x12num_loaded_replica\x18\x04 \x03(\x0b\x32\x38.milvus.proto.milvus.ResourceGroup.NumLoadedReplicaEntry\x12R\n\x11num_outgoing_node\x18\x05 \x03(\x0b\x32\x37.milvus.proto.milvus.ResourceGroup.NumOutgoingNodeEntry\x12R\n\x11num_incoming_node\x18\x06 \x03(\x0b\x32\x37.milvus.proto.milvus.ResourceGroup.NumIncomingNodeEntry\x12\x34\n\x06\x63onfig\x18\x07 \x01(\x0b\x32$.milvus.proto.rg.ResourceGroupConfig\x12,\n\x05nodes\x18\x08 \x03(\x0b\x32\x1d.milvus.proto.common.NodeInfo\x1a\x37\n\x15NumLoadedReplicaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x36\n\x14NumOutgoingNodeEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x36\n\x14NumIncomingNodeEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"\x9f\x01\n\x17RenameCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x0f\n\x07oldName\x18\x03 \x01(\t\x12\x0f\n\x07newName\x18\x04 
\x01(\t\x12\x11\n\tnewDBName\x18\x05 \x01(\t:\x12\xca>\x0f\x08\x01\x10\"\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xa1\x01\n\x19GetIndexStatisticsRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x12\n\nindex_name\x18\x04 \x01(\t\x12\x11\n\ttimestamp\x18\x05 \x01(\x04:\x07\xca>\x04\x10\x0c\x18\x03\"\x8c\x01\n\x1aGetIndexStatisticsResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x41\n\x12index_descriptions\x18\x02 \x03(\x0b\x32%.milvus.proto.milvus.IndexDescription\"r\n\x0e\x43onnectRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x34\n\x0b\x63lient_info\x18\x02 \x01(\x0b\x32\x1f.milvus.proto.common.ClientInfo\"\x88\x01\n\x0f\x43onnectResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x34\n\x0bserver_info\x18\x02 \x01(\x0b\x32\x1f.milvus.proto.common.ServerInfo\x12\x12\n\nidentifier\x18\x03 \x01(\x03\"C\n\x15\x41llocTimestampRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\"X\n\x16\x41llocTimestampResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x11\n\ttimestamp\x18\x02 \x01(\x04\"\x9f\x01\n\x15\x43reateDatabaseRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x35\n\nproperties\x18\x03 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair:\x12\xca>\x0f\x08\x01\x10#\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"f\n\x13\x44ropDatabaseRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t:\x12\xca>\x0f\x08\x01\x10$\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"B\n\x14ListDatabasesRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\"\x81\x01\n\x15ListDatabasesResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x10\n\x08\x64\x62_names\x18\x02 \x03(\t\x12\x19\n\x11\x63reated_timestamp\x18\x03 \x03(\x04\x12\x0e\n\x06\x64\x62_ids\x18\x04 \x03(\x03\"\xad\x01\n\x14\x41lterDatabaseRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\r\n\x05\x64\x62_id\x18\x03 \x01(\t\x12\x35\n\nproperties\x18\x04 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair:\x12\xca>\x0f\x08\x01\x10\x31\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"j\n\x17\x44\x65scribeDatabaseRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t:\x12\xca>\x0f\x08\x01\x10\x32\x18\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\xb8\x01\n\x18\x44\x65scribeDatabaseResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x62ID\x18\x03 \x01(\x03\x12\x19\n\x11\x63reated_timestamp\x18\x04 \x01(\x04\x12\x35\n\nproperties\x18\x05 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\"\xf5\x01\n\x17ReplicateMessageRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x14\n\x0c\x63hannel_name\x18\x02 \x01(\t\x12\x0f\n\x07\x42\x65ginTs\x18\x03 \x01(\x04\x12\r\n\x05\x45ndTs\x18\x04 \x01(\x04\x12\x0c\n\x04Msgs\x18\x05 \x03(\x0c\x12\x35\n\x0eStartPositions\x18\x06 \x03(\x0b\x32\x1d.milvus.proto.msg.MsgPosition\x12\x33\n\x0c\x45ndPositions\x18\x07 
\x03(\x0b\x32\x1d.milvus.proto.msg.MsgPosition\"Y\n\x18ReplicateMessageResponse\x12+\n\x06status\x18\x01 \x01(\x0b\x32\x1b.milvus.proto.common.Status\x12\x10\n\x08position\x18\x02 \x01(\t\"b\n\x15ImportAuthPlaceholder\x12\x0f\n\x07\x64\x62_name\x18\x01 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x02 \x01(\t\x12\x16\n\x0epartition_name\x18\x03 \x01(\t:\x07\xca>\x04\x10\x12\x18\x01\"<\n GetImportProgressAuthPlaceholder\x12\x0f\n\x07\x64\x62_name\x18\x01 \x01(\t:\x07\xca>\x04\x10\x12\x18\x01\"O\n\x1aListImportsAuthPlaceholder\x12\x0f\n\x07\x64\x62_name\x18\x03 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t:\x07\xca>\x04\x10\x12\x18\x01*%\n\x08ShowType\x12\x07\n\x03\x41ll\x10\x00\x12\x0c\n\x08InMemory\x10\x01\x1a\x02\x18\x01*T\n\x19OperatePrivilegeGroupType\x12\x18\n\x14\x41\x64\x64PrivilegesToGroup\x10\x00\x12\x1d\n\x19RemovePrivilegesFromGroup\x10\x01*@\n\x13OperateUserRoleType\x12\x11\n\rAddUserToRole\x10\x00\x12\x16\n\x12RemoveUserFromRole\x10\x01*-\n\x14OperatePrivilegeType\x12\t\n\x05Grant\x10\x00\x12\n\n\x06Revoke\x10\x01*]\n\nQuotaState\x12\x0b\n\x07Unknown\x10\x00\x12\x0f\n\x0bReadLimited\x10\x02\x12\x10\n\x0cWriteLimited\x10\x03\x12\x0e\n\nDenyToRead\x10\x04\x12\x0f\n\x0b\x44\x65nyToWrite\x10\x05\x32\x9cJ\n\rMilvusService\x12_\n\x10\x43reateCollection\x12,.milvus.proto.milvus.CreateCollectionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12[\n\x0e\x44ropCollection\x12*.milvus.proto.milvus.DropCollectionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12_\n\rHasCollection\x12).milvus.proto.milvus.HasCollectionRequest\x1a!.milvus.proto.milvus.BoolResponse\"\x00\x12[\n\x0eLoadCollection\x12*.milvus.proto.milvus.LoadCollectionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12\x61\n\x11ReleaseCollection\x12-.milvus.proto.milvus.ReleaseCollectionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12w\n\x12\x44\x65scribeCollection\x12..milvus.proto.milvus.DescribeCollectionRequest\x1a/.milvus.proto.milvus.DescribeCollectionResponse\"\x00\x12\x86\x01\n\x17GetCollectionStatistics\x12\x33.milvus.proto.milvus.GetCollectionStatisticsRequest\x1a\x34.milvus.proto.milvus.GetCollectionStatisticsResponse\"\x00\x12n\n\x0fShowCollections\x12+.milvus.proto.milvus.ShowCollectionsRequest\x1a,.milvus.proto.milvus.ShowCollectionsResponse\"\x00\x12]\n\x0f\x41lterCollection\x12+.milvus.proto.milvus.AlterCollectionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12g\n\x14\x41lterCollectionField\x12\x30.milvus.proto.milvus.AlterCollectionFieldRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12]\n\x0f\x43reatePartition\x12+.milvus.proto.milvus.CreatePartitionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12Y\n\rDropPartition\x12).milvus.proto.milvus.DropPartitionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12]\n\x0cHasPartition\x12(.milvus.proto.milvus.HasPartitionRequest\x1a!.milvus.proto.milvus.BoolResponse\"\x00\x12[\n\x0eLoadPartitions\x12*.milvus.proto.milvus.LoadPartitionsRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12\x61\n\x11ReleasePartitions\x12-.milvus.proto.milvus.ReleasePartitionsRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12\x83\x01\n\x16GetPartitionStatistics\x12\x32.milvus.proto.milvus.GetPartitionStatisticsRequest\x1a\x33.milvus.proto.milvus.GetPartitionStatisticsResponse\"\x00\x12k\n\x0eShowPartitions\x12*.milvus.proto.milvus.ShowPartitionsRequest\x1a+.milvus.proto.milvus.ShowPartitionsResponse\"\x00\x12w\n\x12GetLoadingProgress\x12..milvus.proto.milvus.GetLoadingProgressRequest\x1a/.milvus.proto.milvus.GetLoadingProgressResponse\"\x00\x12\x65\n\x0cGetLoadState\
x12(.milvus.proto.milvus.GetLoadStateRequest\x1a).milvus.proto.milvus.GetLoadStateResponse\"\x00\x12U\n\x0b\x43reateAlias\x12\'.milvus.proto.milvus.CreateAliasRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12Q\n\tDropAlias\x12%.milvus.proto.milvus.DropAliasRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12S\n\nAlterAlias\x12&.milvus.proto.milvus.AlterAliasRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12h\n\rDescribeAlias\x12).milvus.proto.milvus.DescribeAliasRequest\x1a*.milvus.proto.milvus.DescribeAliasResponse\"\x00\x12\x62\n\x0bListAliases\x12\'.milvus.proto.milvus.ListAliasesRequest\x1a(.milvus.proto.milvus.ListAliasesResponse\"\x00\x12U\n\x0b\x43reateIndex\x12\'.milvus.proto.milvus.CreateIndexRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12S\n\nAlterIndex\x12&.milvus.proto.milvus.AlterIndexRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12h\n\rDescribeIndex\x12).milvus.proto.milvus.DescribeIndexRequest\x1a*.milvus.proto.milvus.DescribeIndexResponse\"\x00\x12w\n\x12GetIndexStatistics\x12..milvus.proto.milvus.GetIndexStatisticsRequest\x1a/.milvus.proto.milvus.GetIndexStatisticsResponse\"\x00\x12k\n\rGetIndexState\x12).milvus.proto.milvus.GetIndexStateRequest\x1a*.milvus.proto.milvus.GetIndexStateResponse\"\x03\x88\x02\x01\x12\x83\x01\n\x15GetIndexBuildProgress\x12\x31.milvus.proto.milvus.GetIndexBuildProgressRequest\x1a\x32.milvus.proto.milvus.GetIndexBuildProgressResponse\"\x03\x88\x02\x01\x12Q\n\tDropIndex\x12%.milvus.proto.milvus.DropIndexRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12S\n\x06Insert\x12\".milvus.proto.milvus.InsertRequest\x1a#.milvus.proto.milvus.MutationResult\"\x00\x12S\n\x06\x44\x65lete\x12\".milvus.proto.milvus.DeleteRequest\x1a#.milvus.proto.milvus.MutationResult\"\x00\x12S\n\x06Upsert\x12\".milvus.proto.milvus.UpsertRequest\x1a#.milvus.proto.milvus.MutationResult\"\x00\x12R\n\x06Search\x12\".milvus.proto.milvus.SearchRequest\x1a\".milvus.proto.milvus.SearchResults\"\x00\x12^\n\x0cHybridSearch\x12(.milvus.proto.milvus.HybridSearchRequest\x1a\".milvus.proto.milvus.SearchResults\"\x00\x12P\n\x05\x46lush\x12!.milvus.proto.milvus.FlushRequest\x1a\".milvus.proto.milvus.FlushResponse\"\x00\x12O\n\x05Query\x12!.milvus.proto.milvus.QueryRequest\x1a!.milvus.proto.milvus.QueryResults\"\x00\x12\x64\n\x0c\x43\x61lcDistance\x12(.milvus.proto.milvus.CalcDistanceRequest\x1a(.milvus.proto.milvus.CalcDistanceResults\"\x00\x12Y\n\x08\x46lushAll\x12$.milvus.proto.milvus.FlushAllRequest\x1a%.milvus.proto.milvus.FlushAllResponse\"\x00\x12h\n\rGetFlushState\x12).milvus.proto.milvus.GetFlushStateRequest\x1a*.milvus.proto.milvus.GetFlushStateResponse\"\x00\x12q\n\x10GetFlushAllState\x12,.milvus.proto.milvus.GetFlushAllStateRequest\x1a-.milvus.proto.milvus.GetFlushAllStateResponse\"\x00\x12\x89\x01\n\x18GetPersistentSegmentInfo\x12\x34.milvus.proto.milvus.GetPersistentSegmentInfoRequest\x1a\x35.milvus.proto.milvus.GetPersistentSegmentInfoResponse\"\x00\x12z\n\x13GetQuerySegmentInfo\x12/.milvus.proto.milvus.GetQuerySegmentInfoRequest\x1a\x30.milvus.proto.milvus.GetQuerySegmentInfoResponse\"\x00\x12\x62\n\x0bGetReplicas\x12\'.milvus.proto.milvus.GetReplicasRequest\x1a(.milvus.proto.milvus.GetReplicasResponse\"\x00\x12P\n\x05\x44ummy\x12!.milvus.proto.milvus.DummyRequest\x1a\".milvus.proto.milvus.DummyResponse\"\x00\x12\x65\n\x0cRegisterLink\x12(.milvus.proto.milvus.RegisterLinkRequest\x1a).milvus.proto.milvus.RegisterLinkResponse\"\x00\x12_\n\nGetMetrics\x12&.milvus.proto.milvus.GetMetricsRequest\x1a\'.milvus.proto.milvus.GetMetricsResponse\"\x00\x12l\n\x12GetComponentSt
ates\x12..milvus.proto.milvus.GetComponentStatesRequest\x1a$.milvus.proto.milvus.ComponentStates\"\x00\x12U\n\x0bLoadBalance\x12\'.milvus.proto.milvus.LoadBalanceRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12w\n\x12GetCompactionState\x12..milvus.proto.milvus.GetCompactionStateRequest\x1a/.milvus.proto.milvus.GetCompactionStateResponse\"\x00\x12q\n\x10ManualCompaction\x12,.milvus.proto.milvus.ManualCompactionRequest\x1a-.milvus.proto.milvus.ManualCompactionResponse\"\x00\x12\x80\x01\n\x1bGetCompactionStateWithPlans\x12..milvus.proto.milvus.GetCompactionPlansRequest\x1a/.milvus.proto.milvus.GetCompactionPlansResponse\"\x00\x12S\n\x06Import\x12\".milvus.proto.milvus.ImportRequest\x1a#.milvus.proto.milvus.ImportResponse\"\x00\x12k\n\x0eGetImportState\x12*.milvus.proto.milvus.GetImportStateRequest\x1a+.milvus.proto.milvus.GetImportStateResponse\"\x00\x12n\n\x0fListImportTasks\x12+.milvus.proto.milvus.ListImportTasksRequest\x1a,.milvus.proto.milvus.ListImportTasksResponse\"\x00\x12_\n\x10\x43reateCredential\x12,.milvus.proto.milvus.CreateCredentialRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12_\n\x10UpdateCredential\x12,.milvus.proto.milvus.UpdateCredentialRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12_\n\x10\x44\x65leteCredential\x12,.milvus.proto.milvus.DeleteCredentialRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12h\n\rListCredUsers\x12).milvus.proto.milvus.ListCredUsersRequest\x1a*.milvus.proto.milvus.ListCredUsersResponse\"\x00\x12S\n\nCreateRole\x12&.milvus.proto.milvus.CreateRoleRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12O\n\x08\x44ropRole\x12$.milvus.proto.milvus.DropRoleRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12]\n\x0fOperateUserRole\x12+.milvus.proto.milvus.OperateUserRoleRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12_\n\nSelectRole\x12&.milvus.proto.milvus.SelectRoleRequest\x1a\'.milvus.proto.milvus.SelectRoleResponse\"\x00\x12_\n\nSelectUser\x12&.milvus.proto.milvus.SelectUserRequest\x1a\'.milvus.proto.milvus.SelectUserResponse\"\x00\x12_\n\x10OperatePrivilege\x12,.milvus.proto.milvus.OperatePrivilegeRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12\x62\n\x0bSelectGrant\x12\'.milvus.proto.milvus.SelectGrantRequest\x1a(.milvus.proto.milvus.SelectGrantResponse\"\x00\x12_\n\nGetVersion\x12&.milvus.proto.milvus.GetVersionRequest\x1a\'.milvus.proto.milvus.GetVersionResponse\"\x00\x12\x62\n\x0b\x43heckHealth\x12\'.milvus.proto.milvus.CheckHealthRequest\x1a(.milvus.proto.milvus.CheckHealthResponse\"\x00\x12\x65\n\x13\x43reateResourceGroup\x12/.milvus.proto.milvus.CreateResourceGroupRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12\x61\n\x11\x44ropResourceGroup\x12-.milvus.proto.milvus.DropResourceGroupRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12g\n\x14UpdateResourceGroups\x12\x30.milvus.proto.milvus.UpdateResourceGroupsRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12W\n\x0cTransferNode\x12(.milvus.proto.milvus.TransferNodeRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12]\n\x0fTransferReplica\x12+.milvus.proto.milvus.TransferReplicaRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12w\n\x12ListResourceGroups\x12..milvus.proto.milvus.ListResourceGroupsRequest\x1a/.milvus.proto.milvus.ListResourceGroupsResponse\"\x00\x12\x80\x01\n\x15\x44\x65scribeResourceGroup\x12\x31.milvus.proto.milvus.DescribeResourceGroupRequest\x1a\x32.milvus.proto.milvus.DescribeResourceGroupResponse\"\x00\x12_\n\x10RenameCollection\x12,.milvus.proto.milvus.RenameCollectionRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12u\n\x12ListIndexedSegment\x1
2-.milvus.proto.feder.ListIndexedSegmentRequest\x1a..milvus.proto.feder.ListIndexedSegmentResponse\"\x00\x12\x87\x01\n\x18\x44\x65scribeSegmentIndexData\x12\x33.milvus.proto.feder.DescribeSegmentIndexDataRequest\x1a\x34.milvus.proto.feder.DescribeSegmentIndexDataResponse\"\x00\x12V\n\x07\x43onnect\x12#.milvus.proto.milvus.ConnectRequest\x1a$.milvus.proto.milvus.ConnectResponse\"\x00\x12k\n\x0e\x41llocTimestamp\x12*.milvus.proto.milvus.AllocTimestampRequest\x1a+.milvus.proto.milvus.AllocTimestampResponse\"\x00\x12[\n\x0e\x43reateDatabase\x12*.milvus.proto.milvus.CreateDatabaseRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12W\n\x0c\x44ropDatabase\x12(.milvus.proto.milvus.DropDatabaseRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12h\n\rListDatabases\x12).milvus.proto.milvus.ListDatabasesRequest\x1a*.milvus.proto.milvus.ListDatabasesResponse\"\x00\x12Y\n\rAlterDatabase\x12).milvus.proto.milvus.AlterDatabaseRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12q\n\x10\x44\x65scribeDatabase\x12,.milvus.proto.milvus.DescribeDatabaseRequest\x1a-.milvus.proto.milvus.DescribeDatabaseResponse\"\x00\x12q\n\x10ReplicateMessage\x12,.milvus.proto.milvus.ReplicateMessageRequest\x1a-.milvus.proto.milvus.ReplicateMessageResponse\"\x00\x12g\n\nBackupRBAC\x12*.milvus.proto.milvus.BackupRBACMetaRequest\x1a+.milvus.proto.milvus.BackupRBACMetaResponse\"\x00\x12Y\n\x0bRestoreRBAC\x12+.milvus.proto.milvus.RestoreRBACMetaRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12g\n\x14\x43reatePrivilegeGroup\x12\x30.milvus.proto.milvus.CreatePrivilegeGroupRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12\x63\n\x12\x44ropPrivilegeGroup\x12..milvus.proto.milvus.DropPrivilegeGroupRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x12z\n\x13ListPrivilegeGroups\x12/.milvus.proto.milvus.ListPrivilegeGroupsRequest\x1a\x30.milvus.proto.milvus.ListPrivilegeGroupsResponse\"\x00\x12i\n\x15OperatePrivilegeGroup\x12\x31.milvus.proto.milvus.OperatePrivilegeGroupRequest\x1a\x1b.milvus.proto.common.Status\"\x00\x32u\n\x0cProxyService\x12\x65\n\x0cRegisterLink\x12(.milvus.proto.milvus.RegisterLinkRequest\x1a).milvus.proto.milvus.RegisterLinkResponse\"\x00:U\n\x0emilvus_ext_obj\x12\x1c.google.protobuf.FileOptions\x18\xe9\x07 \x01(\x0b\x32\x1e.milvus.proto.milvus.MilvusExtBm\n\x0eio.milvus.grpcB\x0bMilvusProtoP\x01Z4github.com/milvus-io/milvus-proto/go-api/v2/milvuspb\xa0\x01\x01\xaa\x02\x12Milvus.Client.Grpcb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -46,6 +46,8 @@ _globals['_DROPCOLLECTIONREQUEST']._serialized_options = b'\312>\017\010\001\020\002\030\377\377\377\377\377\377\377\377\377\001' _globals['_ALTERCOLLECTIONREQUEST']._options = None _globals['_ALTERCOLLECTIONREQUEST']._serialized_options = b'\312>\017\010\001\020\001\030\377\377\377\377\377\377\377\377\377\001' + _globals['_ALTERCOLLECTIONFIELDREQUEST']._options = None + _globals['_ALTERCOLLECTIONFIELDREQUEST']._serialized_options = b'\312>\017\010\001\020\001\030\377\377\377\377\377\377\377\377\377\001' _globals['_DESCRIBECOLLECTIONREQUEST']._options = None _globals['_DESCRIBECOLLECTIONREQUEST']._serialized_options = b'\312>\017\010\001\020\003\030\377\377\377\377\377\377\377\377\377\001' _globals['_LOADCOLLECTIONREQUEST']._options = None @@ -92,8 +94,14 @@ _globals['_INSERTREQUEST']._serialized_options = b'\312>\004\020\010\030\003' _globals['_UPSERTREQUEST']._options = None _globals['_UPSERTREQUEST']._serialized_options = b'\312>\004\020\031\030\003' + _globals['_DELETEREQUEST_EXPRTEMPLATEVALUESENTRY']._options = 
None + _globals['_DELETEREQUEST_EXPRTEMPLATEVALUESENTRY']._serialized_options = b'8\001' _globals['_DELETEREQUEST']._options = None _globals['_DELETEREQUEST']._serialized_options = b'\312>\004\020\t\030\003' + _globals['_SUBSEARCHREQUEST_EXPRTEMPLATEVALUESENTRY']._options = None + _globals['_SUBSEARCHREQUEST_EXPRTEMPLATEVALUESENTRY']._serialized_options = b'8\001' + _globals['_SEARCHREQUEST_EXPRTEMPLATEVALUESENTRY']._options = None + _globals['_SEARCHREQUEST_EXPRTEMPLATEVALUESENTRY']._serialized_options = b'8\001' _globals['_SEARCHREQUEST']._options = None _globals['_SEARCHREQUEST']._serialized_options = b'\312>\004\020\016\030\003' _globals['_HYBRIDSEARCHREQUEST']._options = None @@ -108,6 +116,10 @@ _globals['_FLUSHRESPONSE_COLLSEALTIMESENTRY']._serialized_options = b'8\001' _globals['_FLUSHRESPONSE_COLLFLUSHTSENTRY']._options = None _globals['_FLUSHRESPONSE_COLLFLUSHTSENTRY']._serialized_options = b'8\001' + _globals['_FLUSHRESPONSE_CHANNELCPSENTRY']._options = None + _globals['_FLUSHRESPONSE_CHANNELCPSENTRY']._serialized_options = b'8\001' + _globals['_QUERYREQUEST_EXPRTEMPLATEVALUESENTRY']._options = None + _globals['_QUERYREQUEST_EXPRTEMPLATEVALUESENTRY']._serialized_options = b'8\001' _globals['_QUERYREQUEST']._options = None _globals['_QUERYREQUEST']._serialized_options = b'\312>\004\020\020\030\003' _globals['_FLUSHALLREQUEST']._options = None @@ -136,6 +148,14 @@ _globals['_CREATEROLEREQUEST']._serialized_options = b'\312>\017\010\001\020\023\030\377\377\377\377\377\377\377\377\377\001' _globals['_DROPROLEREQUEST']._options = None _globals['_DROPROLEREQUEST']._serialized_options = b'\312>\017\010\001\020\025\030\377\377\377\377\377\377\377\377\377\001' + _globals['_CREATEPRIVILEGEGROUPREQUEST']._options = None + _globals['_CREATEPRIVILEGEGROUPREQUEST']._serialized_options = b'\312>\017\010\001\0208\030\377\377\377\377\377\377\377\377\377\001' + _globals['_DROPPRIVILEGEGROUPREQUEST']._options = None + _globals['_DROPPRIVILEGEGROUPREQUEST']._serialized_options = b'\312>\017\010\001\0209\030\377\377\377\377\377\377\377\377\377\001' + _globals['_LISTPRIVILEGEGROUPSREQUEST']._options = None + _globals['_LISTPRIVILEGEGROUPSREQUEST']._serialized_options = b'\312>\017\010\001\020:\030\377\377\377\377\377\377\377\377\377\001' + _globals['_OPERATEPRIVILEGEGROUPREQUEST']._options = None + _globals['_OPERATEPRIVILEGEGROUPREQUEST']._serialized_options = b'\312>\017\010\001\020;\030\377\377\377\377\377\377\377\377\377\001' _globals['_OPERATEUSERROLEREQUEST']._options = None _globals['_OPERATEUSERROLEREQUEST']._serialized_options = b'\312>\017\010\001\020\027\030\377\377\377\377\377\377\377\377\377\001' _globals['_SELECTROLEREQUEST']._options = None @@ -146,10 +166,14 @@ _globals['_SELECTGRANTREQUEST']._serialized_options = b'\312>\017\010\001\020\026\030\377\377\377\377\377\377\377\377\377\001' _globals['_OPERATEPRIVILEGEREQUEST']._options = None _globals['_OPERATEPRIVILEGEREQUEST']._serialized_options = b'\312>\017\010\001\020\027\030\377\377\377\377\377\377\377\377\377\001' + _globals['_BACKUPRBACMETAREQUEST']._options = None + _globals['_BACKUPRBACMETAREQUEST']._serialized_options = b'\312>\017\010\001\0203\030\377\377\377\377\377\377\377\377\377\001' + _globals['_RESTORERBACMETAREQUEST']._options = None + _globals['_RESTORERBACMETAREQUEST']._serialized_options = b'\312>\017\010\001\0204\030\377\377\377\377\377\377\377\377\377\001' _globals['_GETLOADINGPROGRESSREQUEST']._options = None - _globals['_GETLOADINGPROGRESSREQUEST']._serialized_options = b'\312>\004\020\005\030\002' + 
_globals['_GETLOADINGPROGRESSREQUEST']._serialized_options = b'\312>\004\020!\030\002' _globals['_GETLOADSTATEREQUEST']._options = None - _globals['_GETLOADSTATEREQUEST']._serialized_options = b'\312>\004\020\005\030\002' + _globals['_GETLOADSTATEREQUEST']._serialized_options = b'\312>\004\020!\030\002' _globals['_CREATERESOURCEGROUPREQUEST']._options = None _globals['_CREATERESOURCEGROUPREQUEST']._serialized_options = b'\312>\017\010\001\020\032\030\377\377\377\377\377\377\377\377\377\001' _globals['_UPDATERESOURCEGROUPSREQUEST_RESOURCEGROUPSENTRY']._options = None @@ -182,6 +206,8 @@ _globals['_DROPDATABASEREQUEST']._serialized_options = b'\312>\017\010\001\020$\030\377\377\377\377\377\377\377\377\377\001' _globals['_ALTERDATABASEREQUEST']._options = None _globals['_ALTERDATABASEREQUEST']._serialized_options = b'\312>\017\010\001\0201\030\377\377\377\377\377\377\377\377\377\001' + _globals['_DESCRIBEDATABASEREQUEST']._options = None + _globals['_DESCRIBEDATABASEREQUEST']._serialized_options = b'\312>\017\010\001\0202\030\377\377\377\377\377\377\377\377\377\001' _globals['_IMPORTAUTHPLACEHOLDER']._options = None _globals['_IMPORTAUTHPLACEHOLDER']._serialized_options = b'\312>\004\020\022\030\001' _globals['_GETIMPORTPROGRESSAUTHPLACEHOLDER']._options = None @@ -192,14 +218,16 @@ _globals['_MILVUSSERVICE'].methods_by_name['GetIndexState']._serialized_options = b'\210\002\001' _globals['_MILVUSSERVICE'].methods_by_name['GetIndexBuildProgress']._options = None _globals['_MILVUSSERVICE'].methods_by_name['GetIndexBuildProgress']._serialized_options = b'\210\002\001' - _globals['_SHOWTYPE']._serialized_start=24640 - _globals['_SHOWTYPE']._serialized_end=24677 - _globals['_OPERATEUSERROLETYPE']._serialized_start=24679 - _globals['_OPERATEUSERROLETYPE']._serialized_end=24743 - _globals['_OPERATEPRIVILEGETYPE']._serialized_start=24745 - _globals['_OPERATEPRIVILEGETYPE']._serialized_end=24790 - _globals['_QUOTASTATE']._serialized_start=24792 - _globals['_QUOTASTATE']._serialized_end=24885 + _globals['_SHOWTYPE']._serialized_start=27936 + _globals['_SHOWTYPE']._serialized_end=27973 + _globals['_OPERATEPRIVILEGEGROUPTYPE']._serialized_start=27975 + _globals['_OPERATEPRIVILEGEGROUPTYPE']._serialized_end=28059 + _globals['_OPERATEUSERROLETYPE']._serialized_start=28061 + _globals['_OPERATEUSERROLETYPE']._serialized_end=28125 + _globals['_OPERATEPRIVILEGETYPE']._serialized_start=28127 + _globals['_OPERATEPRIVILEGETYPE']._serialized_end=28172 + _globals['_QUOTASTATE']._serialized_start=28174 + _globals['_QUOTASTATE']._serialized_end=28267 _globals['_CREATEALIASREQUEST']._serialized_start=134 _globals['_CREATEALIASREQUEST']._serialized_end=275 _globals['_DROPALIASREQUEST']._serialized_start=277 @@ -219,331 +247,369 @@ _globals['_DROPCOLLECTIONREQUEST']._serialized_start=1349 _globals['_DROPCOLLECTIONREQUEST']._serialized_end=1478 _globals['_ALTERCOLLECTIONREQUEST']._serialized_start=1481 - _globals['_ALTERCOLLECTIONREQUEST']._serialized_end=1688 - _globals['_HASCOLLECTIONREQUEST']._serialized_start=1691 - _globals['_HASCOLLECTIONREQUEST']._serialized_end=1819 - _globals['_BOOLRESPONSE']._serialized_start=1821 - _globals['_BOOLRESPONSE']._serialized_end=1895 - _globals['_STRINGRESPONSE']._serialized_start=1897 - _globals['_STRINGRESPONSE']._serialized_end=1973 - _globals['_DESCRIBECOLLECTIONREQUEST']._serialized_start=1976 - _globals['_DESCRIBECOLLECTIONREQUEST']._serialized_end=2151 - _globals['_DESCRIBECOLLECTIONRESPONSE']._serialized_start=2154 - 
_globals['_DESCRIBECOLLECTIONRESPONSE']._serialized_end=2723 - _globals['_LOADCOLLECTIONREQUEST']._serialized_start=2726 - _globals['_LOADCOLLECTIONREQUEST']._serialized_end=2910 - _globals['_RELEASECOLLECTIONREQUEST']._serialized_start=2912 - _globals['_RELEASECOLLECTIONREQUEST']._serialized_end=3033 - _globals['_GETSTATISTICSREQUEST']._serialized_start=3036 - _globals['_GETSTATISTICSREQUEST']._serialized_end=3207 - _globals['_GETSTATISTICSRESPONSE']._serialized_start=3209 - _globals['_GETSTATISTICSRESPONSE']._serialized_end=3327 - _globals['_GETCOLLECTIONSTATISTICSREQUEST']._serialized_start=3329 - _globals['_GETCOLLECTIONSTATISTICSREQUEST']._serialized_end=3456 - _globals['_GETCOLLECTIONSTATISTICSRESPONSE']._serialized_start=3459 - _globals['_GETCOLLECTIONSTATISTICSRESPONSE']._serialized_end=3587 - _globals['_SHOWCOLLECTIONSREQUEST']._serialized_start=3590 - _globals['_SHOWCOLLECTIONSREQUEST']._serialized_end=3770 - _globals['_SHOWCOLLECTIONSRESPONSE']._serialized_start=3773 - _globals['_SHOWCOLLECTIONSRESPONSE']._serialized_end=4020 - _globals['_CREATEPARTITIONREQUEST']._serialized_start=4023 - _globals['_CREATEPARTITIONREQUEST']._serialized_end=4166 - _globals['_DROPPARTITIONREQUEST']._serialized_start=4169 - _globals['_DROPPARTITIONREQUEST']._serialized_end=4310 - _globals['_HASPARTITIONREQUEST']._serialized_start=4313 - _globals['_HASPARTITIONREQUEST']._serialized_end=4453 - _globals['_LOADPARTITIONSREQUEST']._serialized_start=4456 - _globals['_LOADPARTITIONSREQUEST']._serialized_end=4665 - _globals['_RELEASEPARTITIONSREQUEST']._serialized_start=4668 - _globals['_RELEASEPARTITIONSREQUEST']._serialized_end=4814 - _globals['_GETPARTITIONSTATISTICSREQUEST']._serialized_start=4817 - _globals['_GETPARTITIONSTATISTICSREQUEST']._serialized_end=4958 - _globals['_GETPARTITIONSTATISTICSRESPONSE']._serialized_start=4960 - _globals['_GETPARTITIONSTATISTICSRESPONSE']._serialized_end=5087 - _globals['_SHOWPARTITIONSREQUEST']._serialized_start=5090 - _globals['_SHOWPARTITIONSREQUEST']._serialized_end=5304 - _globals['_SHOWPARTITIONSRESPONSE']._serialized_start=5307 - _globals['_SHOWPARTITIONSRESPONSE']._serialized_end=5517 - _globals['_DESCRIBESEGMENTREQUEST']._serialized_start=5519 - _globals['_DESCRIBESEGMENTREQUEST']._serialized_end=5628 - _globals['_DESCRIBESEGMENTRESPONSE']._serialized_start=5631 - _globals['_DESCRIBESEGMENTRESPONSE']._serialized_end=5774 - _globals['_SHOWSEGMENTSREQUEST']._serialized_start=5776 - _globals['_SHOWSEGMENTSREQUEST']._serialized_end=5884 - _globals['_SHOWSEGMENTSRESPONSE']._serialized_start=5886 - _globals['_SHOWSEGMENTSRESPONSE']._serialized_end=5973 - _globals['_CREATEINDEXREQUEST']._serialized_start=5976 - _globals['_CREATEINDEXREQUEST']._serialized_end=6188 - _globals['_ALTERINDEXREQUEST']._serialized_start=6191 - _globals['_ALTERINDEXREQUEST']._serialized_end=6382 - _globals['_DESCRIBEINDEXREQUEST']._serialized_start=6385 - _globals['_DESCRIBEINDEXREQUEST']._serialized_end=6561 - _globals['_INDEXDESCRIPTION']._serialized_start=6564 - _globals['_INDEXDESCRIPTION']._serialized_end=6841 - _globals['_DESCRIBEINDEXRESPONSE']._serialized_start=6844 - _globals['_DESCRIBEINDEXRESPONSE']._serialized_end=6979 - _globals['_GETINDEXBUILDPROGRESSREQUEST']._serialized_start=6982 - _globals['_GETINDEXBUILDPROGRESSREQUEST']._serialized_end=7147 - _globals['_GETINDEXBUILDPROGRESSRESPONSE']._serialized_start=7149 - _globals['_GETINDEXBUILDPROGRESSRESPONSE']._serialized_end=7267 - _globals['_GETINDEXSTATEREQUEST']._serialized_start=7270 - 
_globals['_GETINDEXSTATEREQUEST']._serialized_end=7427 - _globals['_GETINDEXSTATERESPONSE']._serialized_start=7430 - _globals['_GETINDEXSTATERESPONSE']._serialized_end=7567 - _globals['_DROPINDEXREQUEST']._serialized_start=7570 - _globals['_DROPINDEXREQUEST']._serialized_end=7723 - _globals['_INSERTREQUEST']._serialized_start=7726 - _globals['_INSERTREQUEST']._serialized_end=7950 - _globals['_UPSERTREQUEST']._serialized_start=7953 - _globals['_UPSERTREQUEST']._serialized_end=8177 - _globals['_MUTATIONRESULT']._serialized_start=8180 - _globals['_MUTATIONRESULT']._serialized_end=8420 - _globals['_DELETEREQUEST']._serialized_start=8423 - _globals['_DELETEREQUEST']._serialized_end=8656 - _globals['_SUBSEARCHREQUEST']._serialized_start=8659 - _globals['_SUBSEARCHREQUEST']._serialized_end=8835 - _globals['_SEARCHREQUEST']._serialized_start=8838 - _globals['_SEARCHREQUEST']._serialized_end=9426 - _globals['_HITS']._serialized_start=9428 - _globals['_HITS']._serialized_end=9481 - _globals['_SEARCHRESULTS']._serialized_start=9484 - _globals['_SEARCHRESULTS']._serialized_end=9625 - _globals['_HYBRIDSEARCHREQUEST']._serialized_start=9628 - _globals['_HYBRIDSEARCHREQUEST']._serialized_end=10085 - _globals['_FLUSHREQUEST']._serialized_start=10087 - _globals['_FLUSHREQUEST']._serialized_end=10197 - _globals['_FLUSHRESPONSE']._serialized_start=10200 - _globals['_FLUSHRESPONSE']._serialized_end=10867 - _globals['_FLUSHRESPONSE_COLLSEGIDSENTRY']._serialized_start=10592 - _globals['_FLUSHRESPONSE_COLLSEGIDSENTRY']._serialized_end=10673 - _globals['_FLUSHRESPONSE_FLUSHCOLLSEGIDSENTRY']._serialized_start=10675 - _globals['_FLUSHRESPONSE_FLUSHCOLLSEGIDSENTRY']._serialized_end=10761 - _globals['_FLUSHRESPONSE_COLLSEALTIMESENTRY']._serialized_start=10763 - _globals['_FLUSHRESPONSE_COLLSEALTIMESENTRY']._serialized_end=10815 - _globals['_FLUSHRESPONSE_COLLFLUSHTSENTRY']._serialized_start=10817 - _globals['_FLUSHRESPONSE_COLLFLUSHTSENTRY']._serialized_end=10867 - _globals['_QUERYREQUEST']._serialized_start=10870 - _globals['_QUERYREQUEST']._serialized_end=11281 - _globals['_QUERYRESULTS']._serialized_start=11284 - _globals['_QUERYRESULTS']._serialized_end=11444 - _globals['_VECTORIDS']._serialized_start=11446 - _globals['_VECTORIDS']._serialized_end=11571 - _globals['_VECTORSARRAY']._serialized_start=11574 - _globals['_VECTORSARRAY']._serialized_end=11705 - _globals['_CALCDISTANCEREQUEST']._serialized_start=11708 - _globals['_CALCDISTANCEREQUEST']._serialized_end=11929 - _globals['_CALCDISTANCERESULTS']._serialized_start=11932 - _globals['_CALCDISTANCERESULTS']._serialized_end=12113 - _globals['_FLUSHALLREQUEST']._serialized_start=12115 - _globals['_FLUSHALLREQUEST']._serialized_end=12213 - _globals['_FLUSHALLRESPONSE']._serialized_start=12215 - _globals['_FLUSHALLRESPONSE']._serialized_end=12300 - _globals['_PERSISTENTSEGMENTINFO']._serialized_start=12303 - _globals['_PERSISTENTSEGMENTINFO']._serialized_end=12456 - _globals['_GETPERSISTENTSEGMENTINFOREQUEST']._serialized_start=12458 - _globals['_GETPERSISTENTSEGMENTINFOREQUEST']._serialized_end=12575 - _globals['_GETPERSISTENTSEGMENTINFORESPONSE']._serialized_start=12578 - _globals['_GETPERSISTENTSEGMENTINFORESPONSE']._serialized_end=12716 - _globals['_QUERYSEGMENTINFO']._serialized_start=12719 - _globals['_QUERYSEGMENTINFO']._serialized_end=12959 - _globals['_GETQUERYSEGMENTINFOREQUEST']._serialized_start=12961 - _globals['_GETQUERYSEGMENTINFOREQUEST']._serialized_end=13073 - _globals['_GETQUERYSEGMENTINFORESPONSE']._serialized_start=13076 - 
_globals['_GETQUERYSEGMENTINFORESPONSE']._serialized_end=13204 - _globals['_DUMMYREQUEST']._serialized_start=13206 - _globals['_DUMMYREQUEST']._serialized_end=13242 - _globals['_DUMMYRESPONSE']._serialized_start=13244 - _globals['_DUMMYRESPONSE']._serialized_end=13277 - _globals['_REGISTERLINKREQUEST']._serialized_start=13279 - _globals['_REGISTERLINKREQUEST']._serialized_end=13300 - _globals['_REGISTERLINKRESPONSE']._serialized_start=13302 - _globals['_REGISTERLINKRESPONSE']._serialized_end=13416 - _globals['_GETMETRICSREQUEST']._serialized_start=13418 - _globals['_GETMETRICSREQUEST']._serialized_end=13498 - _globals['_GETMETRICSRESPONSE']._serialized_start=13500 - _globals['_GETMETRICSRESPONSE']._serialized_end=13607 - _globals['_COMPONENTINFO']._serialized_start=13610 - _globals['_COMPONENTINFO']._serialized_end=13762 - _globals['_COMPONENTSTATES']._serialized_start=13765 - _globals['_COMPONENTSTATES']._serialized_end=13943 - _globals['_GETCOMPONENTSTATESREQUEST']._serialized_start=13945 - _globals['_GETCOMPONENTSTATESREQUEST']._serialized_end=13972 - _globals['_LOADBALANCEREQUEST']._serialized_start=13975 - _globals['_LOADBALANCEREQUEST']._serialized_end=14157 - _globals['_MANUALCOMPACTIONREQUEST']._serialized_start=14159 - _globals['_MANUALCOMPACTIONREQUEST']._serialized_end=14260 - _globals['_MANUALCOMPACTIONRESPONSE']._serialized_start=14262 - _globals['_MANUALCOMPACTIONRESPONSE']._serialized_end=14384 - _globals['_GETCOMPACTIONSTATEREQUEST']._serialized_start=14386 - _globals['_GETCOMPACTIONSTATEREQUEST']._serialized_end=14435 - _globals['_GETCOMPACTIONSTATERESPONSE']._serialized_start=14438 - _globals['_GETCOMPACTIONSTATERESPONSE']._serialized_end=14659 - _globals['_GETCOMPACTIONPLANSREQUEST']._serialized_start=14661 - _globals['_GETCOMPACTIONPLANSREQUEST']._serialized_end=14710 - _globals['_GETCOMPACTIONPLANSRESPONSE']._serialized_start=14713 - _globals['_GETCOMPACTIONPLANSRESPONSE']._serialized_end=14901 - _globals['_COMPACTIONMERGEINFO']._serialized_start=14903 - _globals['_COMPACTIONMERGEINFO']._serialized_end=14957 - _globals['_GETFLUSHSTATEREQUEST']._serialized_start=14959 - _globals['_GETFLUSHSTATEREQUEST']._serialized_end=15070 - _globals['_GETFLUSHSTATERESPONSE']._serialized_start=15072 - _globals['_GETFLUSHSTATERESPONSE']._serialized_end=15157 - _globals['_GETFLUSHALLSTATEREQUEST']._serialized_start=15159 - _globals['_GETFLUSHALLSTATEREQUEST']._serialized_end=15267 - _globals['_GETFLUSHALLSTATERESPONSE']._serialized_start=15269 - _globals['_GETFLUSHALLSTATERESPONSE']._serialized_end=15357 - _globals['_IMPORTREQUEST']._serialized_start=15360 - _globals['_IMPORTREQUEST']._serialized_end=15584 - _globals['_IMPORTRESPONSE']._serialized_start=15586 - _globals['_IMPORTRESPONSE']._serialized_end=15662 - _globals['_GETIMPORTSTATEREQUEST']._serialized_start=15664 - _globals['_GETIMPORTSTATEREQUEST']._serialized_end=15701 - _globals['_GETIMPORTSTATERESPONSE']._serialized_start=15704 - _globals['_GETIMPORTSTATERESPONSE']._serialized_end=15983 - _globals['_LISTIMPORTTASKSREQUEST']._serialized_start=15985 - _globals['_LISTIMPORTTASKSREQUEST']._serialized_end=16066 - _globals['_LISTIMPORTTASKSRESPONSE']._serialized_start=16069 - _globals['_LISTIMPORTTASKSRESPONSE']._serialized_end=16199 - _globals['_GETREPLICASREQUEST']._serialized_start=16202 - _globals['_GETREPLICASREQUEST']._serialized_end=16356 - _globals['_GETREPLICASRESPONSE']._serialized_start=16358 - _globals['_GETREPLICASRESPONSE']._serialized_end=16476 - _globals['_REPLICAINFO']._serialized_start=16479 - 
_globals['_REPLICAINFO']._serialized_end=16800 - _globals['_REPLICAINFO_NUMOUTBOUNDNODEENTRY']._serialized_start=16746 - _globals['_REPLICAINFO_NUMOUTBOUNDNODEENTRY']._serialized_end=16800 - _globals['_SHARDREPLICA']._serialized_start=16802 - _globals['_SHARDREPLICA']._serialized_end=16898 - _globals['_CREATECREDENTIALREQUEST']._serialized_start=16901 - _globals['_CREATECREDENTIALREQUEST']._serialized_end=17091 - _globals['_UPDATECREDENTIALREQUEST']._serialized_start=17094 - _globals['_UPDATECREDENTIALREQUEST']._serialized_end=17299 - _globals['_DELETECREDENTIALREQUEST']._serialized_start=17301 - _globals['_DELETECREDENTIALREQUEST']._serialized_end=17408 - _globals['_LISTCREDUSERSRESPONSE']._serialized_start=17410 - _globals['_LISTCREDUSERSRESPONSE']._serialized_end=17497 - _globals['_LISTCREDUSERSREQUEST']._serialized_start=17499 - _globals['_LISTCREDUSERSREQUEST']._serialized_end=17585 - _globals['_ROLEENTITY']._serialized_start=17587 - _globals['_ROLEENTITY']._serialized_end=17613 - _globals['_USERENTITY']._serialized_start=17615 - _globals['_USERENTITY']._serialized_end=17641 - _globals['_CREATEROLEREQUEST']._serialized_start=17644 - _globals['_CREATEROLEREQUEST']._serialized_end=17776 - _globals['_DROPROLEREQUEST']._serialized_start=17778 - _globals['_DROPROLEREQUEST']._serialized_end=17878 - _globals['_OPERATEUSERROLEREQUEST']._serialized_start=17881 - _globals['_OPERATEUSERROLEREQUEST']._serialized_end=18062 - _globals['_SELECTROLEREQUEST']._serialized_start=18065 - _globals['_SELECTROLEREQUEST']._serialized_end=18222 - _globals['_ROLERESULT']._serialized_start=18224 - _globals['_ROLERESULT']._serialized_end=18331 - _globals['_SELECTROLERESPONSE']._serialized_start=18333 - _globals['_SELECTROLERESPONSE']._serialized_end=18448 - _globals['_SELECTUSERREQUEST']._serialized_start=18451 - _globals['_SELECTUSERREQUEST']._serialized_end=18599 - _globals['_USERRESULT']._serialized_start=18601 - _globals['_USERRESULT']._serialized_end=18708 - _globals['_SELECTUSERRESPONSE']._serialized_start=18710 - _globals['_SELECTUSERRESPONSE']._serialized_end=18825 - _globals['_OBJECTENTITY']._serialized_start=18827 - _globals['_OBJECTENTITY']._serialized_end=18855 - _globals['_PRIVILEGEENTITY']._serialized_start=18857 - _globals['_PRIVILEGEENTITY']._serialized_end=18888 - _globals['_GRANTORENTITY']._serialized_start=18890 - _globals['_GRANTORENTITY']._serialized_end=19009 - _globals['_GRANTPRIVILEGEENTITY']._serialized_start=19011 - _globals['_GRANTPRIVILEGEENTITY']._serialized_end=19087 - _globals['_GRANTENTITY']._serialized_start=19090 - _globals['_GRANTENTITY']._serialized_end=19292 - _globals['_SELECTGRANTREQUEST']._serialized_start=19295 - _globals['_SELECTGRANTREQUEST']._serialized_end=19429 - _globals['_SELECTGRANTRESPONSE']._serialized_start=19431 - _globals['_SELECTGRANTRESPONSE']._serialized_end=19549 - _globals['_OPERATEPRIVILEGEREQUEST']._serialized_start=19552 - _globals['_OPERATEPRIVILEGEREQUEST']._serialized_end=19748 - _globals['_GETLOADINGPROGRESSREQUEST']._serialized_start=19751 - _globals['_GETLOADINGPROGRESSREQUEST']._serialized_end=19898 - _globals['_GETLOADINGPROGRESSRESPONSE']._serialized_start=19900 - _globals['_GETLOADINGPROGRESSRESPONSE']._serialized_end=20017 - _globals['_GETLOADSTATEREQUEST']._serialized_start=20020 - _globals['_GETLOADSTATEREQUEST']._serialized_end=20161 - _globals['_GETLOADSTATERESPONSE']._serialized_start=20163 - _globals['_GETLOADSTATERESPONSE']._serialized_end=20277 - _globals['_MILVUSEXT']._serialized_start=20279 - 
_globals['_MILVUSEXT']._serialized_end=20307 - _globals['_GETVERSIONREQUEST']._serialized_start=20309 - _globals['_GETVERSIONREQUEST']._serialized_end=20328 - _globals['_GETVERSIONRESPONSE']._serialized_start=20330 - _globals['_GETVERSIONRESPONSE']._serialized_end=20412 - _globals['_CHECKHEALTHREQUEST']._serialized_start=20414 - _globals['_CHECKHEALTHREQUEST']._serialized_end=20434 - _globals['_CHECKHEALTHRESPONSE']._serialized_start=20437 - _globals['_CHECKHEALTHRESPONSE']._serialized_end=20594 - _globals['_CREATERESOURCEGROUPREQUEST']._serialized_start=20597 - _globals['_CREATERESOURCEGROUPREQUEST']._serialized_end=20767 - _globals['_UPDATERESOURCEGROUPSREQUEST']._serialized_start=20770 - _globals['_UPDATERESOURCEGROUPSREQUEST']._serialized_end=21051 - _globals['_UPDATERESOURCEGROUPSREQUEST_RESOURCEGROUPSENTRY']._serialized_start=20940 - _globals['_UPDATERESOURCEGROUPSREQUEST_RESOURCEGROUPSENTRY']._serialized_end=21031 - _globals['_DROPRESOURCEGROUPREQUEST']._serialized_start=21053 - _globals['_DROPRESOURCEGROUPREQUEST']._serialized_end=21167 - _globals['_TRANSFERNODEREQUEST']._serialized_start=21170 - _globals['_TRANSFERNODEREQUEST']._serialized_end=21335 - _globals['_TRANSFERREPLICAREQUEST']._serialized_start=21338 - _globals['_TRANSFERREPLICAREQUEST']._serialized_end=21551 - _globals['_LISTRESOURCEGROUPSREQUEST']._serialized_start=21553 - _globals['_LISTRESOURCEGROUPSREQUEST']._serialized_end=21644 - _globals['_LISTRESOURCEGROUPSRESPONSE']._serialized_start=21646 - _globals['_LISTRESOURCEGROUPSRESPONSE']._serialized_end=21744 - _globals['_DESCRIBERESOURCEGROUPREQUEST']._serialized_start=21746 - _globals['_DESCRIBERESOURCEGROUPREQUEST']._serialized_end=21864 - _globals['_DESCRIBERESOURCEGROUPRESPONSE']._serialized_start=21867 - _globals['_DESCRIBERESOURCEGROUPRESPONSE']._serialized_end=22003 - _globals['_RESOURCEGROUP']._serialized_start=22006 - _globals['_RESOURCEGROUP']._serialized_end=22604 - _globals['_RESOURCEGROUP_NUMLOADEDREPLICAENTRY']._serialized_start=22437 - _globals['_RESOURCEGROUP_NUMLOADEDREPLICAENTRY']._serialized_end=22492 - _globals['_RESOURCEGROUP_NUMOUTGOINGNODEENTRY']._serialized_start=22494 - _globals['_RESOURCEGROUP_NUMOUTGOINGNODEENTRY']._serialized_end=22548 - _globals['_RESOURCEGROUP_NUMINCOMINGNODEENTRY']._serialized_start=22550 - _globals['_RESOURCEGROUP_NUMINCOMINGNODEENTRY']._serialized_end=22604 - _globals['_RENAMECOLLECTIONREQUEST']._serialized_start=22607 - _globals['_RENAMECOLLECTIONREQUEST']._serialized_end=22766 - _globals['_GETINDEXSTATISTICSREQUEST']._serialized_start=22769 - _globals['_GETINDEXSTATISTICSREQUEST']._serialized_end=22930 - _globals['_GETINDEXSTATISTICSRESPONSE']._serialized_start=22933 - _globals['_GETINDEXSTATISTICSRESPONSE']._serialized_end=23073 - _globals['_CONNECTREQUEST']._serialized_start=23075 - _globals['_CONNECTREQUEST']._serialized_end=23189 - _globals['_CONNECTRESPONSE']._serialized_start=23192 - _globals['_CONNECTRESPONSE']._serialized_end=23328 - _globals['_ALLOCTIMESTAMPREQUEST']._serialized_start=23330 - _globals['_ALLOCTIMESTAMPREQUEST']._serialized_end=23397 - _globals['_ALLOCTIMESTAMPRESPONSE']._serialized_start=23399 - _globals['_ALLOCTIMESTAMPRESPONSE']._serialized_end=23487 - _globals['_CREATEDATABASEREQUEST']._serialized_start=23489 - _globals['_CREATEDATABASEREQUEST']._serialized_end=23593 - _globals['_DROPDATABASEREQUEST']._serialized_start=23595 - _globals['_DROPDATABASEREQUEST']._serialized_end=23697 - _globals['_LISTDATABASESREQUEST']._serialized_start=23699 - 
_globals['_LISTDATABASESREQUEST']._serialized_end=23765 - _globals['_LISTDATABASESRESPONSE']._serialized_start=23767 - _globals['_LISTDATABASESRESPONSE']._serialized_end=23880 - _globals['_ALTERDATABASEREQUEST']._serialized_start=23883 - _globals['_ALTERDATABASEREQUEST']._serialized_end=24056 - _globals['_REPLICATEMESSAGEREQUEST']._serialized_start=24059 - _globals['_REPLICATEMESSAGEREQUEST']._serialized_end=24304 - _globals['_REPLICATEMESSAGERESPONSE']._serialized_start=24306 - _globals['_REPLICATEMESSAGERESPONSE']._serialized_end=24395 - _globals['_IMPORTAUTHPLACEHOLDER']._serialized_start=24397 - _globals['_IMPORTAUTHPLACEHOLDER']._serialized_end=24495 - _globals['_GETIMPORTPROGRESSAUTHPLACEHOLDER']._serialized_start=24497 - _globals['_GETIMPORTPROGRESSAUTHPLACEHOLDER']._serialized_end=24557 - _globals['_LISTIMPORTSAUTHPLACEHOLDER']._serialized_start=24559 - _globals['_LISTIMPORTSAUTHPLACEHOLDER']._serialized_end=24638 - _globals['_MILVUSSERVICE']._serialized_start=24888 - _globals['_MILVUSSERVICE']._serialized_end=33535 - _globals['_PROXYSERVICE']._serialized_start=33537 - _globals['_PROXYSERVICE']._serialized_end=33654 + _globals['_ALTERCOLLECTIONREQUEST']._serialized_end=1709 + _globals['_ALTERCOLLECTIONFIELDREQUEST']._serialized_start=1712 + _globals['_ALTERCOLLECTIONFIELDREQUEST']._serialized_end=1922 + _globals['_HASCOLLECTIONREQUEST']._serialized_start=1925 + _globals['_HASCOLLECTIONREQUEST']._serialized_end=2053 + _globals['_BOOLRESPONSE']._serialized_start=2055 + _globals['_BOOLRESPONSE']._serialized_end=2129 + _globals['_STRINGRESPONSE']._serialized_start=2131 + _globals['_STRINGRESPONSE']._serialized_end=2207 + _globals['_DESCRIBECOLLECTIONREQUEST']._serialized_start=2210 + _globals['_DESCRIBECOLLECTIONREQUEST']._serialized_end=2385 + _globals['_DESCRIBECOLLECTIONRESPONSE']._serialized_start=2388 + _globals['_DESCRIBECOLLECTIONRESPONSE']._serialized_end=2957 + _globals['_LOADCOLLECTIONREQUEST']._serialized_start=2960 + _globals['_LOADCOLLECTIONREQUEST']._serialized_end=3198 + _globals['_RELEASECOLLECTIONREQUEST']._serialized_start=3200 + _globals['_RELEASECOLLECTIONREQUEST']._serialized_end=3321 + _globals['_GETSTATISTICSREQUEST']._serialized_start=3324 + _globals['_GETSTATISTICSREQUEST']._serialized_end=3495 + _globals['_GETSTATISTICSRESPONSE']._serialized_start=3497 + _globals['_GETSTATISTICSRESPONSE']._serialized_end=3615 + _globals['_GETCOLLECTIONSTATISTICSREQUEST']._serialized_start=3617 + _globals['_GETCOLLECTIONSTATISTICSREQUEST']._serialized_end=3744 + _globals['_GETCOLLECTIONSTATISTICSRESPONSE']._serialized_start=3747 + _globals['_GETCOLLECTIONSTATISTICSRESPONSE']._serialized_end=3875 + _globals['_SHOWCOLLECTIONSREQUEST']._serialized_start=3878 + _globals['_SHOWCOLLECTIONSREQUEST']._serialized_end=4058 + _globals['_SHOWCOLLECTIONSRESPONSE']._serialized_start=4061 + _globals['_SHOWCOLLECTIONSRESPONSE']._serialized_end=4308 + _globals['_CREATEPARTITIONREQUEST']._serialized_start=4311 + _globals['_CREATEPARTITIONREQUEST']._serialized_end=4454 + _globals['_DROPPARTITIONREQUEST']._serialized_start=4457 + _globals['_DROPPARTITIONREQUEST']._serialized_end=4598 + _globals['_HASPARTITIONREQUEST']._serialized_start=4601 + _globals['_HASPARTITIONREQUEST']._serialized_end=4741 + _globals['_LOADPARTITIONSREQUEST']._serialized_start=4744 + _globals['_LOADPARTITIONSREQUEST']._serialized_end=5007 + _globals['_RELEASEPARTITIONSREQUEST']._serialized_start=5010 + _globals['_RELEASEPARTITIONSREQUEST']._serialized_end=5156 + 
_globals['_GETPARTITIONSTATISTICSREQUEST']._serialized_start=5159 + _globals['_GETPARTITIONSTATISTICSREQUEST']._serialized_end=5300 + _globals['_GETPARTITIONSTATISTICSRESPONSE']._serialized_start=5302 + _globals['_GETPARTITIONSTATISTICSRESPONSE']._serialized_end=5429 + _globals['_SHOWPARTITIONSREQUEST']._serialized_start=5432 + _globals['_SHOWPARTITIONSREQUEST']._serialized_end=5646 + _globals['_SHOWPARTITIONSRESPONSE']._serialized_start=5649 + _globals['_SHOWPARTITIONSRESPONSE']._serialized_end=5859 + _globals['_DESCRIBESEGMENTREQUEST']._serialized_start=5861 + _globals['_DESCRIBESEGMENTREQUEST']._serialized_end=5970 + _globals['_DESCRIBESEGMENTRESPONSE']._serialized_start=5973 + _globals['_DESCRIBESEGMENTRESPONSE']._serialized_end=6116 + _globals['_SHOWSEGMENTSREQUEST']._serialized_start=6118 + _globals['_SHOWSEGMENTSREQUEST']._serialized_end=6226 + _globals['_SHOWSEGMENTSRESPONSE']._serialized_start=6228 + _globals['_SHOWSEGMENTSRESPONSE']._serialized_end=6315 + _globals['_CREATEINDEXREQUEST']._serialized_start=6318 + _globals['_CREATEINDEXREQUEST']._serialized_end=6530 + _globals['_ALTERINDEXREQUEST']._serialized_start=6533 + _globals['_ALTERINDEXREQUEST']._serialized_end=6745 + _globals['_DESCRIBEINDEXREQUEST']._serialized_start=6748 + _globals['_DESCRIBEINDEXREQUEST']._serialized_end=6924 + _globals['_INDEXDESCRIPTION']._serialized_start=6927 + _globals['_INDEXDESCRIPTION']._serialized_end=7204 + _globals['_DESCRIBEINDEXRESPONSE']._serialized_start=7207 + _globals['_DESCRIBEINDEXRESPONSE']._serialized_end=7342 + _globals['_GETINDEXBUILDPROGRESSREQUEST']._serialized_start=7345 + _globals['_GETINDEXBUILDPROGRESSREQUEST']._serialized_end=7510 + _globals['_GETINDEXBUILDPROGRESSRESPONSE']._serialized_start=7512 + _globals['_GETINDEXBUILDPROGRESSRESPONSE']._serialized_end=7630 + _globals['_GETINDEXSTATEREQUEST']._serialized_start=7633 + _globals['_GETINDEXSTATEREQUEST']._serialized_end=7790 + _globals['_GETINDEXSTATERESPONSE']._serialized_start=7793 + _globals['_GETINDEXSTATERESPONSE']._serialized_end=7930 + _globals['_DROPINDEXREQUEST']._serialized_start=7933 + _globals['_DROPINDEXREQUEST']._serialized_end=8086 + _globals['_INSERTREQUEST']._serialized_start=8089 + _globals['_INSERTREQUEST']._serialized_end=8313 + _globals['_UPSERTREQUEST']._serialized_start=8316 + _globals['_UPSERTREQUEST']._serialized_end=8540 + _globals['_MUTATIONRESULT']._serialized_start=8543 + _globals['_MUTATIONRESULT']._serialized_end=8783 + _globals['_DELETEREQUEST']._serialized_start=8786 + _globals['_DELETEREQUEST']._serialized_end=9204 + _globals['_DELETEREQUEST_EXPRTEMPLATEVALUESENTRY']._serialized_start=9102 + _globals['_DELETEREQUEST_EXPRTEMPLATEVALUESENTRY']._serialized_end=9195 + _globals['_SUBSEARCHREQUEST']._serialized_start=9207 + _globals['_SUBSEARCHREQUEST']._serialized_end=9571 + _globals['_SUBSEARCHREQUEST_EXPRTEMPLATEVALUESENTRY']._serialized_start=9102 + _globals['_SUBSEARCHREQUEST_EXPRTEMPLATEVALUESENTRY']._serialized_end=9195 + _globals['_SEARCHREQUEST']._serialized_start=9574 + _globals['_SEARCHREQUEST']._serialized_end=10347 + _globals['_SEARCHREQUEST_EXPRTEMPLATEVALUESENTRY']._serialized_start=9102 + _globals['_SEARCHREQUEST_EXPRTEMPLATEVALUESENTRY']._serialized_end=9195 + _globals['_HITS']._serialized_start=10349 + _globals['_HITS']._serialized_end=10402 + _globals['_SEARCHRESULTS']._serialized_start=10405 + _globals['_SEARCHRESULTS']._serialized_end=10566 + _globals['_HYBRIDSEARCHREQUEST']._serialized_start=10569 + _globals['_HYBRIDSEARCHREQUEST']._serialized_end=11026 + 
_globals['_FLUSHREQUEST']._serialized_start=11028 + _globals['_FLUSHREQUEST']._serialized_end=11138 + _globals['_FLUSHRESPONSE']._serialized_start=11141 + _globals['_FLUSHRESPONSE']._serialized_end=11963 + _globals['_FLUSHRESPONSE_COLLSEGIDSENTRY']._serialized_start=11606 + _globals['_FLUSHRESPONSE_COLLSEGIDSENTRY']._serialized_end=11687 + _globals['_FLUSHRESPONSE_FLUSHCOLLSEGIDSENTRY']._serialized_start=11689 + _globals['_FLUSHRESPONSE_FLUSHCOLLSEGIDSENTRY']._serialized_end=11775 + _globals['_FLUSHRESPONSE_COLLSEALTIMESENTRY']._serialized_start=11777 + _globals['_FLUSHRESPONSE_COLLSEALTIMESENTRY']._serialized_end=11829 + _globals['_FLUSHRESPONSE_COLLFLUSHTSENTRY']._serialized_start=11831 + _globals['_FLUSHRESPONSE_COLLFLUSHTSENTRY']._serialized_end=11881 + _globals['_FLUSHRESPONSE_CHANNELCPSENTRY']._serialized_start=11883 + _globals['_FLUSHRESPONSE_CHANNELCPSENTRY']._serialized_end=11963 + _globals['_QUERYREQUEST']._serialized_start=11966 + _globals['_QUERYREQUEST']._serialized_end=12561 + _globals['_QUERYREQUEST_EXPRTEMPLATEVALUESENTRY']._serialized_start=9102 + _globals['_QUERYREQUEST_EXPRTEMPLATEVALUESENTRY']._serialized_end=9195 + _globals['_QUERYRESULTS']._serialized_start=12564 + _globals['_QUERYRESULTS']._serialized_end=12744 + _globals['_VECTORIDS']._serialized_start=12746 + _globals['_VECTORIDS']._serialized_end=12871 + _globals['_VECTORSARRAY']._serialized_start=12874 + _globals['_VECTORSARRAY']._serialized_end=13005 + _globals['_CALCDISTANCEREQUEST']._serialized_start=13008 + _globals['_CALCDISTANCEREQUEST']._serialized_end=13229 + _globals['_CALCDISTANCERESULTS']._serialized_start=13232 + _globals['_CALCDISTANCERESULTS']._serialized_end=13413 + _globals['_FLUSHALLREQUEST']._serialized_start=13415 + _globals['_FLUSHALLREQUEST']._serialized_end=13513 + _globals['_FLUSHALLRESPONSE']._serialized_start=13515 + _globals['_FLUSHALLRESPONSE']._serialized_end=13600 + _globals['_PERSISTENTSEGMENTINFO']._serialized_start=13603 + _globals['_PERSISTENTSEGMENTINFO']._serialized_end=13825 + _globals['_GETPERSISTENTSEGMENTINFOREQUEST']._serialized_start=13827 + _globals['_GETPERSISTENTSEGMENTINFOREQUEST']._serialized_end=13944 + _globals['_GETPERSISTENTSEGMENTINFORESPONSE']._serialized_start=13947 + _globals['_GETPERSISTENTSEGMENTINFORESPONSE']._serialized_end=14085 + _globals['_QUERYSEGMENTINFO']._serialized_start=14088 + _globals['_QUERYSEGMENTINFO']._serialized_end=14397 + _globals['_GETQUERYSEGMENTINFOREQUEST']._serialized_start=14399 + _globals['_GETQUERYSEGMENTINFOREQUEST']._serialized_end=14511 + _globals['_GETQUERYSEGMENTINFORESPONSE']._serialized_start=14514 + _globals['_GETQUERYSEGMENTINFORESPONSE']._serialized_end=14642 + _globals['_DUMMYREQUEST']._serialized_start=14644 + _globals['_DUMMYREQUEST']._serialized_end=14680 + _globals['_DUMMYRESPONSE']._serialized_start=14682 + _globals['_DUMMYRESPONSE']._serialized_end=14715 + _globals['_REGISTERLINKREQUEST']._serialized_start=14717 + _globals['_REGISTERLINKREQUEST']._serialized_end=14738 + _globals['_REGISTERLINKRESPONSE']._serialized_start=14740 + _globals['_REGISTERLINKRESPONSE']._serialized_end=14854 + _globals['_GETMETRICSREQUEST']._serialized_start=14856 + _globals['_GETMETRICSREQUEST']._serialized_end=14936 + _globals['_GETMETRICSRESPONSE']._serialized_start=14938 + _globals['_GETMETRICSRESPONSE']._serialized_end=15045 + _globals['_COMPONENTINFO']._serialized_start=15048 + _globals['_COMPONENTINFO']._serialized_end=15200 + _globals['_COMPONENTSTATES']._serialized_start=15203 + _globals['_COMPONENTSTATES']._serialized_end=15381 
+ _globals['_GETCOMPONENTSTATESREQUEST']._serialized_start=15383 + _globals['_GETCOMPONENTSTATESREQUEST']._serialized_end=15410 + _globals['_LOADBALANCEREQUEST']._serialized_start=15413 + _globals['_LOADBALANCEREQUEST']._serialized_end=15595 + _globals['_MANUALCOMPACTIONREQUEST']._serialized_start=15597 + _globals['_MANUALCOMPACTIONREQUEST']._serialized_end=15698 + _globals['_MANUALCOMPACTIONRESPONSE']._serialized_start=15700 + _globals['_MANUALCOMPACTIONRESPONSE']._serialized_end=15822 + _globals['_GETCOMPACTIONSTATEREQUEST']._serialized_start=15824 + _globals['_GETCOMPACTIONSTATEREQUEST']._serialized_end=15873 + _globals['_GETCOMPACTIONSTATERESPONSE']._serialized_start=15876 + _globals['_GETCOMPACTIONSTATERESPONSE']._serialized_end=16097 + _globals['_GETCOMPACTIONPLANSREQUEST']._serialized_start=16099 + _globals['_GETCOMPACTIONPLANSREQUEST']._serialized_end=16148 + _globals['_GETCOMPACTIONPLANSRESPONSE']._serialized_start=16151 + _globals['_GETCOMPACTIONPLANSRESPONSE']._serialized_end=16339 + _globals['_COMPACTIONMERGEINFO']._serialized_start=16341 + _globals['_COMPACTIONMERGEINFO']._serialized_end=16395 + _globals['_GETFLUSHSTATEREQUEST']._serialized_start=16397 + _globals['_GETFLUSHSTATEREQUEST']._serialized_end=16508 + _globals['_GETFLUSHSTATERESPONSE']._serialized_start=16510 + _globals['_GETFLUSHSTATERESPONSE']._serialized_end=16595 + _globals['_GETFLUSHALLSTATEREQUEST']._serialized_start=16597 + _globals['_GETFLUSHALLSTATEREQUEST']._serialized_end=16705 + _globals['_GETFLUSHALLSTATERESPONSE']._serialized_start=16707 + _globals['_GETFLUSHALLSTATERESPONSE']._serialized_end=16795 + _globals['_IMPORTREQUEST']._serialized_start=16798 + _globals['_IMPORTREQUEST']._serialized_end=17022 + _globals['_IMPORTRESPONSE']._serialized_start=17024 + _globals['_IMPORTRESPONSE']._serialized_end=17100 + _globals['_GETIMPORTSTATEREQUEST']._serialized_start=17102 + _globals['_GETIMPORTSTATEREQUEST']._serialized_end=17139 + _globals['_GETIMPORTSTATERESPONSE']._serialized_start=17142 + _globals['_GETIMPORTSTATERESPONSE']._serialized_end=17421 + _globals['_LISTIMPORTTASKSREQUEST']._serialized_start=17423 + _globals['_LISTIMPORTTASKSREQUEST']._serialized_end=17504 + _globals['_LISTIMPORTTASKSRESPONSE']._serialized_start=17507 + _globals['_LISTIMPORTTASKSRESPONSE']._serialized_end=17637 + _globals['_GETREPLICASREQUEST']._serialized_start=17640 + _globals['_GETREPLICASREQUEST']._serialized_end=17794 + _globals['_GETREPLICASRESPONSE']._serialized_start=17796 + _globals['_GETREPLICASRESPONSE']._serialized_end=17914 + _globals['_REPLICAINFO']._serialized_start=17917 + _globals['_REPLICAINFO']._serialized_end=18238 + _globals['_REPLICAINFO_NUMOUTBOUNDNODEENTRY']._serialized_start=18184 + _globals['_REPLICAINFO_NUMOUTBOUNDNODEENTRY']._serialized_end=18238 + _globals['_SHARDREPLICA']._serialized_start=18240 + _globals['_SHARDREPLICA']._serialized_end=18336 + _globals['_CREATECREDENTIALREQUEST']._serialized_start=18339 + _globals['_CREATECREDENTIALREQUEST']._serialized_end=18529 + _globals['_UPDATECREDENTIALREQUEST']._serialized_start=18532 + _globals['_UPDATECREDENTIALREQUEST']._serialized_end=18737 + _globals['_DELETECREDENTIALREQUEST']._serialized_start=18739 + _globals['_DELETECREDENTIALREQUEST']._serialized_end=18846 + _globals['_LISTCREDUSERSRESPONSE']._serialized_start=18848 + _globals['_LISTCREDUSERSRESPONSE']._serialized_end=18935 + _globals['_LISTCREDUSERSREQUEST']._serialized_start=18937 + _globals['_LISTCREDUSERSREQUEST']._serialized_end=19023 + _globals['_ROLEENTITY']._serialized_start=19025 + 
_globals['_ROLEENTITY']._serialized_end=19051 + _globals['_USERENTITY']._serialized_start=19053 + _globals['_USERENTITY']._serialized_end=19079 + _globals['_CREATEROLEREQUEST']._serialized_start=19082 + _globals['_CREATEROLEREQUEST']._serialized_end=19214 + _globals['_DROPROLEREQUEST']._serialized_start=19216 + _globals['_DROPROLEREQUEST']._serialized_end=19336 + _globals['_CREATEPRIVILEGEGROUPREQUEST']._serialized_start=19338 + _globals['_CREATEPRIVILEGEGROUPREQUEST']._serialized_end=19451 + _globals['_DROPPRIVILEGEGROUPREQUEST']._serialized_start=19453 + _globals['_DROPPRIVILEGEGROUPREQUEST']._serialized_end=19564 + _globals['_LISTPRIVILEGEGROUPSREQUEST']._serialized_start=19566 + _globals['_LISTPRIVILEGEGROUPSREQUEST']._serialized_end=19658 + _globals['_LISTPRIVILEGEGROUPSRESPONSE']._serialized_start=19661 + _globals['_LISTPRIVILEGEGROUPSRESPONSE']._serialized_end=19802 + _globals['_OPERATEPRIVILEGEGROUPREQUEST']._serialized_start=19805 + _globals['_OPERATEPRIVILEGEGROUPREQUEST']._serialized_end=20039 + _globals['_OPERATEUSERROLEREQUEST']._serialized_start=20042 + _globals['_OPERATEUSERROLEREQUEST']._serialized_end=20223 + _globals['_PRIVILEGEGROUPINFO']._serialized_start=20225 + _globals['_PRIVILEGEGROUPINFO']._serialized_end=20323 + _globals['_SELECTROLEREQUEST']._serialized_start=20326 + _globals['_SELECTROLEREQUEST']._serialized_end=20483 + _globals['_ROLERESULT']._serialized_start=20485 + _globals['_ROLERESULT']._serialized_end=20592 + _globals['_SELECTROLERESPONSE']._serialized_start=20594 + _globals['_SELECTROLERESPONSE']._serialized_end=20709 + _globals['_SELECTUSERREQUEST']._serialized_start=20712 + _globals['_SELECTUSERREQUEST']._serialized_end=20860 + _globals['_USERRESULT']._serialized_start=20862 + _globals['_USERRESULT']._serialized_end=20969 + _globals['_SELECTUSERRESPONSE']._serialized_start=20971 + _globals['_SELECTUSERRESPONSE']._serialized_end=21086 + _globals['_OBJECTENTITY']._serialized_start=21088 + _globals['_OBJECTENTITY']._serialized_end=21116 + _globals['_PRIVILEGEENTITY']._serialized_start=21118 + _globals['_PRIVILEGEENTITY']._serialized_end=21149 + _globals['_GRANTORENTITY']._serialized_start=21151 + _globals['_GRANTORENTITY']._serialized_end=21270 + _globals['_GRANTPRIVILEGEENTITY']._serialized_start=21272 + _globals['_GRANTPRIVILEGEENTITY']._serialized_end=21348 + _globals['_GRANTENTITY']._serialized_start=21351 + _globals['_GRANTENTITY']._serialized_end=21553 + _globals['_SELECTGRANTREQUEST']._serialized_start=21556 + _globals['_SELECTGRANTREQUEST']._serialized_end=21690 + _globals['_SELECTGRANTRESPONSE']._serialized_start=21692 + _globals['_SELECTGRANTRESPONSE']._serialized_end=21810 + _globals['_OPERATEPRIVILEGEREQUEST']._serialized_start=21813 + _globals['_OPERATEPRIVILEGEREQUEST']._serialized_end=22009 + _globals['_USERINFO']._serialized_start=22011 + _globals['_USERINFO']._serialized_end=22101 + _globals['_RBACMETA']._serialized_start=22104 + _globals['_RBACMETA']._serialized_end=22325 + _globals['_BACKUPRBACMETAREQUEST']._serialized_start=22327 + _globals['_BACKUPRBACMETAREQUEST']._serialized_end=22414 + _globals['_BACKUPRBACMETARESPONSE']._serialized_start=22416 + _globals['_BACKUPRBACMETARESPONSE']._serialized_end=22535 + _globals['_RESTORERBACMETAREQUEST']._serialized_start=22538 + _globals['_RESTORERBACMETAREQUEST']._serialized_end=22676 + _globals['_GETLOADINGPROGRESSREQUEST']._serialized_start=22679 + _globals['_GETLOADINGPROGRESSREQUEST']._serialized_end=22826 + _globals['_GETLOADINGPROGRESSRESPONSE']._serialized_start=22828 + 
_globals['_GETLOADINGPROGRESSRESPONSE']._serialized_end=22945 + _globals['_GETLOADSTATEREQUEST']._serialized_start=22948 + _globals['_GETLOADSTATEREQUEST']._serialized_end=23089 + _globals['_GETLOADSTATERESPONSE']._serialized_start=23091 + _globals['_GETLOADSTATERESPONSE']._serialized_end=23205 + _globals['_MILVUSEXT']._serialized_start=23207 + _globals['_MILVUSEXT']._serialized_end=23235 + _globals['_GETVERSIONREQUEST']._serialized_start=23237 + _globals['_GETVERSIONREQUEST']._serialized_end=23256 + _globals['_GETVERSIONRESPONSE']._serialized_start=23258 + _globals['_GETVERSIONRESPONSE']._serialized_end=23340 + _globals['_CHECKHEALTHREQUEST']._serialized_start=23342 + _globals['_CHECKHEALTHREQUEST']._serialized_end=23362 + _globals['_CHECKHEALTHRESPONSE']._serialized_start=23365 + _globals['_CHECKHEALTHRESPONSE']._serialized_end=23522 + _globals['_CREATERESOURCEGROUPREQUEST']._serialized_start=23525 + _globals['_CREATERESOURCEGROUPREQUEST']._serialized_end=23695 + _globals['_UPDATERESOURCEGROUPSREQUEST']._serialized_start=23698 + _globals['_UPDATERESOURCEGROUPSREQUEST']._serialized_end=23979 + _globals['_UPDATERESOURCEGROUPSREQUEST_RESOURCEGROUPSENTRY']._serialized_start=23868 + _globals['_UPDATERESOURCEGROUPSREQUEST_RESOURCEGROUPSENTRY']._serialized_end=23959 + _globals['_DROPRESOURCEGROUPREQUEST']._serialized_start=23981 + _globals['_DROPRESOURCEGROUPREQUEST']._serialized_end=24095 + _globals['_TRANSFERNODEREQUEST']._serialized_start=24098 + _globals['_TRANSFERNODEREQUEST']._serialized_end=24263 + _globals['_TRANSFERREPLICAREQUEST']._serialized_start=24266 + _globals['_TRANSFERREPLICAREQUEST']._serialized_end=24479 + _globals['_LISTRESOURCEGROUPSREQUEST']._serialized_start=24481 + _globals['_LISTRESOURCEGROUPSREQUEST']._serialized_end=24572 + _globals['_LISTRESOURCEGROUPSRESPONSE']._serialized_start=24574 + _globals['_LISTRESOURCEGROUPSRESPONSE']._serialized_end=24672 + _globals['_DESCRIBERESOURCEGROUPREQUEST']._serialized_start=24674 + _globals['_DESCRIBERESOURCEGROUPREQUEST']._serialized_end=24792 + _globals['_DESCRIBERESOURCEGROUPRESPONSE']._serialized_start=24795 + _globals['_DESCRIBERESOURCEGROUPRESPONSE']._serialized_end=24931 + _globals['_RESOURCEGROUP']._serialized_start=24934 + _globals['_RESOURCEGROUP']._serialized_end=25532 + _globals['_RESOURCEGROUP_NUMLOADEDREPLICAENTRY']._serialized_start=25365 + _globals['_RESOURCEGROUP_NUMLOADEDREPLICAENTRY']._serialized_end=25420 + _globals['_RESOURCEGROUP_NUMOUTGOINGNODEENTRY']._serialized_start=25422 + _globals['_RESOURCEGROUP_NUMOUTGOINGNODEENTRY']._serialized_end=25476 + _globals['_RESOURCEGROUP_NUMINCOMINGNODEENTRY']._serialized_start=25478 + _globals['_RESOURCEGROUP_NUMINCOMINGNODEENTRY']._serialized_end=25532 + _globals['_RENAMECOLLECTIONREQUEST']._serialized_start=25535 + _globals['_RENAMECOLLECTIONREQUEST']._serialized_end=25694 + _globals['_GETINDEXSTATISTICSREQUEST']._serialized_start=25697 + _globals['_GETINDEXSTATISTICSREQUEST']._serialized_end=25858 + _globals['_GETINDEXSTATISTICSRESPONSE']._serialized_start=25861 + _globals['_GETINDEXSTATISTICSRESPONSE']._serialized_end=26001 + _globals['_CONNECTREQUEST']._serialized_start=26003 + _globals['_CONNECTREQUEST']._serialized_end=26117 + _globals['_CONNECTRESPONSE']._serialized_start=26120 + _globals['_CONNECTRESPONSE']._serialized_end=26256 + _globals['_ALLOCTIMESTAMPREQUEST']._serialized_start=26258 + _globals['_ALLOCTIMESTAMPREQUEST']._serialized_end=26325 + _globals['_ALLOCTIMESTAMPRESPONSE']._serialized_start=26327 + 
_globals['_ALLOCTIMESTAMPRESPONSE']._serialized_end=26415 + _globals['_CREATEDATABASEREQUEST']._serialized_start=26418 + _globals['_CREATEDATABASEREQUEST']._serialized_end=26577 + _globals['_DROPDATABASEREQUEST']._serialized_start=26579 + _globals['_DROPDATABASEREQUEST']._serialized_end=26681 + _globals['_LISTDATABASESREQUEST']._serialized_start=26683 + _globals['_LISTDATABASESREQUEST']._serialized_end=26749 + _globals['_LISTDATABASESRESPONSE']._serialized_start=26752 + _globals['_LISTDATABASESRESPONSE']._serialized_end=26881 + _globals['_ALTERDATABASEREQUEST']._serialized_start=26884 + _globals['_ALTERDATABASEREQUEST']._serialized_end=27057 + _globals['_DESCRIBEDATABASEREQUEST']._serialized_start=27059 + _globals['_DESCRIBEDATABASEREQUEST']._serialized_end=27165 + _globals['_DESCRIBEDATABASERESPONSE']._serialized_start=27168 + _globals['_DESCRIBEDATABASERESPONSE']._serialized_end=27352 + _globals['_REPLICATEMESSAGEREQUEST']._serialized_start=27355 + _globals['_REPLICATEMESSAGEREQUEST']._serialized_end=27600 + _globals['_REPLICATEMESSAGERESPONSE']._serialized_start=27602 + _globals['_REPLICATEMESSAGERESPONSE']._serialized_end=27691 + _globals['_IMPORTAUTHPLACEHOLDER']._serialized_start=27693 + _globals['_IMPORTAUTHPLACEHOLDER']._serialized_end=27791 + _globals['_GETIMPORTPROGRESSAUTHPLACEHOLDER']._serialized_start=27793 + _globals['_GETIMPORTPROGRESSAUTHPLACEHOLDER']._serialized_end=27853 + _globals['_LISTIMPORTSAUTHPLACEHOLDER']._serialized_start=27855 + _globals['_LISTIMPORTSAUTHPLACEHOLDER']._serialized_end=27934 + _globals['_MILVUSSERVICE']._serialized_start=28270 + _globals['_MILVUSSERVICE']._serialized_end=37770 + _globals['_PROXYSERVICE']._serialized_start=37772 + _globals['_PROXYSERVICE']._serialized_end=37889 # @@protoc_insertion_point(module_scope) diff --git a/pymilvus/grpc_gen/milvus_pb2.pyi b/pymilvus/grpc_gen/milvus_pb2.pyi index 13fd35e1a..629a62338 100644 --- a/pymilvus/grpc_gen/milvus_pb2.pyi +++ b/pymilvus/grpc_gen/milvus_pb2.pyi @@ -17,6 +17,11 @@ class ShowType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): All: _ClassVar[ShowType] InMemory: _ClassVar[ShowType] +class OperatePrivilegeGroupType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + AddPrivilegesToGroup: _ClassVar[OperatePrivilegeGroupType] + RemovePrivilegesFromGroup: _ClassVar[OperatePrivilegeGroupType] + class OperateUserRoleType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = () AddUserToRole: _ClassVar[OperateUserRoleType] @@ -36,6 +41,8 @@ class QuotaState(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): DenyToWrite: _ClassVar[QuotaState] All: ShowType InMemory: ShowType +AddPrivilegesToGroup: OperatePrivilegeGroupType +RemovePrivilegesFromGroup: OperatePrivilegeGroupType AddUserToRole: OperateUserRoleType RemoveUserFromRole: OperateUserRoleType Grant: OperatePrivilegeType @@ -157,18 +164,34 @@ class DropCollectionRequest(_message.Message): def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ...) -> None: ... 
class AlterCollectionRequest(_message.Message): - __slots__ = ("base", "db_name", "collection_name", "collectionID", "properties") + __slots__ = ("base", "db_name", "collection_name", "collectionID", "properties", "delete_keys") BASE_FIELD_NUMBER: _ClassVar[int] DB_NAME_FIELD_NUMBER: _ClassVar[int] COLLECTION_NAME_FIELD_NUMBER: _ClassVar[int] COLLECTIONID_FIELD_NUMBER: _ClassVar[int] PROPERTIES_FIELD_NUMBER: _ClassVar[int] + DELETE_KEYS_FIELD_NUMBER: _ClassVar[int] base: _common_pb2.MsgBase db_name: str collection_name: str collectionID: int properties: _containers.RepeatedCompositeFieldContainer[_common_pb2.KeyValuePair] - def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., collectionID: _Optional[int] = ..., properties: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ...) -> None: ... + delete_keys: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., collectionID: _Optional[int] = ..., properties: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ..., delete_keys: _Optional[_Iterable[str]] = ...) -> None: ... + +class AlterCollectionFieldRequest(_message.Message): + __slots__ = ("base", "db_name", "collection_name", "field_name", "properties") + BASE_FIELD_NUMBER: _ClassVar[int] + DB_NAME_FIELD_NUMBER: _ClassVar[int] + COLLECTION_NAME_FIELD_NUMBER: _ClassVar[int] + FIELD_NAME_FIELD_NUMBER: _ClassVar[int] + PROPERTIES_FIELD_NUMBER: _ClassVar[int] + base: _common_pb2.MsgBase + db_name: str + collection_name: str + field_name: str + properties: _containers.RepeatedCompositeFieldContainer[_common_pb2.KeyValuePair] + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., field_name: _Optional[str] = ..., properties: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ...) -> None: ... class HasCollectionRequest(_message.Message): __slots__ = ("base", "db_name", "collection_name", "time_stamp") @@ -249,20 +272,24 @@ class DescribeCollectionResponse(_message.Message): def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., schema: _Optional[_Union[_schema_pb2.CollectionSchema, _Mapping]] = ..., collectionID: _Optional[int] = ..., virtual_channel_names: _Optional[_Iterable[str]] = ..., physical_channel_names: _Optional[_Iterable[str]] = ..., created_timestamp: _Optional[int] = ..., created_utc_timestamp: _Optional[int] = ..., shards_num: _Optional[int] = ..., aliases: _Optional[_Iterable[str]] = ..., start_positions: _Optional[_Iterable[_Union[_common_pb2.KeyDataPair, _Mapping]]] = ..., consistency_level: _Optional[_Union[_common_pb2.ConsistencyLevel, str]] = ..., collection_name: _Optional[str] = ..., properties: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ..., db_name: _Optional[str] = ..., num_partitions: _Optional[int] = ..., db_id: _Optional[int] = ...) -> None: ... 
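A minimal sketch of the two request messages changed/added above, built directly from the regenerated bindings in this diff; the property keys and values used here ("mmap.enabled", "max_length") are illustrative only and not taken from the patch.

```python
from pymilvus.grpc_gen import milvus_pb2, common_pb2

# AlterCollectionRequest now carries delete_keys for removing collection properties.
alter_coll = milvus_pb2.AlterCollectionRequest(
    db_name="default",
    collection_name="demo",
    delete_keys=["mmap.enabled"],  # illustrative property key
)

# AlterCollectionFieldRequest is new: per-field property updates.
alter_field = milvus_pb2.AlterCollectionFieldRequest(
    db_name="default",
    collection_name="demo",
    field_name="title",
    properties=[common_pb2.KeyValuePair(key="max_length", value="512")],
)

print(alter_coll.delete_keys, alter_field.field_name)
```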
class LoadCollectionRequest(_message.Message): - __slots__ = ("base", "db_name", "collection_name", "replica_number", "resource_groups", "refresh") + __slots__ = ("base", "db_name", "collection_name", "replica_number", "resource_groups", "refresh", "load_fields", "skip_load_dynamic_field") BASE_FIELD_NUMBER: _ClassVar[int] DB_NAME_FIELD_NUMBER: _ClassVar[int] COLLECTION_NAME_FIELD_NUMBER: _ClassVar[int] REPLICA_NUMBER_FIELD_NUMBER: _ClassVar[int] RESOURCE_GROUPS_FIELD_NUMBER: _ClassVar[int] REFRESH_FIELD_NUMBER: _ClassVar[int] + LOAD_FIELDS_FIELD_NUMBER: _ClassVar[int] + SKIP_LOAD_DYNAMIC_FIELD_FIELD_NUMBER: _ClassVar[int] base: _common_pb2.MsgBase db_name: str collection_name: str replica_number: int resource_groups: _containers.RepeatedScalarFieldContainer[str] refresh: bool - def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., replica_number: _Optional[int] = ..., resource_groups: _Optional[_Iterable[str]] = ..., refresh: bool = ...) -> None: ... + load_fields: _containers.RepeatedScalarFieldContainer[str] + skip_load_dynamic_field: bool + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., replica_number: _Optional[int] = ..., resource_groups: _Optional[_Iterable[str]] = ..., refresh: bool = ..., load_fields: _Optional[_Iterable[str]] = ..., skip_load_dynamic_field: bool = ...) -> None: ... class ReleaseCollectionRequest(_message.Message): __slots__ = ("base", "db_name", "collection_name") @@ -383,7 +410,7 @@ class HasPartitionRequest(_message.Message): def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., partition_name: _Optional[str] = ...) -> None: ... class LoadPartitionsRequest(_message.Message): - __slots__ = ("base", "db_name", "collection_name", "partition_names", "replica_number", "resource_groups", "refresh") + __slots__ = ("base", "db_name", "collection_name", "partition_names", "replica_number", "resource_groups", "refresh", "load_fields", "skip_load_dynamic_field") BASE_FIELD_NUMBER: _ClassVar[int] DB_NAME_FIELD_NUMBER: _ClassVar[int] COLLECTION_NAME_FIELD_NUMBER: _ClassVar[int] @@ -391,6 +418,8 @@ class LoadPartitionsRequest(_message.Message): REPLICA_NUMBER_FIELD_NUMBER: _ClassVar[int] RESOURCE_GROUPS_FIELD_NUMBER: _ClassVar[int] REFRESH_FIELD_NUMBER: _ClassVar[int] + LOAD_FIELDS_FIELD_NUMBER: _ClassVar[int] + SKIP_LOAD_DYNAMIC_FIELD_FIELD_NUMBER: _ClassVar[int] base: _common_pb2.MsgBase db_name: str collection_name: str @@ -398,7 +427,9 @@ class LoadPartitionsRequest(_message.Message): replica_number: int resource_groups: _containers.RepeatedScalarFieldContainer[str] refresh: bool - def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., partition_names: _Optional[_Iterable[str]] = ..., replica_number: _Optional[int] = ..., resource_groups: _Optional[_Iterable[str]] = ..., refresh: bool = ...) -> None: ... 
+ load_fields: _containers.RepeatedScalarFieldContainer[str] + skip_load_dynamic_field: bool + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., partition_names: _Optional[_Iterable[str]] = ..., replica_number: _Optional[int] = ..., resource_groups: _Optional[_Iterable[str]] = ..., refresh: bool = ..., load_fields: _Optional[_Iterable[str]] = ..., skip_load_dynamic_field: bool = ...) -> None: ... class ReleasePartitionsRequest(_message.Message): __slots__ = ("base", "db_name", "collection_name", "partition_names") @@ -523,18 +554,20 @@ class CreateIndexRequest(_message.Message): def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., field_name: _Optional[str] = ..., extra_params: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ..., index_name: _Optional[str] = ...) -> None: ... class AlterIndexRequest(_message.Message): - __slots__ = ("base", "db_name", "collection_name", "index_name", "extra_params") + __slots__ = ("base", "db_name", "collection_name", "index_name", "extra_params", "delete_keys") BASE_FIELD_NUMBER: _ClassVar[int] DB_NAME_FIELD_NUMBER: _ClassVar[int] COLLECTION_NAME_FIELD_NUMBER: _ClassVar[int] INDEX_NAME_FIELD_NUMBER: _ClassVar[int] EXTRA_PARAMS_FIELD_NUMBER: _ClassVar[int] + DELETE_KEYS_FIELD_NUMBER: _ClassVar[int] base: _common_pb2.MsgBase db_name: str collection_name: str index_name: str extra_params: _containers.RepeatedCompositeFieldContainer[_common_pb2.KeyValuePair] - def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., index_name: _Optional[str] = ..., extra_params: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ...) -> None: ... + delete_keys: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., index_name: _Optional[str] = ..., extra_params: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ..., delete_keys: _Optional[_Iterable[str]] = ...) -> None: ... class DescribeIndexRequest(_message.Message): __slots__ = ("base", "db_name", "collection_name", "field_name", "index_name", "timestamp") @@ -703,7 +736,14 @@ class MutationResult(_message.Message): def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., IDs: _Optional[_Union[_schema_pb2.IDs, _Mapping]] = ..., succ_index: _Optional[_Iterable[int]] = ..., err_index: _Optional[_Iterable[int]] = ..., acknowledged: bool = ..., insert_cnt: _Optional[int] = ..., delete_cnt: _Optional[int] = ..., upsert_cnt: _Optional[int] = ..., timestamp: _Optional[int] = ...) -> None: ... class DeleteRequest(_message.Message): - __slots__ = ("base", "db_name", "collection_name", "partition_name", "expr", "hash_keys", "consistency_level") + __slots__ = ("base", "db_name", "collection_name", "partition_name", "expr", "hash_keys", "consistency_level", "expr_template_values") + class ExprTemplateValuesEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: _schema_pb2.TemplateValue + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[_schema_pb2.TemplateValue, _Mapping]] = ...) -> None: ... 
BASE_FIELD_NUMBER: _ClassVar[int] DB_NAME_FIELD_NUMBER: _ClassVar[int] COLLECTION_NAME_FIELD_NUMBER: _ClassVar[int] @@ -711,6 +751,7 @@ class DeleteRequest(_message.Message): EXPR_FIELD_NUMBER: _ClassVar[int] HASH_KEYS_FIELD_NUMBER: _ClassVar[int] CONSISTENCY_LEVEL_FIELD_NUMBER: _ClassVar[int] + EXPR_TEMPLATE_VALUES_FIELD_NUMBER: _ClassVar[int] base: _common_pb2.MsgBase db_name: str collection_name: str @@ -718,24 +759,41 @@ class DeleteRequest(_message.Message): expr: str hash_keys: _containers.RepeatedScalarFieldContainer[int] consistency_level: _common_pb2.ConsistencyLevel - def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., partition_name: _Optional[str] = ..., expr: _Optional[str] = ..., hash_keys: _Optional[_Iterable[int]] = ..., consistency_level: _Optional[_Union[_common_pb2.ConsistencyLevel, str]] = ...) -> None: ... + expr_template_values: _containers.MessageMap[str, _schema_pb2.TemplateValue] + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., partition_name: _Optional[str] = ..., expr: _Optional[str] = ..., hash_keys: _Optional[_Iterable[int]] = ..., consistency_level: _Optional[_Union[_common_pb2.ConsistencyLevel, str]] = ..., expr_template_values: _Optional[_Mapping[str, _schema_pb2.TemplateValue]] = ...) -> None: ... class SubSearchRequest(_message.Message): - __slots__ = ("dsl", "placeholder_group", "dsl_type", "search_params", "nq") + __slots__ = ("dsl", "placeholder_group", "dsl_type", "search_params", "nq", "expr_template_values") + class ExprTemplateValuesEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: _schema_pb2.TemplateValue + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[_schema_pb2.TemplateValue, _Mapping]] = ...) -> None: ... DSL_FIELD_NUMBER: _ClassVar[int] PLACEHOLDER_GROUP_FIELD_NUMBER: _ClassVar[int] DSL_TYPE_FIELD_NUMBER: _ClassVar[int] SEARCH_PARAMS_FIELD_NUMBER: _ClassVar[int] NQ_FIELD_NUMBER: _ClassVar[int] + EXPR_TEMPLATE_VALUES_FIELD_NUMBER: _ClassVar[int] dsl: str placeholder_group: bytes dsl_type: _common_pb2.DslType search_params: _containers.RepeatedCompositeFieldContainer[_common_pb2.KeyValuePair] nq: int - def __init__(self, dsl: _Optional[str] = ..., placeholder_group: _Optional[bytes] = ..., dsl_type: _Optional[_Union[_common_pb2.DslType, str]] = ..., search_params: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ..., nq: _Optional[int] = ...) -> None: ... + expr_template_values: _containers.MessageMap[str, _schema_pb2.TemplateValue] + def __init__(self, dsl: _Optional[str] = ..., placeholder_group: _Optional[bytes] = ..., dsl_type: _Optional[_Union[_common_pb2.DslType, str]] = ..., search_params: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ..., nq: _Optional[int] = ..., expr_template_values: _Optional[_Mapping[str, _schema_pb2.TemplateValue]] = ...) -> None: ... 
class SearchRequest(_message.Message): - __slots__ = ("base", "db_name", "collection_name", "partition_names", "dsl", "placeholder_group", "dsl_type", "output_fields", "search_params", "travel_timestamp", "guarantee_timestamp", "nq", "not_return_all_meta", "consistency_level", "use_default_consistency", "search_by_primary_keys", "sub_reqs") + __slots__ = ("base", "db_name", "collection_name", "partition_names", "dsl", "placeholder_group", "dsl_type", "output_fields", "search_params", "travel_timestamp", "guarantee_timestamp", "nq", "not_return_all_meta", "consistency_level", "use_default_consistency", "search_by_primary_keys", "sub_reqs", "expr_template_values") + class ExprTemplateValuesEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: _schema_pb2.TemplateValue + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[_schema_pb2.TemplateValue, _Mapping]] = ...) -> None: ... BASE_FIELD_NUMBER: _ClassVar[int] DB_NAME_FIELD_NUMBER: _ClassVar[int] COLLECTION_NAME_FIELD_NUMBER: _ClassVar[int] @@ -753,6 +811,7 @@ class SearchRequest(_message.Message): USE_DEFAULT_CONSISTENCY_FIELD_NUMBER: _ClassVar[int] SEARCH_BY_PRIMARY_KEYS_FIELD_NUMBER: _ClassVar[int] SUB_REQS_FIELD_NUMBER: _ClassVar[int] + EXPR_TEMPLATE_VALUES_FIELD_NUMBER: _ClassVar[int] base: _common_pb2.MsgBase db_name: str collection_name: str @@ -770,7 +829,8 @@ class SearchRequest(_message.Message): use_default_consistency: bool search_by_primary_keys: bool sub_reqs: _containers.RepeatedCompositeFieldContainer[SubSearchRequest] - def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., partition_names: _Optional[_Iterable[str]] = ..., dsl: _Optional[str] = ..., placeholder_group: _Optional[bytes] = ..., dsl_type: _Optional[_Union[_common_pb2.DslType, str]] = ..., output_fields: _Optional[_Iterable[str]] = ..., search_params: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ..., travel_timestamp: _Optional[int] = ..., guarantee_timestamp: _Optional[int] = ..., nq: _Optional[int] = ..., not_return_all_meta: bool = ..., consistency_level: _Optional[_Union[_common_pb2.ConsistencyLevel, str]] = ..., use_default_consistency: bool = ..., search_by_primary_keys: bool = ..., sub_reqs: _Optional[_Iterable[_Union[SubSearchRequest, _Mapping]]] = ...) -> None: ... + expr_template_values: _containers.MessageMap[str, _schema_pb2.TemplateValue] + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., partition_names: _Optional[_Iterable[str]] = ..., dsl: _Optional[str] = ..., placeholder_group: _Optional[bytes] = ..., dsl_type: _Optional[_Union[_common_pb2.DslType, str]] = ..., output_fields: _Optional[_Iterable[str]] = ..., search_params: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ..., travel_timestamp: _Optional[int] = ..., guarantee_timestamp: _Optional[int] = ..., nq: _Optional[int] = ..., not_return_all_meta: bool = ..., consistency_level: _Optional[_Union[_common_pb2.ConsistencyLevel, str]] = ..., use_default_consistency: bool = ..., search_by_primary_keys: bool = ..., sub_reqs: _Optional[_Iterable[_Union[SubSearchRequest, _Mapping]]] = ..., expr_template_values: _Optional[_Mapping[str, _schema_pb2.TemplateValue]] = ...) -> None: ... 
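The Delete/SubSearch/Search request stubs above all gain an `expr_template_values` map keyed by the placeholder name used in the filter expression. A minimal sketch at the raw-message level, assuming the regenerated bindings; the concrete sub-fields of `schema_pb2.TemplateValue` live in schema.proto and are not shown in this diff, so the entry is left unset here.

```python
from pymilvus.grpc_gen import milvus_pb2, schema_pb2

# DeleteRequest (like SearchRequest and SubSearchRequest) now carries
# expr_template_values keyed by the placeholder used in expr.
req = milvus_pb2.DeleteRequest(
    collection_name="demo",
    expr="id in {ids}",  # illustrative templated expression
)

# Message-valued map entries are created on first access; the TemplateValue
# payload itself is defined in schema.proto and is not part of this hunk.
req.expr_template_values["ids"].CopyFrom(schema_pb2.TemplateValue())

print(list(req.expr_template_values.keys()))
```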
class Hits(_message.Message): __slots__ = ("IDs", "row_data", "scores") @@ -783,14 +843,16 @@ class Hits(_message.Message): def __init__(self, IDs: _Optional[_Iterable[int]] = ..., row_data: _Optional[_Iterable[bytes]] = ..., scores: _Optional[_Iterable[float]] = ...) -> None: ... class SearchResults(_message.Message): - __slots__ = ("status", "results", "collection_name") + __slots__ = ("status", "results", "collection_name", "session_ts") STATUS_FIELD_NUMBER: _ClassVar[int] RESULTS_FIELD_NUMBER: _ClassVar[int] COLLECTION_NAME_FIELD_NUMBER: _ClassVar[int] + SESSION_TS_FIELD_NUMBER: _ClassVar[int] status: _common_pb2.Status results: _schema_pb2.SearchResultData collection_name: str - def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., results: _Optional[_Union[_schema_pb2.SearchResultData, _Mapping]] = ..., collection_name: _Optional[str] = ...) -> None: ... + session_ts: int + def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., results: _Optional[_Union[_schema_pb2.SearchResultData, _Mapping]] = ..., collection_name: _Optional[str] = ..., session_ts: _Optional[int] = ...) -> None: ... class HybridSearchRequest(_message.Message): __slots__ = ("base", "db_name", "collection_name", "partition_names", "requests", "rank_params", "travel_timestamp", "guarantee_timestamp", "not_return_all_meta", "output_fields", "consistency_level", "use_default_consistency") @@ -831,7 +893,7 @@ class FlushRequest(_message.Message): def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_names: _Optional[_Iterable[str]] = ...) -> None: ... class FlushResponse(_message.Message): - __slots__ = ("status", "db_name", "coll_segIDs", "flush_coll_segIDs", "coll_seal_times", "coll_flush_ts") + __slots__ = ("status", "db_name", "coll_segIDs", "flush_coll_segIDs", "coll_seal_times", "coll_flush_ts", "channel_cps") class CollSegIDsEntry(_message.Message): __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] @@ -860,22 +922,38 @@ class FlushResponse(_message.Message): key: str value: int def __init__(self, key: _Optional[str] = ..., value: _Optional[int] = ...) -> None: ... + class ChannelCpsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: _msg_pb2.MsgPosition + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[_msg_pb2.MsgPosition, _Mapping]] = ...) -> None: ... STATUS_FIELD_NUMBER: _ClassVar[int] DB_NAME_FIELD_NUMBER: _ClassVar[int] COLL_SEGIDS_FIELD_NUMBER: _ClassVar[int] FLUSH_COLL_SEGIDS_FIELD_NUMBER: _ClassVar[int] COLL_SEAL_TIMES_FIELD_NUMBER: _ClassVar[int] COLL_FLUSH_TS_FIELD_NUMBER: _ClassVar[int] + CHANNEL_CPS_FIELD_NUMBER: _ClassVar[int] status: _common_pb2.Status db_name: str coll_segIDs: _containers.MessageMap[str, _schema_pb2.LongArray] flush_coll_segIDs: _containers.MessageMap[str, _schema_pb2.LongArray] coll_seal_times: _containers.ScalarMap[str, int] coll_flush_ts: _containers.ScalarMap[str, int] - def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., db_name: _Optional[str] = ..., coll_segIDs: _Optional[_Mapping[str, _schema_pb2.LongArray]] = ..., flush_coll_segIDs: _Optional[_Mapping[str, _schema_pb2.LongArray]] = ..., coll_seal_times: _Optional[_Mapping[str, int]] = ..., coll_flush_ts: _Optional[_Mapping[str, int]] = ...) -> None: ... 
+ channel_cps: _containers.MessageMap[str, _msg_pb2.MsgPosition] + def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., db_name: _Optional[str] = ..., coll_segIDs: _Optional[_Mapping[str, _schema_pb2.LongArray]] = ..., flush_coll_segIDs: _Optional[_Mapping[str, _schema_pb2.LongArray]] = ..., coll_seal_times: _Optional[_Mapping[str, int]] = ..., coll_flush_ts: _Optional[_Mapping[str, int]] = ..., channel_cps: _Optional[_Mapping[str, _msg_pb2.MsgPosition]] = ...) -> None: ... class QueryRequest(_message.Message): - __slots__ = ("base", "db_name", "collection_name", "expr", "output_fields", "partition_names", "travel_timestamp", "guarantee_timestamp", "query_params", "not_return_all_meta", "consistency_level", "use_default_consistency") + __slots__ = ("base", "db_name", "collection_name", "expr", "output_fields", "partition_names", "travel_timestamp", "guarantee_timestamp", "query_params", "not_return_all_meta", "consistency_level", "use_default_consistency", "expr_template_values") + class ExprTemplateValuesEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: _schema_pb2.TemplateValue + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[_schema_pb2.TemplateValue, _Mapping]] = ...) -> None: ... BASE_FIELD_NUMBER: _ClassVar[int] DB_NAME_FIELD_NUMBER: _ClassVar[int] COLLECTION_NAME_FIELD_NUMBER: _ClassVar[int] @@ -888,6 +966,7 @@ class QueryRequest(_message.Message): NOT_RETURN_ALL_META_FIELD_NUMBER: _ClassVar[int] CONSISTENCY_LEVEL_FIELD_NUMBER: _ClassVar[int] USE_DEFAULT_CONSISTENCY_FIELD_NUMBER: _ClassVar[int] + EXPR_TEMPLATE_VALUES_FIELD_NUMBER: _ClassVar[int] base: _common_pb2.MsgBase db_name: str collection_name: str @@ -900,19 +979,22 @@ class QueryRequest(_message.Message): not_return_all_meta: bool consistency_level: _common_pb2.ConsistencyLevel use_default_consistency: bool - def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., expr: _Optional[str] = ..., output_fields: _Optional[_Iterable[str]] = ..., partition_names: _Optional[_Iterable[str]] = ..., travel_timestamp: _Optional[int] = ..., guarantee_timestamp: _Optional[int] = ..., query_params: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ..., not_return_all_meta: bool = ..., consistency_level: _Optional[_Union[_common_pb2.ConsistencyLevel, str]] = ..., use_default_consistency: bool = ...) -> None: ... + expr_template_values: _containers.MessageMap[str, _schema_pb2.TemplateValue] + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., expr: _Optional[str] = ..., output_fields: _Optional[_Iterable[str]] = ..., partition_names: _Optional[_Iterable[str]] = ..., travel_timestamp: _Optional[int] = ..., guarantee_timestamp: _Optional[int] = ..., query_params: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ..., not_return_all_meta: bool = ..., consistency_level: _Optional[_Union[_common_pb2.ConsistencyLevel, str]] = ..., use_default_consistency: bool = ..., expr_template_values: _Optional[_Mapping[str, _schema_pb2.TemplateValue]] = ...) -> None: ... 
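`FlushResponse` also gains a `channel_cps` map from channel name to `msg_pb2.MsgPosition`. A small sketch of reading it; the response is normally produced by the Flush RPC, and the `timestamp` attribute is assumed from msg.proto, which is not part of this diff.

```python
from pymilvus.grpc_gen import milvus_pb2

# channel_cps maps each vchannel name to the checkpoint recorded at flush time.
resp = milvus_pb2.FlushResponse()  # placeholder; normally returned by Flush

for channel, checkpoint in resp.channel_cps.items():
    # MsgPosition is assumed to expose a timestamp field (per msg.proto).
    print(channel, checkpoint.timestamp)
```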
class QueryResults(_message.Message): - __slots__ = ("status", "fields_data", "collection_name", "output_fields") + __slots__ = ("status", "fields_data", "collection_name", "output_fields", "session_ts") STATUS_FIELD_NUMBER: _ClassVar[int] FIELDS_DATA_FIELD_NUMBER: _ClassVar[int] COLLECTION_NAME_FIELD_NUMBER: _ClassVar[int] OUTPUT_FIELDS_FIELD_NUMBER: _ClassVar[int] + SESSION_TS_FIELD_NUMBER: _ClassVar[int] status: _common_pb2.Status fields_data: _containers.RepeatedCompositeFieldContainer[_schema_pb2.FieldData] collection_name: str output_fields: _containers.RepeatedScalarFieldContainer[str] - def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., fields_data: _Optional[_Iterable[_Union[_schema_pb2.FieldData, _Mapping]]] = ..., collection_name: _Optional[str] = ..., output_fields: _Optional[_Iterable[str]] = ...) -> None: ... + session_ts: int + def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., fields_data: _Optional[_Iterable[_Union[_schema_pb2.FieldData, _Mapping]]] = ..., collection_name: _Optional[str] = ..., output_fields: _Optional[_Iterable[str]] = ..., session_ts: _Optional[int] = ...) -> None: ... class VectorIDs(_message.Message): __slots__ = ("collection_name", "field_name", "id_array", "partition_names") @@ -973,18 +1055,22 @@ class FlushAllResponse(_message.Message): def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., flush_all_ts: _Optional[int] = ...) -> None: ... class PersistentSegmentInfo(_message.Message): - __slots__ = ("segmentID", "collectionID", "partitionID", "num_rows", "state") + __slots__ = ("segmentID", "collectionID", "partitionID", "num_rows", "state", "level", "is_sorted") SEGMENTID_FIELD_NUMBER: _ClassVar[int] COLLECTIONID_FIELD_NUMBER: _ClassVar[int] PARTITIONID_FIELD_NUMBER: _ClassVar[int] NUM_ROWS_FIELD_NUMBER: _ClassVar[int] STATE_FIELD_NUMBER: _ClassVar[int] + LEVEL_FIELD_NUMBER: _ClassVar[int] + IS_SORTED_FIELD_NUMBER: _ClassVar[int] segmentID: int collectionID: int partitionID: int num_rows: int state: _common_pb2.SegmentState - def __init__(self, segmentID: _Optional[int] = ..., collectionID: _Optional[int] = ..., partitionID: _Optional[int] = ..., num_rows: _Optional[int] = ..., state: _Optional[_Union[_common_pb2.SegmentState, str]] = ...) -> None: ... + level: _common_pb2.SegmentLevel + is_sorted: bool + def __init__(self, segmentID: _Optional[int] = ..., collectionID: _Optional[int] = ..., partitionID: _Optional[int] = ..., num_rows: _Optional[int] = ..., state: _Optional[_Union[_common_pb2.SegmentState, str]] = ..., level: _Optional[_Union[_common_pb2.SegmentLevel, str]] = ..., is_sorted: bool = ...) -> None: ... class GetPersistentSegmentInfoRequest(_message.Message): __slots__ = ("base", "dbName", "collectionName") @@ -1005,7 +1091,7 @@ class GetPersistentSegmentInfoResponse(_message.Message): def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., infos: _Optional[_Iterable[_Union[PersistentSegmentInfo, _Mapping]]] = ...) -> None: ... 
class QuerySegmentInfo(_message.Message): - __slots__ = ("segmentID", "collectionID", "partitionID", "mem_size", "num_rows", "index_name", "indexID", "nodeID", "state", "nodeIds") + __slots__ = ("segmentID", "collectionID", "partitionID", "mem_size", "num_rows", "index_name", "indexID", "nodeID", "state", "nodeIds", "level", "is_sorted") SEGMENTID_FIELD_NUMBER: _ClassVar[int] COLLECTIONID_FIELD_NUMBER: _ClassVar[int] PARTITIONID_FIELD_NUMBER: _ClassVar[int] @@ -1016,6 +1102,8 @@ class QuerySegmentInfo(_message.Message): NODEID_FIELD_NUMBER: _ClassVar[int] STATE_FIELD_NUMBER: _ClassVar[int] NODEIDS_FIELD_NUMBER: _ClassVar[int] + LEVEL_FIELD_NUMBER: _ClassVar[int] + IS_SORTED_FIELD_NUMBER: _ClassVar[int] segmentID: int collectionID: int partitionID: int @@ -1026,7 +1114,9 @@ class QuerySegmentInfo(_message.Message): nodeID: int state: _common_pb2.SegmentState nodeIds: _containers.RepeatedScalarFieldContainer[int] - def __init__(self, segmentID: _Optional[int] = ..., collectionID: _Optional[int] = ..., partitionID: _Optional[int] = ..., mem_size: _Optional[int] = ..., num_rows: _Optional[int] = ..., index_name: _Optional[str] = ..., indexID: _Optional[int] = ..., nodeID: _Optional[int] = ..., state: _Optional[_Union[_common_pb2.SegmentState, str]] = ..., nodeIds: _Optional[_Iterable[int]] = ...) -> None: ... + level: _common_pb2.SegmentLevel + is_sorted: bool + def __init__(self, segmentID: _Optional[int] = ..., collectionID: _Optional[int] = ..., partitionID: _Optional[int] = ..., mem_size: _Optional[int] = ..., num_rows: _Optional[int] = ..., index_name: _Optional[str] = ..., indexID: _Optional[int] = ..., nodeID: _Optional[int] = ..., state: _Optional[_Union[_common_pb2.SegmentState, str]] = ..., nodeIds: _Optional[_Iterable[int]] = ..., level: _Optional[_Union[_common_pb2.SegmentLevel, str]] = ..., is_sorted: bool = ...) -> None: ... class GetQuerySegmentInfoRequest(_message.Message): __slots__ = ("base", "dbName", "collectionName") @@ -1440,12 +1530,56 @@ class CreateRoleRequest(_message.Message): def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., entity: _Optional[_Union[RoleEntity, _Mapping]] = ...) -> None: ... class DropRoleRequest(_message.Message): - __slots__ = ("base", "role_name") + __slots__ = ("base", "role_name", "force_drop") BASE_FIELD_NUMBER: _ClassVar[int] ROLE_NAME_FIELD_NUMBER: _ClassVar[int] + FORCE_DROP_FIELD_NUMBER: _ClassVar[int] base: _common_pb2.MsgBase role_name: str - def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., role_name: _Optional[str] = ...) -> None: ... + force_drop: bool + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., role_name: _Optional[str] = ..., force_drop: bool = ...) -> None: ... + +class CreatePrivilegeGroupRequest(_message.Message): + __slots__ = ("base", "group_name") + BASE_FIELD_NUMBER: _ClassVar[int] + GROUP_NAME_FIELD_NUMBER: _ClassVar[int] + base: _common_pb2.MsgBase + group_name: str + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., group_name: _Optional[str] = ...) -> None: ... + +class DropPrivilegeGroupRequest(_message.Message): + __slots__ = ("base", "group_name") + BASE_FIELD_NUMBER: _ClassVar[int] + GROUP_NAME_FIELD_NUMBER: _ClassVar[int] + base: _common_pb2.MsgBase + group_name: str + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., group_name: _Optional[str] = ...) -> None: ... 
+ +class ListPrivilegeGroupsRequest(_message.Message): + __slots__ = ("base",) + BASE_FIELD_NUMBER: _ClassVar[int] + base: _common_pb2.MsgBase + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ...) -> None: ... + +class ListPrivilegeGroupsResponse(_message.Message): + __slots__ = ("status", "privilege_groups") + STATUS_FIELD_NUMBER: _ClassVar[int] + PRIVILEGE_GROUPS_FIELD_NUMBER: _ClassVar[int] + status: _common_pb2.Status + privilege_groups: _containers.RepeatedCompositeFieldContainer[PrivilegeGroupInfo] + def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., privilege_groups: _Optional[_Iterable[_Union[PrivilegeGroupInfo, _Mapping]]] = ...) -> None: ... + +class OperatePrivilegeGroupRequest(_message.Message): + __slots__ = ("base", "group_name", "privileges", "type") + BASE_FIELD_NUMBER: _ClassVar[int] + GROUP_NAME_FIELD_NUMBER: _ClassVar[int] + PRIVILEGES_FIELD_NUMBER: _ClassVar[int] + TYPE_FIELD_NUMBER: _ClassVar[int] + base: _common_pb2.MsgBase + group_name: str + privileges: _containers.RepeatedCompositeFieldContainer[PrivilegeEntity] + type: OperatePrivilegeGroupType + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., group_name: _Optional[str] = ..., privileges: _Optional[_Iterable[_Union[PrivilegeEntity, _Mapping]]] = ..., type: _Optional[_Union[OperatePrivilegeGroupType, str]] = ...) -> None: ... class OperateUserRoleRequest(_message.Message): __slots__ = ("base", "username", "role_name", "type") @@ -1459,6 +1593,14 @@ class OperateUserRoleRequest(_message.Message): type: OperateUserRoleType def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., username: _Optional[str] = ..., role_name: _Optional[str] = ..., type: _Optional[_Union[OperateUserRoleType, str]] = ...) -> None: ... +class PrivilegeGroupInfo(_message.Message): + __slots__ = ("group_name", "privileges") + GROUP_NAME_FIELD_NUMBER: _ClassVar[int] + PRIVILEGES_FIELD_NUMBER: _ClassVar[int] + group_name: str + privileges: _containers.RepeatedCompositeFieldContainer[PrivilegeEntity] + def __init__(self, group_name: _Optional[str] = ..., privileges: _Optional[_Iterable[_Union[PrivilegeEntity, _Mapping]]] = ...) -> None: ... + class SelectRoleRequest(_message.Message): __slots__ = ("base", "role", "include_user_info") BASE_FIELD_NUMBER: _ClassVar[int] @@ -1577,6 +1719,50 @@ class OperatePrivilegeRequest(_message.Message): type: OperatePrivilegeType def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., entity: _Optional[_Union[GrantEntity, _Mapping]] = ..., type: _Optional[_Union[OperatePrivilegeType, str]] = ...) -> None: ... +class UserInfo(_message.Message): + __slots__ = ("user", "password", "roles") + USER_FIELD_NUMBER: _ClassVar[int] + PASSWORD_FIELD_NUMBER: _ClassVar[int] + ROLES_FIELD_NUMBER: _ClassVar[int] + user: str + password: str + roles: _containers.RepeatedCompositeFieldContainer[RoleEntity] + def __init__(self, user: _Optional[str] = ..., password: _Optional[str] = ..., roles: _Optional[_Iterable[_Union[RoleEntity, _Mapping]]] = ...) -> None: ... 
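Note (illustrative): `OperatePrivilegeGroupRequest` batches `PrivilegeEntity` items into a named group, and `ListPrivilegeGroupsResponse` returns them as `PrivilegeGroupInfo` entries. A sketch under the assumption that `PrivilegeEntity` carries a single privilege name, as in the existing RBAC messages:

```python
# Illustrative only: adding privileges to a privilege group.
from pymilvus.grpc_gen import milvus_pb2

req = milvus_pb2.OperatePrivilegeGroupRequest(
    group_name="search_group",
    # Assumption: PrivilegeEntity takes a privilege name such as "Search"/"Query".
    privileges=[milvus_pb2.PrivilegeEntity(name="Search"),
                milvus_pb2.PrivilegeEntity(name="Query")],
    # `type` expects an OperatePrivilegeGroupType enum value (add/remove);
    # it is left at the enum's default here.
)

# ListPrivilegeGroups responses carry the same grouping:
info = milvus_pb2.PrivilegeGroupInfo(group_name="search_group",
                                     privileges=req.privileges)
```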
+ +class RBACMeta(_message.Message): + __slots__ = ("users", "roles", "grants", "privilege_groups") + USERS_FIELD_NUMBER: _ClassVar[int] + ROLES_FIELD_NUMBER: _ClassVar[int] + GRANTS_FIELD_NUMBER: _ClassVar[int] + PRIVILEGE_GROUPS_FIELD_NUMBER: _ClassVar[int] + users: _containers.RepeatedCompositeFieldContainer[UserInfo] + roles: _containers.RepeatedCompositeFieldContainer[RoleEntity] + grants: _containers.RepeatedCompositeFieldContainer[GrantEntity] + privilege_groups: _containers.RepeatedCompositeFieldContainer[PrivilegeGroupInfo] + def __init__(self, users: _Optional[_Iterable[_Union[UserInfo, _Mapping]]] = ..., roles: _Optional[_Iterable[_Union[RoleEntity, _Mapping]]] = ..., grants: _Optional[_Iterable[_Union[GrantEntity, _Mapping]]] = ..., privilege_groups: _Optional[_Iterable[_Union[PrivilegeGroupInfo, _Mapping]]] = ...) -> None: ... + +class BackupRBACMetaRequest(_message.Message): + __slots__ = ("base",) + BASE_FIELD_NUMBER: _ClassVar[int] + base: _common_pb2.MsgBase + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ...) -> None: ... + +class BackupRBACMetaResponse(_message.Message): + __slots__ = ("status", "RBAC_meta") + STATUS_FIELD_NUMBER: _ClassVar[int] + RBAC_META_FIELD_NUMBER: _ClassVar[int] + status: _common_pb2.Status + RBAC_meta: RBACMeta + def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., RBAC_meta: _Optional[_Union[RBACMeta, _Mapping]] = ...) -> None: ... + +class RestoreRBACMetaRequest(_message.Message): + __slots__ = ("base", "RBAC_meta") + BASE_FIELD_NUMBER: _ClassVar[int] + RBAC_META_FIELD_NUMBER: _ClassVar[int] + base: _common_pb2.MsgBase + RBAC_meta: RBACMeta + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., RBAC_meta: _Optional[_Union[RBACMeta, _Mapping]] = ...) -> None: ... + class GetLoadingProgressRequest(_message.Message): __slots__ = ("base", "collection_name", "partition_names", "db_name") BASE_FIELD_NUMBER: _ClassVar[int] @@ -1854,12 +2040,14 @@ class AllocTimestampResponse(_message.Message): def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., timestamp: _Optional[int] = ...) -> None: ... class CreateDatabaseRequest(_message.Message): - __slots__ = ("base", "db_name") + __slots__ = ("base", "db_name", "properties") BASE_FIELD_NUMBER: _ClassVar[int] DB_NAME_FIELD_NUMBER: _ClassVar[int] + PROPERTIES_FIELD_NUMBER: _ClassVar[int] base: _common_pb2.MsgBase db_name: str - def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ...) -> None: ... + properties: _containers.RepeatedCompositeFieldContainer[_common_pb2.KeyValuePair] + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., properties: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ...) -> None: ... class DropDatabaseRequest(_message.Message): __slots__ = ("base", "db_name") @@ -1876,14 +2064,16 @@ class ListDatabasesRequest(_message.Message): def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ...) -> None: ... 
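Note (illustrative): `BackupRBAC`/`RestoreRBAC` move a whole `RBACMeta` snapshot (users, roles, grants, privilege groups) between clusters, and `CreateDatabaseRequest` now accepts `properties`. A sketch of the message shapes only; the `RoleEntity` layout and the database property key are assumptions:

```python
# Illustrative only: pairing a BackupRBAC snapshot with a RestoreRBAC request,
# plus the new properties field on CreateDatabaseRequest.
from pymilvus.grpc_gen import common_pb2, milvus_pb2

# A hand-built RBACMeta stands in for BackupRBACMetaResponse.RBAC_meta here;
# RoleEntity is assumed to carry just a role name.
meta = milvus_pb2.RBACMeta(
    users=[milvus_pb2.UserInfo(user="alice",
                               roles=[milvus_pb2.RoleEntity(name="admin")])],
    privilege_groups=[milvus_pb2.PrivilegeGroupInfo(group_name="search_group")],
)
restore_req = milvus_pb2.RestoreRBACMetaRequest(RBAC_meta=meta)

# CreateDatabaseRequest properties are common KeyValuePair entries; the key
# below is an assumption about what the server understands.
db_req = milvus_pb2.CreateDatabaseRequest(
    db_name="analytics",
    properties=[common_pb2.KeyValuePair(key="database.replica.number", value="2")],
)
```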
class ListDatabasesResponse(_message.Message): - __slots__ = ("status", "db_names", "created_timestamp") + __slots__ = ("status", "db_names", "created_timestamp", "db_ids") STATUS_FIELD_NUMBER: _ClassVar[int] DB_NAMES_FIELD_NUMBER: _ClassVar[int] CREATED_TIMESTAMP_FIELD_NUMBER: _ClassVar[int] + DB_IDS_FIELD_NUMBER: _ClassVar[int] status: _common_pb2.Status db_names: _containers.RepeatedScalarFieldContainer[str] created_timestamp: _containers.RepeatedScalarFieldContainer[int] - def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., db_names: _Optional[_Iterable[str]] = ..., created_timestamp: _Optional[_Iterable[int]] = ...) -> None: ... + db_ids: _containers.RepeatedScalarFieldContainer[int] + def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., db_names: _Optional[_Iterable[str]] = ..., created_timestamp: _Optional[_Iterable[int]] = ..., db_ids: _Optional[_Iterable[int]] = ...) -> None: ... class AlterDatabaseRequest(_message.Message): __slots__ = ("base", "db_name", "db_id", "properties") @@ -1897,6 +2087,28 @@ class AlterDatabaseRequest(_message.Message): properties: _containers.RepeatedCompositeFieldContainer[_common_pb2.KeyValuePair] def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ..., db_id: _Optional[str] = ..., properties: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ...) -> None: ... +class DescribeDatabaseRequest(_message.Message): + __slots__ = ("base", "db_name") + BASE_FIELD_NUMBER: _ClassVar[int] + DB_NAME_FIELD_NUMBER: _ClassVar[int] + base: _common_pb2.MsgBase + db_name: str + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., db_name: _Optional[str] = ...) -> None: ... + +class DescribeDatabaseResponse(_message.Message): + __slots__ = ("status", "db_name", "dbID", "created_timestamp", "properties") + STATUS_FIELD_NUMBER: _ClassVar[int] + DB_NAME_FIELD_NUMBER: _ClassVar[int] + DBID_FIELD_NUMBER: _ClassVar[int] + CREATED_TIMESTAMP_FIELD_NUMBER: _ClassVar[int] + PROPERTIES_FIELD_NUMBER: _ClassVar[int] + status: _common_pb2.Status + db_name: str + dbID: int + created_timestamp: int + properties: _containers.RepeatedCompositeFieldContainer[_common_pb2.KeyValuePair] + def __init__(self, status: _Optional[_Union[_common_pb2.Status, _Mapping]] = ..., db_name: _Optional[str] = ..., dbID: _Optional[int] = ..., created_timestamp: _Optional[int] = ..., properties: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ...) -> None: ... 
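Note (illustrative): the new `DescribeDatabase` RPC returns the database ID, creation timestamp and properties. A minimal sketch of calling it through the regenerated gRPC stubs, assuming a Milvus server reachable at `localhost:19530`; the higher-level pymilvus client normally wraps this:

```python
# Illustrative only: calling DescribeDatabase directly via the generated stubs.
import grpc
from pymilvus.grpc_gen import milvus_pb2, milvus_pb2_grpc

with grpc.insecure_channel("localhost:19530") as channel:  # assumed address
    stub = milvus_pb2_grpc.MilvusServiceStub(channel)
    resp = stub.DescribeDatabase(milvus_pb2.DescribeDatabaseRequest(db_name="default"))
    print(resp.dbID, resp.created_timestamp)
    for prop in resp.properties:
        print(prop.key, prop.value)
```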
+ class ReplicateMessageRequest(_message.Message): __slots__ = ("base", "channel_name", "BeginTs", "EndTs", "Msgs", "StartPositions", "EndPositions") BASE_FIELD_NUMBER: _ClassVar[int] diff --git a/pymilvus/grpc_gen/milvus_pb2_grpc.py b/pymilvus/grpc_gen/milvus_pb2_grpc.py index e72c64800..881f87ae5 100644 --- a/pymilvus/grpc_gen/milvus_pb2_grpc.py +++ b/pymilvus/grpc_gen/milvus_pb2_grpc.py @@ -61,6 +61,11 @@ def __init__(self, channel): request_serializer=milvus__pb2.AlterCollectionRequest.SerializeToString, response_deserializer=common__pb2.Status.FromString, ) + self.AlterCollectionField = channel.unary_unary( + '/milvus.proto.milvus.MilvusService/AlterCollectionField', + request_serializer=milvus__pb2.AlterCollectionFieldRequest.SerializeToString, + response_deserializer=common__pb2.Status.FromString, + ) self.CreatePartition = channel.unary_unary( '/milvus.proto.milvus.MilvusService/CreatePartition', request_serializer=milvus__pb2.CreatePartitionRequest.SerializeToString, @@ -436,11 +441,46 @@ def __init__(self, channel): request_serializer=milvus__pb2.AlterDatabaseRequest.SerializeToString, response_deserializer=common__pb2.Status.FromString, ) + self.DescribeDatabase = channel.unary_unary( + '/milvus.proto.milvus.MilvusService/DescribeDatabase', + request_serializer=milvus__pb2.DescribeDatabaseRequest.SerializeToString, + response_deserializer=milvus__pb2.DescribeDatabaseResponse.FromString, + ) self.ReplicateMessage = channel.unary_unary( '/milvus.proto.milvus.MilvusService/ReplicateMessage', request_serializer=milvus__pb2.ReplicateMessageRequest.SerializeToString, response_deserializer=milvus__pb2.ReplicateMessageResponse.FromString, ) + self.BackupRBAC = channel.unary_unary( + '/milvus.proto.milvus.MilvusService/BackupRBAC', + request_serializer=milvus__pb2.BackupRBACMetaRequest.SerializeToString, + response_deserializer=milvus__pb2.BackupRBACMetaResponse.FromString, + ) + self.RestoreRBAC = channel.unary_unary( + '/milvus.proto.milvus.MilvusService/RestoreRBAC', + request_serializer=milvus__pb2.RestoreRBACMetaRequest.SerializeToString, + response_deserializer=common__pb2.Status.FromString, + ) + self.CreatePrivilegeGroup = channel.unary_unary( + '/milvus.proto.milvus.MilvusService/CreatePrivilegeGroup', + request_serializer=milvus__pb2.CreatePrivilegeGroupRequest.SerializeToString, + response_deserializer=common__pb2.Status.FromString, + ) + self.DropPrivilegeGroup = channel.unary_unary( + '/milvus.proto.milvus.MilvusService/DropPrivilegeGroup', + request_serializer=milvus__pb2.DropPrivilegeGroupRequest.SerializeToString, + response_deserializer=common__pb2.Status.FromString, + ) + self.ListPrivilegeGroups = channel.unary_unary( + '/milvus.proto.milvus.MilvusService/ListPrivilegeGroups', + request_serializer=milvus__pb2.ListPrivilegeGroupsRequest.SerializeToString, + response_deserializer=milvus__pb2.ListPrivilegeGroupsResponse.FromString, + ) + self.OperatePrivilegeGroup = channel.unary_unary( + '/milvus.proto.milvus.MilvusService/OperatePrivilegeGroup', + request_serializer=milvus__pb2.OperatePrivilegeGroupRequest.SerializeToString, + response_deserializer=common__pb2.Status.FromString, + ) class MilvusServiceServicer(object): @@ -500,6 +540,12 @@ def AlterCollection(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def AlterCollectionField(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + 
context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def CreatePartition(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) @@ -957,12 +1003,54 @@ def AlterDatabase(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def DescribeDatabase(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def ReplicateMessage(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def BackupRBAC(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RestoreRBAC(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreatePrivilegeGroup(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DropPrivilegeGroup(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListPrivilegeGroups(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def OperatePrivilegeGroup(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_MilvusServiceServicer_to_server(servicer, server): rpc_method_handlers = { @@ -1011,6 +1099,11 @@ def add_MilvusServiceServicer_to_server(servicer, server): request_deserializer=milvus__pb2.AlterCollectionRequest.FromString, response_serializer=common__pb2.Status.SerializeToString, ), + 'AlterCollectionField': grpc.unary_unary_rpc_method_handler( + servicer.AlterCollectionField, + request_deserializer=milvus__pb2.AlterCollectionFieldRequest.FromString, + response_serializer=common__pb2.Status.SerializeToString, + ), 'CreatePartition': grpc.unary_unary_rpc_method_handler( servicer.CreatePartition, request_deserializer=milvus__pb2.CreatePartitionRequest.FromString, @@ -1386,11 +1479,46 @@ def add_MilvusServiceServicer_to_server(servicer, server): request_deserializer=milvus__pb2.AlterDatabaseRequest.FromString, response_serializer=common__pb2.Status.SerializeToString, ), + 'DescribeDatabase': grpc.unary_unary_rpc_method_handler( + servicer.DescribeDatabase, + 
request_deserializer=milvus__pb2.DescribeDatabaseRequest.FromString, + response_serializer=milvus__pb2.DescribeDatabaseResponse.SerializeToString, + ), 'ReplicateMessage': grpc.unary_unary_rpc_method_handler( servicer.ReplicateMessage, request_deserializer=milvus__pb2.ReplicateMessageRequest.FromString, response_serializer=milvus__pb2.ReplicateMessageResponse.SerializeToString, ), + 'BackupRBAC': grpc.unary_unary_rpc_method_handler( + servicer.BackupRBAC, + request_deserializer=milvus__pb2.BackupRBACMetaRequest.FromString, + response_serializer=milvus__pb2.BackupRBACMetaResponse.SerializeToString, + ), + 'RestoreRBAC': grpc.unary_unary_rpc_method_handler( + servicer.RestoreRBAC, + request_deserializer=milvus__pb2.RestoreRBACMetaRequest.FromString, + response_serializer=common__pb2.Status.SerializeToString, + ), + 'CreatePrivilegeGroup': grpc.unary_unary_rpc_method_handler( + servicer.CreatePrivilegeGroup, + request_deserializer=milvus__pb2.CreatePrivilegeGroupRequest.FromString, + response_serializer=common__pb2.Status.SerializeToString, + ), + 'DropPrivilegeGroup': grpc.unary_unary_rpc_method_handler( + servicer.DropPrivilegeGroup, + request_deserializer=milvus__pb2.DropPrivilegeGroupRequest.FromString, + response_serializer=common__pb2.Status.SerializeToString, + ), + 'ListPrivilegeGroups': grpc.unary_unary_rpc_method_handler( + servicer.ListPrivilegeGroups, + request_deserializer=milvus__pb2.ListPrivilegeGroupsRequest.FromString, + response_serializer=milvus__pb2.ListPrivilegeGroupsResponse.SerializeToString, + ), + 'OperatePrivilegeGroup': grpc.unary_unary_rpc_method_handler( + servicer.OperatePrivilegeGroup, + request_deserializer=milvus__pb2.OperatePrivilegeGroupRequest.FromString, + response_serializer=common__pb2.Status.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'milvus.proto.milvus.MilvusService', rpc_method_handlers) @@ -1554,6 +1682,23 @@ def AlterCollection(request, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + @staticmethod + def AlterCollectionField(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/milvus.proto.milvus.MilvusService/AlterCollectionField', + milvus__pb2.AlterCollectionFieldRequest.SerializeToString, + common__pb2.Status.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + @staticmethod def CreatePartition(request, target, @@ -2829,6 +2974,23 @@ def AlterDatabase(request, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + @staticmethod + def DescribeDatabase(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/milvus.proto.milvus.MilvusService/DescribeDatabase', + milvus__pb2.DescribeDatabaseRequest.SerializeToString, + milvus__pb2.DescribeDatabaseResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + @staticmethod def ReplicateMessage(request, target, @@ -2846,6 +3008,108 @@ def ReplicateMessage(request, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, 
timeout, metadata) + @staticmethod + def BackupRBAC(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/milvus.proto.milvus.MilvusService/BackupRBAC', + milvus__pb2.BackupRBACMetaRequest.SerializeToString, + milvus__pb2.BackupRBACMetaResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def RestoreRBAC(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/milvus.proto.milvus.MilvusService/RestoreRBAC', + milvus__pb2.RestoreRBACMetaRequest.SerializeToString, + common__pb2.Status.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def CreatePrivilegeGroup(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/milvus.proto.milvus.MilvusService/CreatePrivilegeGroup', + milvus__pb2.CreatePrivilegeGroupRequest.SerializeToString, + common__pb2.Status.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def DropPrivilegeGroup(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/milvus.proto.milvus.MilvusService/DropPrivilegeGroup', + milvus__pb2.DropPrivilegeGroupRequest.SerializeToString, + common__pb2.Status.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ListPrivilegeGroups(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/milvus.proto.milvus.MilvusService/ListPrivilegeGroups', + milvus__pb2.ListPrivilegeGroupsRequest.SerializeToString, + milvus__pb2.ListPrivilegeGroupsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def OperatePrivilegeGroup(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/milvus.proto.milvus.MilvusService/OperatePrivilegeGroup', + milvus__pb2.OperatePrivilegeGroupRequest.SerializeToString, + common__pb2.Status.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + class ProxyServiceStub(object): """Missing associated documentation comment in .proto file.""" diff --git a/pymilvus/grpc_gen/msg_pb2.py b/pymilvus/grpc_gen/msg_pb2.py index 8673318fb..a7337fe5c 100644 --- a/pymilvus/grpc_gen/msg_pb2.py +++ b/pymilvus/grpc_gen/msg_pb2.py @@ -1,7 
+1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: msg.proto -# Protobuf Python Version: 4.25.0 +# Protobuf Python Version: 4.25.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool @@ -16,7 +16,7 @@ from . import schema_pb2 as schema__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\tmsg.proto\x12\x10milvus.proto.msg\x1a\x0c\x63ommon.proto\x1a\x0cschema.proto\"\xaa\x03\n\rInsertRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x11\n\tshardName\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x62_name\x18\x03 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x04 \x01(\t\x12\x16\n\x0epartition_name\x18\x05 \x01(\t\x12\x0c\n\x04\x64\x62ID\x18\x06 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x07 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x08 \x01(\x03\x12\x11\n\tsegmentID\x18\t \x01(\x03\x12\x12\n\ntimestamps\x18\n \x03(\x04\x12\x0e\n\x06rowIDs\x18\x0b \x03(\x03\x12+\n\x08row_data\x18\x0c \x03(\x0b\x32\x19.milvus.proto.common.Blob\x12\x33\n\x0b\x66ields_data\x18\r \x03(\x0b\x32\x1e.milvus.proto.schema.FieldData\x12\x10\n\x08num_rows\x18\x0e \x01(\x04\x12\x34\n\x07version\x18\x0f \x01(\x0e\x32#.milvus.proto.msg.InsertDataVersion\"\xbb\x02\n\rDeleteRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x11\n\tshardName\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x62_name\x18\x03 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x04 \x01(\t\x12\x16\n\x0epartition_name\x18\x05 \x01(\t\x12\x0c\n\x04\x64\x62ID\x18\x06 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x07 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x08 \x01(\x03\x12\x1a\n\x12int64_primary_keys\x18\t \x03(\x03\x12\x12\n\ntimestamps\x18\n \x03(\x04\x12\x10\n\x08num_rows\x18\x0b \x01(\x03\x12.\n\x0cprimary_keys\x18\x0c \x01(\x0b\x32\x18.milvus.proto.schema.IDs\"W\n\x0bMsgPosition\x12\x14\n\x0c\x63hannel_name\x18\x01 \x01(\t\x12\r\n\x05msgID\x18\x02 \x01(\x0c\x12\x10\n\x08msgGroup\x18\x03 \x01(\t\x12\x11\n\ttimestamp\x18\x04 \x01(\x04\"\x9f\x02\n\x17\x43reateCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x16\n\x0e\x63ollectionName\x18\x03 \x01(\t\x12\x15\n\rpartitionName\x18\x04 \x01(\t\x12\x0c\n\x04\x64\x62ID\x18\x05 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x06 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x07 \x01(\x03\x12\x0e\n\x06schema\x18\x08 \x01(\x0c\x12\x1b\n\x13virtualChannelNames\x18\t \x03(\t\x12\x1c\n\x14physicalChannelNames\x18\n \x03(\t\x12\x14\n\x0cpartitionIDs\x18\x0b \x03(\x03\"\x90\x01\n\x15\x44ropCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x16\n\x0e\x63ollectionName\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x62ID\x18\x04 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x05 \x01(\x03\"\xbf\x01\n\x16\x43reatePartitionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t\x12\x0c\n\x04\x64\x62ID\x18\x05 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x06 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x07 \x01(\x03\"\xbd\x01\n\x14\x44ropPartitionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 
\x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t\x12\x0c\n\x04\x64\x62ID\x18\x05 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x06 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x07 \x01(\x03\"9\n\x0bTimeTickMsg\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\"\x9f\x01\n\rDataNodeTtMsg\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x14\n\x0c\x63hannel_name\x18\x02 \x01(\t\x12\x11\n\ttimestamp\x18\x03 \x01(\x04\x12\x39\n\x0esegments_stats\x18\x04 \x03(\x0b\x32!.milvus.proto.common.SegmentStats*2\n\x11InsertDataVersion\x12\x0c\n\x08RowBased\x10\x00\x12\x0f\n\x0b\x43olumnBased\x10\x01\x42\x33Z1github.com/milvus-io/milvus-proto/go-api/v2/msgpbb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\tmsg.proto\x12\x10milvus.proto.msg\x1a\x0c\x63ommon.proto\x1a\x0cschema.proto\"\xaa\x03\n\rInsertRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x11\n\tshardName\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x62_name\x18\x03 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x04 \x01(\t\x12\x16\n\x0epartition_name\x18\x05 \x01(\t\x12\x0c\n\x04\x64\x62ID\x18\x06 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x07 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x08 \x01(\x03\x12\x11\n\tsegmentID\x18\t \x01(\x03\x12\x12\n\ntimestamps\x18\n \x03(\x04\x12\x0e\n\x06rowIDs\x18\x0b \x03(\x03\x12+\n\x08row_data\x18\x0c \x03(\x0b\x32\x19.milvus.proto.common.Blob\x12\x33\n\x0b\x66ields_data\x18\r \x03(\x0b\x32\x1e.milvus.proto.schema.FieldData\x12\x10\n\x08num_rows\x18\x0e \x01(\x04\x12\x34\n\x07version\x18\x0f \x01(\x0e\x32#.milvus.proto.msg.InsertDataVersion\"\xcf\x02\n\rDeleteRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x11\n\tshardName\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x62_name\x18\x03 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x04 \x01(\t\x12\x16\n\x0epartition_name\x18\x05 \x01(\t\x12\x0c\n\x04\x64\x62ID\x18\x06 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x07 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x08 \x01(\x03\x12\x1a\n\x12int64_primary_keys\x18\t \x03(\x03\x12\x12\n\ntimestamps\x18\n \x03(\x04\x12\x10\n\x08num_rows\x18\x0b \x01(\x03\x12.\n\x0cprimary_keys\x18\x0c \x01(\x0b\x32\x18.milvus.proto.schema.IDs\x12\x12\n\nsegment_id\x18\r \x01(\x03\"W\n\x0bMsgPosition\x12\x14\n\x0c\x63hannel_name\x18\x01 \x01(\t\x12\r\n\x05msgID\x18\x02 \x01(\x0c\x12\x10\n\x08msgGroup\x18\x03 \x01(\t\x12\x11\n\ttimestamp\x18\x04 \x01(\x04\"\x9f\x02\n\x17\x43reateCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x16\n\x0e\x63ollectionName\x18\x03 \x01(\t\x12\x15\n\rpartitionName\x18\x04 \x01(\t\x12\x0c\n\x04\x64\x62ID\x18\x05 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x06 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x07 \x01(\x03\x12\x0e\n\x06schema\x18\x08 \x01(\x0c\x12\x1b\n\x13virtualChannelNames\x18\t \x03(\t\x12\x1c\n\x14physicalChannelNames\x18\n \x03(\t\x12\x14\n\x0cpartitionIDs\x18\x0b \x03(\x03\"\x90\x01\n\x15\x44ropCollectionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x16\n\x0e\x63ollectionName\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x62ID\x18\x04 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x05 \x01(\x03\"\xbf\x01\n\x16\x43reatePartitionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 
\x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t\x12\x0c\n\x04\x64\x62ID\x18\x05 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x06 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x07 \x01(\x03\"\xbd\x01\n\x14\x44ropPartitionRequest\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x16\n\x0epartition_name\x18\x04 \x01(\t\x12\x0c\n\x04\x64\x62ID\x18\x05 \x01(\x03\x12\x14\n\x0c\x63ollectionID\x18\x06 \x01(\x03\x12\x13\n\x0bpartitionID\x18\x07 \x01(\x03\"9\n\x0bTimeTickMsg\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\"\x9f\x01\n\rDataNodeTtMsg\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase\x12\x14\n\x0c\x63hannel_name\x18\x02 \x01(\t\x12\x11\n\ttimestamp\x18\x03 \x01(\x04\x12\x39\n\x0esegments_stats\x18\x04 \x03(\x0b\x32!.milvus.proto.common.SegmentStats*2\n\x11InsertDataVersion\x12\x0c\n\x08RowBased\x10\x00\x12\x0f\n\x0b\x43olumnBased\x10\x01\x42\x33Z1github.com/milvus-io/milvus-proto/go-api/v2/msgpbb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -24,24 +24,24 @@ if _descriptor._USE_C_DESCRIPTORS == False: _globals['DESCRIPTOR']._options = None _globals['DESCRIPTOR']._serialized_options = b'Z1github.com/milvus-io/milvus-proto/go-api/v2/msgpb' - _globals['_INSERTDATAVERSION']._serialized_start=1939 - _globals['_INSERTDATAVERSION']._serialized_end=1989 + _globals['_INSERTDATAVERSION']._serialized_start=1959 + _globals['_INSERTDATAVERSION']._serialized_end=2009 _globals['_INSERTREQUEST']._serialized_start=60 _globals['_INSERTREQUEST']._serialized_end=486 _globals['_DELETEREQUEST']._serialized_start=489 - _globals['_DELETEREQUEST']._serialized_end=804 - _globals['_MSGPOSITION']._serialized_start=806 - _globals['_MSGPOSITION']._serialized_end=893 - _globals['_CREATECOLLECTIONREQUEST']._serialized_start=896 - _globals['_CREATECOLLECTIONREQUEST']._serialized_end=1183 - _globals['_DROPCOLLECTIONREQUEST']._serialized_start=1186 - _globals['_DROPCOLLECTIONREQUEST']._serialized_end=1330 - _globals['_CREATEPARTITIONREQUEST']._serialized_start=1333 - _globals['_CREATEPARTITIONREQUEST']._serialized_end=1524 - _globals['_DROPPARTITIONREQUEST']._serialized_start=1527 - _globals['_DROPPARTITIONREQUEST']._serialized_end=1716 - _globals['_TIMETICKMSG']._serialized_start=1718 - _globals['_TIMETICKMSG']._serialized_end=1775 - _globals['_DATANODETTMSG']._serialized_start=1778 - _globals['_DATANODETTMSG']._serialized_end=1937 + _globals['_DELETEREQUEST']._serialized_end=824 + _globals['_MSGPOSITION']._serialized_start=826 + _globals['_MSGPOSITION']._serialized_end=913 + _globals['_CREATECOLLECTIONREQUEST']._serialized_start=916 + _globals['_CREATECOLLECTIONREQUEST']._serialized_end=1203 + _globals['_DROPCOLLECTIONREQUEST']._serialized_start=1206 + _globals['_DROPCOLLECTIONREQUEST']._serialized_end=1350 + _globals['_CREATEPARTITIONREQUEST']._serialized_start=1353 + _globals['_CREATEPARTITIONREQUEST']._serialized_end=1544 + _globals['_DROPPARTITIONREQUEST']._serialized_start=1547 + _globals['_DROPPARTITIONREQUEST']._serialized_end=1736 + _globals['_TIMETICKMSG']._serialized_start=1738 + _globals['_TIMETICKMSG']._serialized_end=1795 + _globals['_DATANODETTMSG']._serialized_start=1798 + _globals['_DATANODETTMSG']._serialized_end=1957 # @@protoc_insertion_point(module_scope) diff --git a/pymilvus/grpc_gen/msg_pb2.pyi b/pymilvus/grpc_gen/msg_pb2.pyi index b3da2757c..f61f9488b 100644 --- 
a/pymilvus/grpc_gen/msg_pb2.pyi +++ b/pymilvus/grpc_gen/msg_pb2.pyi @@ -50,7 +50,7 @@ class InsertRequest(_message.Message): def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., shardName: _Optional[str] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., partition_name: _Optional[str] = ..., dbID: _Optional[int] = ..., collectionID: _Optional[int] = ..., partitionID: _Optional[int] = ..., segmentID: _Optional[int] = ..., timestamps: _Optional[_Iterable[int]] = ..., rowIDs: _Optional[_Iterable[int]] = ..., row_data: _Optional[_Iterable[_Union[_common_pb2.Blob, _Mapping]]] = ..., fields_data: _Optional[_Iterable[_Union[_schema_pb2.FieldData, _Mapping]]] = ..., num_rows: _Optional[int] = ..., version: _Optional[_Union[InsertDataVersion, str]] = ...) -> None: ... class DeleteRequest(_message.Message): - __slots__ = ("base", "shardName", "db_name", "collection_name", "partition_name", "dbID", "collectionID", "partitionID", "int64_primary_keys", "timestamps", "num_rows", "primary_keys") + __slots__ = ("base", "shardName", "db_name", "collection_name", "partition_name", "dbID", "collectionID", "partitionID", "int64_primary_keys", "timestamps", "num_rows", "primary_keys", "segment_id") BASE_FIELD_NUMBER: _ClassVar[int] SHARDNAME_FIELD_NUMBER: _ClassVar[int] DB_NAME_FIELD_NUMBER: _ClassVar[int] @@ -63,6 +63,7 @@ class DeleteRequest(_message.Message): TIMESTAMPS_FIELD_NUMBER: _ClassVar[int] NUM_ROWS_FIELD_NUMBER: _ClassVar[int] PRIMARY_KEYS_FIELD_NUMBER: _ClassVar[int] + SEGMENT_ID_FIELD_NUMBER: _ClassVar[int] base: _common_pb2.MsgBase shardName: str db_name: str @@ -75,7 +76,8 @@ class DeleteRequest(_message.Message): timestamps: _containers.RepeatedScalarFieldContainer[int] num_rows: int primary_keys: _schema_pb2.IDs - def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., shardName: _Optional[str] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., partition_name: _Optional[str] = ..., dbID: _Optional[int] = ..., collectionID: _Optional[int] = ..., partitionID: _Optional[int] = ..., int64_primary_keys: _Optional[_Iterable[int]] = ..., timestamps: _Optional[_Iterable[int]] = ..., num_rows: _Optional[int] = ..., primary_keys: _Optional[_Union[_schema_pb2.IDs, _Mapping]] = ...) -> None: ... + segment_id: int + def __init__(self, base: _Optional[_Union[_common_pb2.MsgBase, _Mapping]] = ..., shardName: _Optional[str] = ..., db_name: _Optional[str] = ..., collection_name: _Optional[str] = ..., partition_name: _Optional[str] = ..., dbID: _Optional[int] = ..., collectionID: _Optional[int] = ..., partitionID: _Optional[int] = ..., int64_primary_keys: _Optional[_Iterable[int]] = ..., timestamps: _Optional[_Iterable[int]] = ..., num_rows: _Optional[int] = ..., primary_keys: _Optional[_Union[_schema_pb2.IDs, _Mapping]] = ..., segment_id: _Optional[int] = ...) -> None: ... class MsgPosition(_message.Message): __slots__ = ("channel_name", "msgID", "msgGroup", "timestamp") diff --git a/pymilvus/grpc_gen/python_gen.sh b/pymilvus/grpc_gen/python_gen.sh index a227b2d90..435fe2e08 100755 --- a/pymilvus/grpc_gen/python_gen.sh +++ b/pymilvus/grpc_gen/python_gen.sh @@ -3,8 +3,6 @@ OUTDIR=. 
PROTO_DIR="milvus-proto/proto" -python -m pip install "grpcio-tools==$(python3 -c 'import grpc; print(grpc.__version__)')" - python -m grpc_tools.protoc -I ${PROTO_DIR} --python_out=${OUTDIR} --pyi_out=${OUTDIR} ${PROTO_DIR}/common.proto python -m grpc_tools.protoc -I ${PROTO_DIR} --python_out=${OUTDIR} --pyi_out=${OUTDIR} ${PROTO_DIR}/schema.proto python -m grpc_tools.protoc -I ${PROTO_DIR} --python_out=${OUTDIR} --pyi_out=${OUTDIR} ${PROTO_DIR}/feder.proto diff --git a/pymilvus/grpc_gen/rg_pb2.py b/pymilvus/grpc_gen/rg_pb2.py index 1806e7b7f..5f2ae2f69 100644 --- a/pymilvus/grpc_gen/rg_pb2.py +++ b/pymilvus/grpc_gen/rg_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: rg.proto -# Protobuf Python Version: 4.25.0 +# Protobuf Python Version: 4.25.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool @@ -12,9 +12,10 @@ _sym_db = _symbol_database.Default() +from . import common_pb2 as common__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x08rg.proto\x12\x0fmilvus.proto.rg\"&\n\x12ResourceGroupLimit\x12\x10\n\x08node_num\x18\x01 \x01(\x05\"/\n\x15ResourceGroupTransfer\x12\x16\n\x0eresource_group\x18\x01 \x01(\t\"\xfd\x01\n\x13ResourceGroupConfig\x12\x35\n\x08requests\x18\x01 \x01(\x0b\x32#.milvus.proto.rg.ResourceGroupLimit\x12\x33\n\x06limits\x18\x02 \x01(\x0b\x32#.milvus.proto.rg.ResourceGroupLimit\x12=\n\rtransfer_from\x18\x03 \x03(\x0b\x32&.milvus.proto.rg.ResourceGroupTransfer\x12;\n\x0btransfer_to\x18\x04 \x03(\x0b\x32&.milvus.proto.rg.ResourceGroupTransferBp\n\x0eio.milvus.grpcB\x12ResourceGroupProtoP\x01Z0github.com/milvus-io/milvus-proto/go-api/v2/rgpb\xa0\x01\x01\xaa\x02\x12Milvus.Client.Grpcb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x08rg.proto\x12\x0fmilvus.proto.rg\x1a\x0c\x63ommon.proto\"&\n\x12ResourceGroupLimit\x12\x10\n\x08node_num\x18\x01 \x01(\x05\"/\n\x15ResourceGroupTransfer\x12\x16\n\x0eresource_group\x18\x01 \x01(\t\"Q\n\x17ResourceGroupNodeFilter\x12\x36\n\x0bnode_labels\x18\x01 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\"\xbc\x02\n\x13ResourceGroupConfig\x12\x35\n\x08requests\x18\x01 \x01(\x0b\x32#.milvus.proto.rg.ResourceGroupLimit\x12\x33\n\x06limits\x18\x02 \x01(\x0b\x32#.milvus.proto.rg.ResourceGroupLimit\x12=\n\rtransfer_from\x18\x03 \x03(\x0b\x32&.milvus.proto.rg.ResourceGroupTransfer\x12;\n\x0btransfer_to\x18\x04 \x03(\x0b\x32&.milvus.proto.rg.ResourceGroupTransfer\x12=\n\x0bnode_filter\x18\x05 \x01(\x0b\x32(.milvus.proto.rg.ResourceGroupNodeFilterBp\n\x0eio.milvus.grpcB\x12ResourceGroupProtoP\x01Z0github.com/milvus-io/milvus-proto/go-api/v2/rgpb\xa0\x01\x01\xaa\x02\x12Milvus.Client.Grpcb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -22,10 +23,12 @@ if _descriptor._USE_C_DESCRIPTORS == False: _globals['DESCRIPTOR']._options = None _globals['DESCRIPTOR']._serialized_options = b'\n\016io.milvus.grpcB\022ResourceGroupProtoP\001Z0github.com/milvus-io/milvus-proto/go-api/v2/rgpb\240\001\001\252\002\022Milvus.Client.Grpc' - _globals['_RESOURCEGROUPLIMIT']._serialized_start=29 - _globals['_RESOURCEGROUPLIMIT']._serialized_end=67 - _globals['_RESOURCEGROUPTRANSFER']._serialized_start=69 - _globals['_RESOURCEGROUPTRANSFER']._serialized_end=116 - _globals['_RESOURCEGROUPCONFIG']._serialized_start=119 - _globals['_RESOURCEGROUPCONFIG']._serialized_end=372 + 
_globals['_RESOURCEGROUPLIMIT']._serialized_start=43 + _globals['_RESOURCEGROUPLIMIT']._serialized_end=81 + _globals['_RESOURCEGROUPTRANSFER']._serialized_start=83 + _globals['_RESOURCEGROUPTRANSFER']._serialized_end=130 + _globals['_RESOURCEGROUPNODEFILTER']._serialized_start=132 + _globals['_RESOURCEGROUPNODEFILTER']._serialized_end=213 + _globals['_RESOURCEGROUPCONFIG']._serialized_start=216 + _globals['_RESOURCEGROUPCONFIG']._serialized_end=532 # @@protoc_insertion_point(module_scope) diff --git a/pymilvus/grpc_gen/rg_pb2.pyi b/pymilvus/grpc_gen/rg_pb2.pyi index bc414361a..57620897f 100644 --- a/pymilvus/grpc_gen/rg_pb2.pyi +++ b/pymilvus/grpc_gen/rg_pb2.pyi @@ -1,3 +1,4 @@ +from . import common_pb2 as _common_pb2 from google.protobuf.internal import containers as _containers from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -17,14 +18,22 @@ class ResourceGroupTransfer(_message.Message): resource_group: str def __init__(self, resource_group: _Optional[str] = ...) -> None: ... +class ResourceGroupNodeFilter(_message.Message): + __slots__ = ("node_labels",) + NODE_LABELS_FIELD_NUMBER: _ClassVar[int] + node_labels: _containers.RepeatedCompositeFieldContainer[_common_pb2.KeyValuePair] + def __init__(self, node_labels: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ...) -> None: ... + class ResourceGroupConfig(_message.Message): - __slots__ = ("requests", "limits", "transfer_from", "transfer_to") + __slots__ = ("requests", "limits", "transfer_from", "transfer_to", "node_filter") REQUESTS_FIELD_NUMBER: _ClassVar[int] LIMITS_FIELD_NUMBER: _ClassVar[int] TRANSFER_FROM_FIELD_NUMBER: _ClassVar[int] TRANSFER_TO_FIELD_NUMBER: _ClassVar[int] + NODE_FILTER_FIELD_NUMBER: _ClassVar[int] requests: ResourceGroupLimit limits: ResourceGroupLimit transfer_from: _containers.RepeatedCompositeFieldContainer[ResourceGroupTransfer] transfer_to: _containers.RepeatedCompositeFieldContainer[ResourceGroupTransfer] - def __init__(self, requests: _Optional[_Union[ResourceGroupLimit, _Mapping]] = ..., limits: _Optional[_Union[ResourceGroupLimit, _Mapping]] = ..., transfer_from: _Optional[_Iterable[_Union[ResourceGroupTransfer, _Mapping]]] = ..., transfer_to: _Optional[_Iterable[_Union[ResourceGroupTransfer, _Mapping]]] = ...) -> None: ... + node_filter: ResourceGroupNodeFilter + def __init__(self, requests: _Optional[_Union[ResourceGroupLimit, _Mapping]] = ..., limits: _Optional[_Union[ResourceGroupLimit, _Mapping]] = ..., transfer_from: _Optional[_Iterable[_Union[ResourceGroupTransfer, _Mapping]]] = ..., transfer_to: _Optional[_Iterable[_Union[ResourceGroupTransfer, _Mapping]]] = ..., node_filter: _Optional[_Union[ResourceGroupNodeFilter, _Mapping]] = ...) -> None: ... diff --git a/pymilvus/grpc_gen/schema_pb2.py b/pymilvus/grpc_gen/schema_pb2.py index b9dd24c21..6482ac224 100644 --- a/pymilvus/grpc_gen/schema_pb2.py +++ b/pymilvus/grpc_gen/schema_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: schema.proto -# Protobuf Python Version: 4.25.0 +# Protobuf Python Version: 4.25.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool @@ -16,7 +16,7 @@ from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0cschema.proto\x12\x13milvus.proto.schema\x1a\x0c\x63ommon.proto\x1a google/protobuf/descriptor.proto\"\xf2\x03\n\x0b\x46ieldSchema\x12\x0f\n\x07\x66ieldID\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x16\n\x0eis_primary_key\x18\x03 \x01(\x08\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\x12\x30\n\tdata_type\x18\x05 \x01(\x0e\x32\x1d.milvus.proto.schema.DataType\x12\x36\n\x0btype_params\x18\x06 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x37\n\x0cindex_params\x18\x07 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x0e\n\x06\x61utoID\x18\x08 \x01(\x08\x12.\n\x05state\x18\t \x01(\x0e\x32\x1f.milvus.proto.schema.FieldState\x12\x33\n\x0c\x65lement_type\x18\n \x01(\x0e\x32\x1d.milvus.proto.schema.DataType\x12\x36\n\rdefault_value\x18\x0b \x01(\x0b\x32\x1f.milvus.proto.schema.ValueField\x12\x12\n\nis_dynamic\x18\x0c \x01(\x08\x12\x18\n\x10is_partition_key\x18\r \x01(\x08\x12\x19\n\x11is_clustering_key\x18\x0e \x01(\x08\"\xd0\x01\n\x10\x43ollectionSchema\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x12\n\x06\x61utoID\x18\x03 \x01(\x08\x42\x02\x18\x01\x12\x30\n\x06\x66ields\x18\x04 \x03(\x0b\x32 .milvus.proto.schema.FieldSchema\x12\x1c\n\x14\x65nable_dynamic_field\x18\x05 \x01(\x08\x12\x35\n\nproperties\x18\x06 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\"\x19\n\tBoolArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x08\"\x18\n\x08IntArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x05\"\x19\n\tLongArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x03\"\x1a\n\nFloatArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x02\"\x1b\n\x0b\x44oubleArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x01\"\x1a\n\nBytesArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x0c\"\x1b\n\x0bStringArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\t\"q\n\nArrayArray\x12.\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32 .milvus.proto.schema.ScalarField\x12\x33\n\x0c\x65lement_type\x18\x02 \x01(\x0e\x32\x1d.milvus.proto.schema.DataType\"\x19\n\tJSONArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x0c\"\xac\x01\n\nValueField\x12\x13\n\tbool_data\x18\x01 \x01(\x08H\x00\x12\x12\n\x08int_data\x18\x02 \x01(\x05H\x00\x12\x13\n\tlong_data\x18\x03 \x01(\x03H\x00\x12\x14\n\nfloat_data\x18\x04 \x01(\x02H\x00\x12\x15\n\x0b\x64ouble_data\x18\x05 \x01(\x01H\x00\x12\x15\n\x0bstring_data\x18\x06 \x01(\tH\x00\x12\x14\n\nbytes_data\x18\x07 \x01(\x0cH\x00\x42\x06\n\x04\x64\x61ta\"\xfe\x03\n\x0bScalarField\x12\x33\n\tbool_data\x18\x01 \x01(\x0b\x32\x1e.milvus.proto.schema.BoolArrayH\x00\x12\x31\n\x08int_data\x18\x02 \x01(\x0b\x32\x1d.milvus.proto.schema.IntArrayH\x00\x12\x33\n\tlong_data\x18\x03 \x01(\x0b\x32\x1e.milvus.proto.schema.LongArrayH\x00\x12\x35\n\nfloat_data\x18\x04 \x01(\x0b\x32\x1f.milvus.proto.schema.FloatArrayH\x00\x12\x37\n\x0b\x64ouble_data\x18\x05 \x01(\x0b\x32 .milvus.proto.schema.DoubleArrayH\x00\x12\x37\n\x0bstring_data\x18\x06 \x01(\x0b\x32 .milvus.proto.schema.StringArrayH\x00\x12\x35\n\nbytes_data\x18\x07 \x01(\x0b\x32\x1f.milvus.proto.schema.BytesArrayH\x00\x12\x35\n\narray_data\x18\x08 \x01(\x0b\x32\x1f.milvus.proto.schema.ArrayArrayH\x00\x12\x33\n\tjson_data\x18\t 
\x01(\x0b\x32\x1e.milvus.proto.schema.JSONArrayH\x00\x42\x06\n\x04\x64\x61ta\"1\n\x10SparseFloatArray\x12\x10\n\x08\x63ontents\x18\x01 \x03(\x0c\x12\x0b\n\x03\x64im\x18\x02 \x01(\x03\"\xef\x01\n\x0bVectorField\x12\x0b\n\x03\x64im\x18\x01 \x01(\x03\x12\x37\n\x0c\x66loat_vector\x18\x02 \x01(\x0b\x32\x1f.milvus.proto.schema.FloatArrayH\x00\x12\x17\n\rbinary_vector\x18\x03 \x01(\x0cH\x00\x12\x18\n\x0e\x66loat16_vector\x18\x04 \x01(\x0cH\x00\x12\x19\n\x0f\x62\x66loat16_vector\x18\x05 \x01(\x0cH\x00\x12\x44\n\x13sparse_float_vector\x18\x06 \x01(\x0b\x32%.milvus.proto.schema.SparseFloatArrayH\x00\x42\x06\n\x04\x64\x61ta\"\xe5\x01\n\tFieldData\x12+\n\x04type\x18\x01 \x01(\x0e\x32\x1d.milvus.proto.schema.DataType\x12\x12\n\nfield_name\x18\x02 \x01(\t\x12\x33\n\x07scalars\x18\x03 \x01(\x0b\x32 .milvus.proto.schema.ScalarFieldH\x00\x12\x33\n\x07vectors\x18\x04 \x01(\x0b\x32 .milvus.proto.schema.VectorFieldH\x00\x12\x10\n\x08\x66ield_id\x18\x05 \x01(\x03\x12\x12\n\nis_dynamic\x18\x06 \x01(\x08\x42\x07\n\x05\x66ield\"w\n\x03IDs\x12\x30\n\x06int_id\x18\x01 \x01(\x0b\x32\x1e.milvus.proto.schema.LongArrayH\x00\x12\x32\n\x06str_id\x18\x02 \x01(\x0b\x32 .milvus.proto.schema.StringArrayH\x00\x42\n\n\x08id_field\"\xb3\x02\n\x10SearchResultData\x12\x13\n\x0bnum_queries\x18\x01 \x01(\x03\x12\r\n\x05top_k\x18\x02 \x01(\x03\x12\x33\n\x0b\x66ields_data\x18\x03 \x03(\x0b\x32\x1e.milvus.proto.schema.FieldData\x12\x0e\n\x06scores\x18\x04 \x03(\x02\x12%\n\x03ids\x18\x05 \x01(\x0b\x32\x18.milvus.proto.schema.IDs\x12\r\n\x05topks\x18\x06 \x03(\x03\x12\x15\n\routput_fields\x18\x07 \x03(\t\x12<\n\x14group_by_field_value\x18\x08 \x01(\x0b\x32\x1e.milvus.proto.schema.FieldData\x12\x18\n\x10\x61ll_search_count\x18\t \x01(\x03\x12\x11\n\tdistances\x18\n \x03(\x02\"Y\n\x14VectorClusteringInfo\x12\r\n\x05\x66ield\x18\x01 \x01(\t\x12\x32\n\x08\x63\x65ntroid\x18\x02 \x01(\x0b\x32 .milvus.proto.schema.VectorField\"%\n\x14ScalarClusteringInfo\x12\r\n\x05\x66ield\x18\x01 \x01(\t\"\xa8\x01\n\x0e\x43lusteringInfo\x12J\n\x17vector_clustering_infos\x18\x01 \x03(\x0b\x32).milvus.proto.schema.VectorClusteringInfo\x12J\n\x17scalar_clustering_infos\x18\x02 \x03(\x0b\x32).milvus.proto.schema.ScalarClusteringInfo*\xef\x01\n\x08\x44\x61taType\x12\x08\n\x04None\x10\x00\x12\x08\n\x04\x42ool\x10\x01\x12\x08\n\x04Int8\x10\x02\x12\t\n\x05Int16\x10\x03\x12\t\n\x05Int32\x10\x04\x12\t\n\x05Int64\x10\x05\x12\t\n\x05\x46loat\x10\n\x12\n\n\x06\x44ouble\x10\x0b\x12\n\n\x06String\x10\x14\x12\x0b\n\x07VarChar\x10\x15\x12\t\n\x05\x41rray\x10\x16\x12\x08\n\x04JSON\x10\x17\x12\x10\n\x0c\x42inaryVector\x10\x64\x12\x0f\n\x0b\x46loatVector\x10\x65\x12\x11\n\rFloat16Vector\x10\x66\x12\x12\n\x0e\x42\x46loat16Vector\x10g\x12\x15\n\x11SparseFloatVector\x10h*V\n\nFieldState\x12\x10\n\x0c\x46ieldCreated\x10\x00\x12\x11\n\rFieldCreating\x10\x01\x12\x11\n\rFieldDropping\x10\x02\x12\x10\n\x0c\x46ieldDropped\x10\x03\x42m\n\x0eio.milvus.grpcB\x0bSchemaProtoP\x01Z4github.com/milvus-io/milvus-proto/go-api/v2/schemapb\xa0\x01\x01\xaa\x02\x12Milvus.Client.Grpcb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0cschema.proto\x12\x13milvus.proto.schema\x1a\x0c\x63ommon.proto\x1a google/protobuf/descriptor.proto\"\xa0\x04\n\x0b\x46ieldSchema\x12\x0f\n\x07\x66ieldID\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x16\n\x0eis_primary_key\x18\x03 \x01(\x08\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\x12\x30\n\tdata_type\x18\x05 \x01(\x0e\x32\x1d.milvus.proto.schema.DataType\x12\x36\n\x0btype_params\x18\x06 
\x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x37\n\x0cindex_params\x18\x07 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x0e\n\x06\x61utoID\x18\x08 \x01(\x08\x12.\n\x05state\x18\t \x01(\x0e\x32\x1f.milvus.proto.schema.FieldState\x12\x33\n\x0c\x65lement_type\x18\n \x01(\x0e\x32\x1d.milvus.proto.schema.DataType\x12\x36\n\rdefault_value\x18\x0b \x01(\x0b\x32\x1f.milvus.proto.schema.ValueField\x12\x12\n\nis_dynamic\x18\x0c \x01(\x08\x12\x18\n\x10is_partition_key\x18\r \x01(\x08\x12\x19\n\x11is_clustering_key\x18\x0e \x01(\x08\x12\x10\n\x08nullable\x18\x0f \x01(\x08\x12\x1a\n\x12is_function_output\x18\x10 \x01(\x08\"\x8d\x02\n\x0e\x46unctionSchema\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x03\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12/\n\x04type\x18\x04 \x01(\x0e\x32!.milvus.proto.schema.FunctionType\x12\x19\n\x11input_field_names\x18\x05 \x03(\t\x12\x17\n\x0finput_field_ids\x18\x06 \x03(\x03\x12\x1a\n\x12output_field_names\x18\x07 \x03(\t\x12\x18\n\x10output_field_ids\x18\x08 \x03(\x03\x12\x31\n\x06params\x18\t \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\"\x88\x02\n\x10\x43ollectionSchema\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x12\n\x06\x61utoID\x18\x03 \x01(\x08\x42\x02\x18\x01\x12\x30\n\x06\x66ields\x18\x04 \x03(\x0b\x32 .milvus.proto.schema.FieldSchema\x12\x1c\n\x14\x65nable_dynamic_field\x18\x05 \x01(\x08\x12\x35\n\nproperties\x18\x06 \x03(\x0b\x32!.milvus.proto.common.KeyValuePair\x12\x36\n\tfunctions\x18\x07 \x03(\x0b\x32#.milvus.proto.schema.FunctionSchema\"\x19\n\tBoolArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x08\"\x18\n\x08IntArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x05\"\x19\n\tLongArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x03\"\x1a\n\nFloatArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x02\"\x1b\n\x0b\x44oubleArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x01\"\x1a\n\nBytesArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x0c\"\x1b\n\x0bStringArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\t\"q\n\nArrayArray\x12.\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32 .milvus.proto.schema.ScalarField\x12\x33\n\x0c\x65lement_type\x18\x02 \x01(\x0e\x32\x1d.milvus.proto.schema.DataType\"\x19\n\tJSONArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x0c\"\x1d\n\rGeometryArray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x0c\"\xac\x01\n\nValueField\x12\x13\n\tbool_data\x18\x01 \x01(\x08H\x00\x12\x12\n\x08int_data\x18\x02 \x01(\x05H\x00\x12\x13\n\tlong_data\x18\x03 \x01(\x03H\x00\x12\x14\n\nfloat_data\x18\x04 \x01(\x02H\x00\x12\x15\n\x0b\x64ouble_data\x18\x05 \x01(\x01H\x00\x12\x15\n\x0bstring_data\x18\x06 \x01(\tH\x00\x12\x14\n\nbytes_data\x18\x07 \x01(\x0cH\x00\x42\x06\n\x04\x64\x61ta\"\xbb\x04\n\x0bScalarField\x12\x33\n\tbool_data\x18\x01 \x01(\x0b\x32\x1e.milvus.proto.schema.BoolArrayH\x00\x12\x31\n\x08int_data\x18\x02 \x01(\x0b\x32\x1d.milvus.proto.schema.IntArrayH\x00\x12\x33\n\tlong_data\x18\x03 \x01(\x0b\x32\x1e.milvus.proto.schema.LongArrayH\x00\x12\x35\n\nfloat_data\x18\x04 \x01(\x0b\x32\x1f.milvus.proto.schema.FloatArrayH\x00\x12\x37\n\x0b\x64ouble_data\x18\x05 \x01(\x0b\x32 .milvus.proto.schema.DoubleArrayH\x00\x12\x37\n\x0bstring_data\x18\x06 \x01(\x0b\x32 .milvus.proto.schema.StringArrayH\x00\x12\x35\n\nbytes_data\x18\x07 \x01(\x0b\x32\x1f.milvus.proto.schema.BytesArrayH\x00\x12\x35\n\narray_data\x18\x08 \x01(\x0b\x32\x1f.milvus.proto.schema.ArrayArrayH\x00\x12\x33\n\tjson_data\x18\t \x01(\x0b\x32\x1e.milvus.proto.schema.JSONArrayH\x00\x12;\n\rgeometry_data\x18\n 
\x01(\x0b\x32\".milvus.proto.schema.GeometryArrayH\x00\x42\x06\n\x04\x64\x61ta\"1\n\x10SparseFloatArray\x12\x10\n\x08\x63ontents\x18\x01 \x03(\x0c\x12\x0b\n\x03\x64im\x18\x02 \x01(\x03\"\xef\x01\n\x0bVectorField\x12\x0b\n\x03\x64im\x18\x01 \x01(\x03\x12\x37\n\x0c\x66loat_vector\x18\x02 \x01(\x0b\x32\x1f.milvus.proto.schema.FloatArrayH\x00\x12\x17\n\rbinary_vector\x18\x03 \x01(\x0cH\x00\x12\x18\n\x0e\x66loat16_vector\x18\x04 \x01(\x0cH\x00\x12\x19\n\x0f\x62\x66loat16_vector\x18\x05 \x01(\x0cH\x00\x12\x44\n\x13sparse_float_vector\x18\x06 \x01(\x0b\x32%.milvus.proto.schema.SparseFloatArrayH\x00\x42\x06\n\x04\x64\x61ta\"\xf9\x01\n\tFieldData\x12+\n\x04type\x18\x01 \x01(\x0e\x32\x1d.milvus.proto.schema.DataType\x12\x12\n\nfield_name\x18\x02 \x01(\t\x12\x33\n\x07scalars\x18\x03 \x01(\x0b\x32 .milvus.proto.schema.ScalarFieldH\x00\x12\x33\n\x07vectors\x18\x04 \x01(\x0b\x32 .milvus.proto.schema.VectorFieldH\x00\x12\x10\n\x08\x66ield_id\x18\x05 \x01(\x03\x12\x12\n\nis_dynamic\x18\x06 \x01(\x08\x12\x12\n\nvalid_data\x18\x07 \x03(\x08\x42\x07\n\x05\x66ield\"w\n\x03IDs\x12\x30\n\x06int_id\x18\x01 \x01(\x0b\x32\x1e.milvus.proto.schema.LongArrayH\x00\x12\x32\n\x06str_id\x18\x02 \x01(\x0b\x32 .milvus.proto.schema.StringArrayH\x00\x42\n\n\x08id_field\"<\n\x17SearchIteratorV2Results\x12\r\n\x05token\x18\x01 \x01(\t\x12\x12\n\nlast_bound\x18\x02 \x01(\x02\"\xa9\x03\n\x10SearchResultData\x12\x13\n\x0bnum_queries\x18\x01 \x01(\x03\x12\r\n\x05top_k\x18\x02 \x01(\x03\x12\x33\n\x0b\x66ields_data\x18\x03 \x03(\x0b\x32\x1e.milvus.proto.schema.FieldData\x12\x0e\n\x06scores\x18\x04 \x03(\x02\x12%\n\x03ids\x18\x05 \x01(\x0b\x32\x18.milvus.proto.schema.IDs\x12\r\n\x05topks\x18\x06 \x03(\x03\x12\x15\n\routput_fields\x18\x07 \x03(\t\x12<\n\x14group_by_field_value\x18\x08 \x01(\x0b\x32\x1e.milvus.proto.schema.FieldData\x12\x18\n\x10\x61ll_search_count\x18\t \x01(\x03\x12\x11\n\tdistances\x18\n \x03(\x02\x12U\n\x1asearch_iterator_v2_results\x18\x0b \x01(\x0b\x32,.milvus.proto.schema.SearchIteratorV2ResultsH\x00\x88\x01\x01\x42\x1d\n\x1b_search_iterator_v2_results\"Y\n\x14VectorClusteringInfo\x12\r\n\x05\x66ield\x18\x01 \x01(\t\x12\x32\n\x08\x63\x65ntroid\x18\x02 \x01(\x0b\x32 .milvus.proto.schema.VectorField\"%\n\x14ScalarClusteringInfo\x12\r\n\x05\x66ield\x18\x01 \x01(\t\"\xa8\x01\n\x0e\x43lusteringInfo\x12J\n\x17vector_clustering_infos\x18\x01 \x03(\x0b\x32).milvus.proto.schema.VectorClusteringInfo\x12J\n\x17scalar_clustering_infos\x18\x02 \x03(\x0b\x32).milvus.proto.schema.ScalarClusteringInfo\"\xa8\x01\n\rTemplateValue\x12\x12\n\x08\x62ool_val\x18\x01 \x01(\x08H\x00\x12\x13\n\tint64_val\x18\x02 \x01(\x03H\x00\x12\x13\n\tfloat_val\x18\x03 \x01(\x01H\x00\x12\x14\n\nstring_val\x18\x04 \x01(\tH\x00\x12<\n\tarray_val\x18\x05 \x01(\x0b\x32\'.milvus.proto.schema.TemplateArrayValueH\x00\x42\x05\n\x03val\"\xf1\x02\n\x12TemplateArrayValue\x12\x33\n\tbool_data\x18\x01 \x01(\x0b\x32\x1e.milvus.proto.schema.BoolArrayH\x00\x12\x33\n\tlong_data\x18\x02 \x01(\x0b\x32\x1e.milvus.proto.schema.LongArrayH\x00\x12\x37\n\x0b\x64ouble_data\x18\x03 \x01(\x0b\x32 .milvus.proto.schema.DoubleArrayH\x00\x12\x37\n\x0bstring_data\x18\x04 \x01(\x0b\x32 .milvus.proto.schema.StringArrayH\x00\x12\x42\n\narray_data\x18\x05 \x01(\x0b\x32,.milvus.proto.schema.TemplateArrayValueArrayH\x00\x12\x33\n\tjson_data\x18\x06 \x01(\x0b\x32\x1e.milvus.proto.schema.JSONArrayH\x00\x42\x06\n\x04\x64\x61ta\"P\n\x17TemplateArrayValueArray\x12\x35\n\x04\x64\x61ta\x18\x01 
\x03(\x0b\x32\'.milvus.proto.schema.TemplateArrayValue*\xfd\x01\n\x08\x44\x61taType\x12\x08\n\x04None\x10\x00\x12\x08\n\x04\x42ool\x10\x01\x12\x08\n\x04Int8\x10\x02\x12\t\n\x05Int16\x10\x03\x12\t\n\x05Int32\x10\x04\x12\t\n\x05Int64\x10\x05\x12\t\n\x05\x46loat\x10\n\x12\n\n\x06\x44ouble\x10\x0b\x12\n\n\x06String\x10\x14\x12\x0b\n\x07VarChar\x10\x15\x12\t\n\x05\x41rray\x10\x16\x12\x08\n\x04JSON\x10\x17\x12\x0c\n\x08Geometry\x10\x18\x12\x10\n\x0c\x42inaryVector\x10\x64\x12\x0f\n\x0b\x46loatVector\x10\x65\x12\x11\n\rFloat16Vector\x10\x66\x12\x12\n\x0e\x42\x46loat16Vector\x10g\x12\x15\n\x11SparseFloatVector\x10h*8\n\x0c\x46unctionType\x12\x0b\n\x07Unknown\x10\x00\x12\x08\n\x04\x42M25\x10\x01\x12\x11\n\rTextEmbedding\x10\x02*V\n\nFieldState\x12\x10\n\x0c\x46ieldCreated\x10\x00\x12\x11\n\rFieldCreating\x10\x01\x12\x11\n\rFieldDropping\x10\x02\x12\x10\n\x0c\x46ieldDropped\x10\x03\x42m\n\x0eio.milvus.grpcB\x0bSchemaProtoP\x01Z4github.com/milvus-io/milvus-proto/go-api/v2/schemapb\xa0\x01\x01\xaa\x02\x12Milvus.Client.Grpcb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -26,50 +26,64 @@ _globals['DESCRIPTOR']._serialized_options = b'\n\016io.milvus.grpcB\013SchemaProtoP\001Z4github.com/milvus-io/milvus-proto/go-api/v2/schemapb\240\001\001\252\002\022Milvus.Client.Grpc' _globals['_COLLECTIONSCHEMA'].fields_by_name['autoID']._options = None _globals['_COLLECTIONSCHEMA'].fields_by_name['autoID']._serialized_options = b'\030\001' - _globals['_DATATYPE']._serialized_start=3079 - _globals['_DATATYPE']._serialized_end=3318 - _globals['_FIELDSTATE']._serialized_start=3320 - _globals['_FIELDSTATE']._serialized_end=3406 + _globals['_DATATYPE']._serialized_start=4370 + _globals['_DATATYPE']._serialized_end=4623 + _globals['_FUNCTIONTYPE']._serialized_start=4625 + _globals['_FUNCTIONTYPE']._serialized_end=4681 + _globals['_FIELDSTATE']._serialized_start=4683 + _globals['_FIELDSTATE']._serialized_end=4769 _globals['_FIELDSCHEMA']._serialized_start=86 - _globals['_FIELDSCHEMA']._serialized_end=584 - _globals['_COLLECTIONSCHEMA']._serialized_start=587 - _globals['_COLLECTIONSCHEMA']._serialized_end=795 - _globals['_BOOLARRAY']._serialized_start=797 - _globals['_BOOLARRAY']._serialized_end=822 - _globals['_INTARRAY']._serialized_start=824 - _globals['_INTARRAY']._serialized_end=848 - _globals['_LONGARRAY']._serialized_start=850 - _globals['_LONGARRAY']._serialized_end=875 - _globals['_FLOATARRAY']._serialized_start=877 - _globals['_FLOATARRAY']._serialized_end=903 - _globals['_DOUBLEARRAY']._serialized_start=905 - _globals['_DOUBLEARRAY']._serialized_end=932 - _globals['_BYTESARRAY']._serialized_start=934 - _globals['_BYTESARRAY']._serialized_end=960 - _globals['_STRINGARRAY']._serialized_start=962 - _globals['_STRINGARRAY']._serialized_end=989 - _globals['_ARRAYARRAY']._serialized_start=991 - _globals['_ARRAYARRAY']._serialized_end=1104 - _globals['_JSONARRAY']._serialized_start=1106 - _globals['_JSONARRAY']._serialized_end=1131 - _globals['_VALUEFIELD']._serialized_start=1134 - _globals['_VALUEFIELD']._serialized_end=1306 - _globals['_SCALARFIELD']._serialized_start=1309 - _globals['_SCALARFIELD']._serialized_end=1819 - _globals['_SPARSEFLOATARRAY']._serialized_start=1821 - _globals['_SPARSEFLOATARRAY']._serialized_end=1870 - _globals['_VECTORFIELD']._serialized_start=1873 - _globals['_VECTORFIELD']._serialized_end=2112 - _globals['_FIELDDATA']._serialized_start=2115 - _globals['_FIELDDATA']._serialized_end=2344 - _globals['_IDS']._serialized_start=2346 - 
_globals['_IDS']._serialized_end=2465 - _globals['_SEARCHRESULTDATA']._serialized_start=2468 - _globals['_SEARCHRESULTDATA']._serialized_end=2775 - _globals['_VECTORCLUSTERINGINFO']._serialized_start=2777 - _globals['_VECTORCLUSTERINGINFO']._serialized_end=2866 - _globals['_SCALARCLUSTERINGINFO']._serialized_start=2868 - _globals['_SCALARCLUSTERINGINFO']._serialized_end=2905 - _globals['_CLUSTERINGINFO']._serialized_start=2908 - _globals['_CLUSTERINGINFO']._serialized_end=3076 + _globals['_FIELDSCHEMA']._serialized_end=630 + _globals['_FUNCTIONSCHEMA']._serialized_start=633 + _globals['_FUNCTIONSCHEMA']._serialized_end=902 + _globals['_COLLECTIONSCHEMA']._serialized_start=905 + _globals['_COLLECTIONSCHEMA']._serialized_end=1169 + _globals['_BOOLARRAY']._serialized_start=1171 + _globals['_BOOLARRAY']._serialized_end=1196 + _globals['_INTARRAY']._serialized_start=1198 + _globals['_INTARRAY']._serialized_end=1222 + _globals['_LONGARRAY']._serialized_start=1224 + _globals['_LONGARRAY']._serialized_end=1249 + _globals['_FLOATARRAY']._serialized_start=1251 + _globals['_FLOATARRAY']._serialized_end=1277 + _globals['_DOUBLEARRAY']._serialized_start=1279 + _globals['_DOUBLEARRAY']._serialized_end=1306 + _globals['_BYTESARRAY']._serialized_start=1308 + _globals['_BYTESARRAY']._serialized_end=1334 + _globals['_STRINGARRAY']._serialized_start=1336 + _globals['_STRINGARRAY']._serialized_end=1363 + _globals['_ARRAYARRAY']._serialized_start=1365 + _globals['_ARRAYARRAY']._serialized_end=1478 + _globals['_JSONARRAY']._serialized_start=1480 + _globals['_JSONARRAY']._serialized_end=1505 + _globals['_GEOMETRYARRAY']._serialized_start=1507 + _globals['_GEOMETRYARRAY']._serialized_end=1536 + _globals['_VALUEFIELD']._serialized_start=1539 + _globals['_VALUEFIELD']._serialized_end=1711 + _globals['_SCALARFIELD']._serialized_start=1714 + _globals['_SCALARFIELD']._serialized_end=2285 + _globals['_SPARSEFLOATARRAY']._serialized_start=2287 + _globals['_SPARSEFLOATARRAY']._serialized_end=2336 + _globals['_VECTORFIELD']._serialized_start=2339 + _globals['_VECTORFIELD']._serialized_end=2578 + _globals['_FIELDDATA']._serialized_start=2581 + _globals['_FIELDDATA']._serialized_end=2830 + _globals['_IDS']._serialized_start=2832 + _globals['_IDS']._serialized_end=2951 + _globals['_SEARCHITERATORV2RESULTS']._serialized_start=2953 + _globals['_SEARCHITERATORV2RESULTS']._serialized_end=3013 + _globals['_SEARCHRESULTDATA']._serialized_start=3016 + _globals['_SEARCHRESULTDATA']._serialized_end=3441 + _globals['_VECTORCLUSTERINGINFO']._serialized_start=3443 + _globals['_VECTORCLUSTERINGINFO']._serialized_end=3532 + _globals['_SCALARCLUSTERINGINFO']._serialized_start=3534 + _globals['_SCALARCLUSTERINGINFO']._serialized_end=3571 + _globals['_CLUSTERINGINFO']._serialized_start=3574 + _globals['_CLUSTERINGINFO']._serialized_end=3742 + _globals['_TEMPLATEVALUE']._serialized_start=3745 + _globals['_TEMPLATEVALUE']._serialized_end=3913 + _globals['_TEMPLATEARRAYVALUE']._serialized_start=3916 + _globals['_TEMPLATEARRAYVALUE']._serialized_end=4285 + _globals['_TEMPLATEARRAYVALUEARRAY']._serialized_start=4287 + _globals['_TEMPLATEARRAYVALUEARRAY']._serialized_end=4367 # @@protoc_insertion_point(module_scope) diff --git a/pymilvus/grpc_gen/schema_pb2.pyi b/pymilvus/grpc_gen/schema_pb2.pyi index 26b754dee..85be5eb3d 100644 --- a/pymilvus/grpc_gen/schema_pb2.pyi +++ b/pymilvus/grpc_gen/schema_pb2.pyi @@ -22,12 +22,19 @@ class DataType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): VarChar: _ClassVar[DataType] Array: _ClassVar[DataType] 
JSON: _ClassVar[DataType] + Geometry: _ClassVar[DataType] BinaryVector: _ClassVar[DataType] FloatVector: _ClassVar[DataType] Float16Vector: _ClassVar[DataType] BFloat16Vector: _ClassVar[DataType] SparseFloatVector: _ClassVar[DataType] +class FunctionType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + Unknown: _ClassVar[FunctionType] + BM25: _ClassVar[FunctionType] + TextEmbedding: _ClassVar[FunctionType] + class FieldState(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = () FieldCreated: _ClassVar[FieldState] @@ -46,18 +53,22 @@ String: DataType VarChar: DataType Array: DataType JSON: DataType +Geometry: DataType BinaryVector: DataType FloatVector: DataType Float16Vector: DataType BFloat16Vector: DataType SparseFloatVector: DataType +Unknown: FunctionType +BM25: FunctionType +TextEmbedding: FunctionType FieldCreated: FieldState FieldCreating: FieldState FieldDropping: FieldState FieldDropped: FieldState class FieldSchema(_message.Message): - __slots__ = ("fieldID", "name", "is_primary_key", "description", "data_type", "type_params", "index_params", "autoID", "state", "element_type", "default_value", "is_dynamic", "is_partition_key", "is_clustering_key") + __slots__ = ("fieldID", "name", "is_primary_key", "description", "data_type", "type_params", "index_params", "autoID", "state", "element_type", "default_value", "is_dynamic", "is_partition_key", "is_clustering_key", "nullable", "is_function_output") FIELDID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] IS_PRIMARY_KEY_FIELD_NUMBER: _ClassVar[int] @@ -72,6 +83,8 @@ class FieldSchema(_message.Message): IS_DYNAMIC_FIELD_NUMBER: _ClassVar[int] IS_PARTITION_KEY_FIELD_NUMBER: _ClassVar[int] IS_CLUSTERING_KEY_FIELD_NUMBER: _ClassVar[int] + NULLABLE_FIELD_NUMBER: _ClassVar[int] + IS_FUNCTION_OUTPUT_FIELD_NUMBER: _ClassVar[int] fieldID: int name: str is_primary_key: bool @@ -86,23 +99,49 @@ class FieldSchema(_message.Message): is_dynamic: bool is_partition_key: bool is_clustering_key: bool - def __init__(self, fieldID: _Optional[int] = ..., name: _Optional[str] = ..., is_primary_key: bool = ..., description: _Optional[str] = ..., data_type: _Optional[_Union[DataType, str]] = ..., type_params: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ..., index_params: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ..., autoID: bool = ..., state: _Optional[_Union[FieldState, str]] = ..., element_type: _Optional[_Union[DataType, str]] = ..., default_value: _Optional[_Union[ValueField, _Mapping]] = ..., is_dynamic: bool = ..., is_partition_key: bool = ..., is_clustering_key: bool = ...) -> None: ... + nullable: bool + is_function_output: bool + def __init__(self, fieldID: _Optional[int] = ..., name: _Optional[str] = ..., is_primary_key: bool = ..., description: _Optional[str] = ..., data_type: _Optional[_Union[DataType, str]] = ..., type_params: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ..., index_params: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ..., autoID: bool = ..., state: _Optional[_Union[FieldState, str]] = ..., element_type: _Optional[_Union[DataType, str]] = ..., default_value: _Optional[_Union[ValueField, _Mapping]] = ..., is_dynamic: bool = ..., is_partition_key: bool = ..., is_clustering_key: bool = ..., nullable: bool = ..., is_function_output: bool = ...) -> None: ... 
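The regenerated stub above adds `nullable` and `is_function_output` to `FieldSchema`. A minimal sketch of what the new `nullable` flag looks like when building the message directly from the generated module (the field name and ID below are made up for illustration, not part of the patch):

```python
from pymilvus.grpc_gen import schema_pb2

# "comment" is a hypothetical VARCHAR field; `nullable` is one of the two
# FieldSchema fields added by this change (the other is `is_function_output`).
comment_field = schema_pb2.FieldSchema(
    fieldID=102,
    name="comment",
    data_type=schema_pb2.VarChar,
    nullable=True,  # entities may now omit this field or carry a null value
)
print(comment_field.nullable)  # True
```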
+ +class FunctionSchema(_message.Message): + __slots__ = ("name", "id", "description", "type", "input_field_names", "input_field_ids", "output_field_names", "output_field_ids", "params") + NAME_FIELD_NUMBER: _ClassVar[int] + ID_FIELD_NUMBER: _ClassVar[int] + DESCRIPTION_FIELD_NUMBER: _ClassVar[int] + TYPE_FIELD_NUMBER: _ClassVar[int] + INPUT_FIELD_NAMES_FIELD_NUMBER: _ClassVar[int] + INPUT_FIELD_IDS_FIELD_NUMBER: _ClassVar[int] + OUTPUT_FIELD_NAMES_FIELD_NUMBER: _ClassVar[int] + OUTPUT_FIELD_IDS_FIELD_NUMBER: _ClassVar[int] + PARAMS_FIELD_NUMBER: _ClassVar[int] + name: str + id: int + description: str + type: FunctionType + input_field_names: _containers.RepeatedScalarFieldContainer[str] + input_field_ids: _containers.RepeatedScalarFieldContainer[int] + output_field_names: _containers.RepeatedScalarFieldContainer[str] + output_field_ids: _containers.RepeatedScalarFieldContainer[int] + params: _containers.RepeatedCompositeFieldContainer[_common_pb2.KeyValuePair] + def __init__(self, name: _Optional[str] = ..., id: _Optional[int] = ..., description: _Optional[str] = ..., type: _Optional[_Union[FunctionType, str]] = ..., input_field_names: _Optional[_Iterable[str]] = ..., input_field_ids: _Optional[_Iterable[int]] = ..., output_field_names: _Optional[_Iterable[str]] = ..., output_field_ids: _Optional[_Iterable[int]] = ..., params: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ...) -> None: ... class CollectionSchema(_message.Message): - __slots__ = ("name", "description", "autoID", "fields", "enable_dynamic_field", "properties") + __slots__ = ("name", "description", "autoID", "fields", "enable_dynamic_field", "properties", "functions") NAME_FIELD_NUMBER: _ClassVar[int] DESCRIPTION_FIELD_NUMBER: _ClassVar[int] AUTOID_FIELD_NUMBER: _ClassVar[int] FIELDS_FIELD_NUMBER: _ClassVar[int] ENABLE_DYNAMIC_FIELD_FIELD_NUMBER: _ClassVar[int] PROPERTIES_FIELD_NUMBER: _ClassVar[int] + FUNCTIONS_FIELD_NUMBER: _ClassVar[int] name: str description: str autoID: bool fields: _containers.RepeatedCompositeFieldContainer[FieldSchema] enable_dynamic_field: bool properties: _containers.RepeatedCompositeFieldContainer[_common_pb2.KeyValuePair] - def __init__(self, name: _Optional[str] = ..., description: _Optional[str] = ..., autoID: bool = ..., fields: _Optional[_Iterable[_Union[FieldSchema, _Mapping]]] = ..., enable_dynamic_field: bool = ..., properties: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ...) -> None: ... + functions: _containers.RepeatedCompositeFieldContainer[FunctionSchema] + def __init__(self, name: _Optional[str] = ..., description: _Optional[str] = ..., autoID: bool = ..., fields: _Optional[_Iterable[_Union[FieldSchema, _Mapping]]] = ..., enable_dynamic_field: bool = ..., properties: _Optional[_Iterable[_Union[_common_pb2.KeyValuePair, _Mapping]]] = ..., functions: _Optional[_Iterable[_Union[FunctionSchema, _Mapping]]] = ...) -> None: ... class BoolArray(_message.Message): __slots__ = ("data",) @@ -160,6 +199,12 @@ class JSONArray(_message.Message): data: _containers.RepeatedScalarFieldContainer[bytes] def __init__(self, data: _Optional[_Iterable[bytes]] = ...) -> None: ... +class GeometryArray(_message.Message): + __slots__ = ("data",) + DATA_FIELD_NUMBER: _ClassVar[int] + data: _containers.RepeatedScalarFieldContainer[bytes] + def __init__(self, data: _Optional[_Iterable[bytes]] = ...) -> None: ... 
+ class ValueField(_message.Message): __slots__ = ("bool_data", "int_data", "long_data", "float_data", "double_data", "string_data", "bytes_data") BOOL_DATA_FIELD_NUMBER: _ClassVar[int] @@ -179,7 +224,7 @@ class ValueField(_message.Message): def __init__(self, bool_data: bool = ..., int_data: _Optional[int] = ..., long_data: _Optional[int] = ..., float_data: _Optional[float] = ..., double_data: _Optional[float] = ..., string_data: _Optional[str] = ..., bytes_data: _Optional[bytes] = ...) -> None: ... class ScalarField(_message.Message): - __slots__ = ("bool_data", "int_data", "long_data", "float_data", "double_data", "string_data", "bytes_data", "array_data", "json_data") + __slots__ = ("bool_data", "int_data", "long_data", "float_data", "double_data", "string_data", "bytes_data", "array_data", "json_data", "geometry_data") BOOL_DATA_FIELD_NUMBER: _ClassVar[int] INT_DATA_FIELD_NUMBER: _ClassVar[int] LONG_DATA_FIELD_NUMBER: _ClassVar[int] @@ -189,6 +234,7 @@ class ScalarField(_message.Message): BYTES_DATA_FIELD_NUMBER: _ClassVar[int] ARRAY_DATA_FIELD_NUMBER: _ClassVar[int] JSON_DATA_FIELD_NUMBER: _ClassVar[int] + GEOMETRY_DATA_FIELD_NUMBER: _ClassVar[int] bool_data: BoolArray int_data: IntArray long_data: LongArray @@ -198,7 +244,8 @@ class ScalarField(_message.Message): bytes_data: BytesArray array_data: ArrayArray json_data: JSONArray - def __init__(self, bool_data: _Optional[_Union[BoolArray, _Mapping]] = ..., int_data: _Optional[_Union[IntArray, _Mapping]] = ..., long_data: _Optional[_Union[LongArray, _Mapping]] = ..., float_data: _Optional[_Union[FloatArray, _Mapping]] = ..., double_data: _Optional[_Union[DoubleArray, _Mapping]] = ..., string_data: _Optional[_Union[StringArray, _Mapping]] = ..., bytes_data: _Optional[_Union[BytesArray, _Mapping]] = ..., array_data: _Optional[_Union[ArrayArray, _Mapping]] = ..., json_data: _Optional[_Union[JSONArray, _Mapping]] = ...) -> None: ... + geometry_data: GeometryArray + def __init__(self, bool_data: _Optional[_Union[BoolArray, _Mapping]] = ..., int_data: _Optional[_Union[IntArray, _Mapping]] = ..., long_data: _Optional[_Union[LongArray, _Mapping]] = ..., float_data: _Optional[_Union[FloatArray, _Mapping]] = ..., double_data: _Optional[_Union[DoubleArray, _Mapping]] = ..., string_data: _Optional[_Union[StringArray, _Mapping]] = ..., bytes_data: _Optional[_Union[BytesArray, _Mapping]] = ..., array_data: _Optional[_Union[ArrayArray, _Mapping]] = ..., json_data: _Optional[_Union[JSONArray, _Mapping]] = ..., geometry_data: _Optional[_Union[GeometryArray, _Mapping]] = ...) -> None: ... class SparseFloatArray(_message.Message): __slots__ = ("contents", "dim") @@ -225,20 +272,22 @@ class VectorField(_message.Message): def __init__(self, dim: _Optional[int] = ..., float_vector: _Optional[_Union[FloatArray, _Mapping]] = ..., binary_vector: _Optional[bytes] = ..., float16_vector: _Optional[bytes] = ..., bfloat16_vector: _Optional[bytes] = ..., sparse_float_vector: _Optional[_Union[SparseFloatArray, _Mapping]] = ...) -> None: ... 
class FieldData(_message.Message): - __slots__ = ("type", "field_name", "scalars", "vectors", "field_id", "is_dynamic") + __slots__ = ("type", "field_name", "scalars", "vectors", "field_id", "is_dynamic", "valid_data") TYPE_FIELD_NUMBER: _ClassVar[int] FIELD_NAME_FIELD_NUMBER: _ClassVar[int] SCALARS_FIELD_NUMBER: _ClassVar[int] VECTORS_FIELD_NUMBER: _ClassVar[int] FIELD_ID_FIELD_NUMBER: _ClassVar[int] IS_DYNAMIC_FIELD_NUMBER: _ClassVar[int] + VALID_DATA_FIELD_NUMBER: _ClassVar[int] type: DataType field_name: str scalars: ScalarField vectors: VectorField field_id: int is_dynamic: bool - def __init__(self, type: _Optional[_Union[DataType, str]] = ..., field_name: _Optional[str] = ..., scalars: _Optional[_Union[ScalarField, _Mapping]] = ..., vectors: _Optional[_Union[VectorField, _Mapping]] = ..., field_id: _Optional[int] = ..., is_dynamic: bool = ...) -> None: ... + valid_data: _containers.RepeatedScalarFieldContainer[bool] + def __init__(self, type: _Optional[_Union[DataType, str]] = ..., field_name: _Optional[str] = ..., scalars: _Optional[_Union[ScalarField, _Mapping]] = ..., vectors: _Optional[_Union[VectorField, _Mapping]] = ..., field_id: _Optional[int] = ..., is_dynamic: bool = ..., valid_data: _Optional[_Iterable[bool]] = ...) -> None: ... class IDs(_message.Message): __slots__ = ("int_id", "str_id") @@ -248,8 +297,16 @@ class IDs(_message.Message): str_id: StringArray def __init__(self, int_id: _Optional[_Union[LongArray, _Mapping]] = ..., str_id: _Optional[_Union[StringArray, _Mapping]] = ...) -> None: ... +class SearchIteratorV2Results(_message.Message): + __slots__ = ("token", "last_bound") + TOKEN_FIELD_NUMBER: _ClassVar[int] + LAST_BOUND_FIELD_NUMBER: _ClassVar[int] + token: str + last_bound: float + def __init__(self, token: _Optional[str] = ..., last_bound: _Optional[float] = ...) -> None: ... + class SearchResultData(_message.Message): - __slots__ = ("num_queries", "top_k", "fields_data", "scores", "ids", "topks", "output_fields", "group_by_field_value", "all_search_count", "distances") + __slots__ = ("num_queries", "top_k", "fields_data", "scores", "ids", "topks", "output_fields", "group_by_field_value", "all_search_count", "distances", "search_iterator_v2_results") NUM_QUERIES_FIELD_NUMBER: _ClassVar[int] TOP_K_FIELD_NUMBER: _ClassVar[int] FIELDS_DATA_FIELD_NUMBER: _ClassVar[int] @@ -260,6 +317,7 @@ class SearchResultData(_message.Message): GROUP_BY_FIELD_VALUE_FIELD_NUMBER: _ClassVar[int] ALL_SEARCH_COUNT_FIELD_NUMBER: _ClassVar[int] DISTANCES_FIELD_NUMBER: _ClassVar[int] + SEARCH_ITERATOR_V2_RESULTS_FIELD_NUMBER: _ClassVar[int] num_queries: int top_k: int fields_data: _containers.RepeatedCompositeFieldContainer[FieldData] @@ -270,7 +328,8 @@ class SearchResultData(_message.Message): group_by_field_value: FieldData all_search_count: int distances: _containers.RepeatedScalarFieldContainer[float] - def __init__(self, num_queries: _Optional[int] = ..., top_k: _Optional[int] = ..., fields_data: _Optional[_Iterable[_Union[FieldData, _Mapping]]] = ..., scores: _Optional[_Iterable[float]] = ..., ids: _Optional[_Union[IDs, _Mapping]] = ..., topks: _Optional[_Iterable[int]] = ..., output_fields: _Optional[_Iterable[str]] = ..., group_by_field_value: _Optional[_Union[FieldData, _Mapping]] = ..., all_search_count: _Optional[int] = ..., distances: _Optional[_Iterable[float]] = ...) -> None: ... 
+ search_iterator_v2_results: SearchIteratorV2Results + def __init__(self, num_queries: _Optional[int] = ..., top_k: _Optional[int] = ..., fields_data: _Optional[_Iterable[_Union[FieldData, _Mapping]]] = ..., scores: _Optional[_Iterable[float]] = ..., ids: _Optional[_Union[IDs, _Mapping]] = ..., topks: _Optional[_Iterable[int]] = ..., output_fields: _Optional[_Iterable[str]] = ..., group_by_field_value: _Optional[_Union[FieldData, _Mapping]] = ..., all_search_count: _Optional[int] = ..., distances: _Optional[_Iterable[float]] = ..., search_iterator_v2_results: _Optional[_Union[SearchIteratorV2Results, _Mapping]] = ...) -> None: ... class VectorClusteringInfo(_message.Message): __slots__ = ("field", "centroid") @@ -293,3 +352,39 @@ class ClusteringInfo(_message.Message): vector_clustering_infos: _containers.RepeatedCompositeFieldContainer[VectorClusteringInfo] scalar_clustering_infos: _containers.RepeatedCompositeFieldContainer[ScalarClusteringInfo] def __init__(self, vector_clustering_infos: _Optional[_Iterable[_Union[VectorClusteringInfo, _Mapping]]] = ..., scalar_clustering_infos: _Optional[_Iterable[_Union[ScalarClusteringInfo, _Mapping]]] = ...) -> None: ... + +class TemplateValue(_message.Message): + __slots__ = ("bool_val", "int64_val", "float_val", "string_val", "array_val") + BOOL_VAL_FIELD_NUMBER: _ClassVar[int] + INT64_VAL_FIELD_NUMBER: _ClassVar[int] + FLOAT_VAL_FIELD_NUMBER: _ClassVar[int] + STRING_VAL_FIELD_NUMBER: _ClassVar[int] + ARRAY_VAL_FIELD_NUMBER: _ClassVar[int] + bool_val: bool + int64_val: int + float_val: float + string_val: str + array_val: TemplateArrayValue + def __init__(self, bool_val: bool = ..., int64_val: _Optional[int] = ..., float_val: _Optional[float] = ..., string_val: _Optional[str] = ..., array_val: _Optional[_Union[TemplateArrayValue, _Mapping]] = ...) -> None: ... + +class TemplateArrayValue(_message.Message): + __slots__ = ("bool_data", "long_data", "double_data", "string_data", "array_data", "json_data") + BOOL_DATA_FIELD_NUMBER: _ClassVar[int] + LONG_DATA_FIELD_NUMBER: _ClassVar[int] + DOUBLE_DATA_FIELD_NUMBER: _ClassVar[int] + STRING_DATA_FIELD_NUMBER: _ClassVar[int] + ARRAY_DATA_FIELD_NUMBER: _ClassVar[int] + JSON_DATA_FIELD_NUMBER: _ClassVar[int] + bool_data: BoolArray + long_data: LongArray + double_data: DoubleArray + string_data: StringArray + array_data: TemplateArrayValueArray + json_data: JSONArray + def __init__(self, bool_data: _Optional[_Union[BoolArray, _Mapping]] = ..., long_data: _Optional[_Union[LongArray, _Mapping]] = ..., double_data: _Optional[_Union[DoubleArray, _Mapping]] = ..., string_data: _Optional[_Union[StringArray, _Mapping]] = ..., array_data: _Optional[_Union[TemplateArrayValueArray, _Mapping]] = ..., json_data: _Optional[_Union[JSONArray, _Mapping]] = ...) -> None: ... + +class TemplateArrayValueArray(_message.Message): + __slots__ = ("data",) + DATA_FIELD_NUMBER: _ClassVar[int] + data: _containers.RepeatedCompositeFieldContainer[TemplateArrayValue] + def __init__(self, data: _Optional[_Iterable[_Union[TemplateArrayValue, _Mapping]]] = ...) -> None: ... 
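The new `FunctionSchema` message and the `functions` field on `CollectionSchema` let a schema declare server-computed fields. A minimal sketch, assuming only the generated `schema_pb2` module from this patch (collection, field, and function names are illustrative): declare a BM25 function that fills a sparse-vector field from a VARCHAR field.

```python
from pymilvus.grpc_gen import schema_pb2

# Source text field and the sparse field the function will populate.
chunk = schema_pb2.FieldSchema(name="chunk", data_type=schema_pb2.VarChar)
chunk_sparse = schema_pb2.FieldSchema(
    name="chunk_sparse",
    data_type=schema_pb2.SparseFloatVector,
    is_function_output=True,  # new flag: the server fills this field, not the client
)

# BM25 is one of the new FunctionType enum values (Unknown, BM25, TextEmbedding).
bm25_fn = schema_pb2.FunctionSchema(
    name="chunk_bm25",
    type=schema_pb2.BM25,
    input_field_names=["chunk"],
    output_field_names=["chunk_sparse"],
)

schema = schema_pb2.CollectionSchema(
    name="docs",
    fields=[chunk, chunk_sparse],
    functions=[bm25_fn],  # new repeated FunctionSchema field
)
```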
diff --git a/pymilvus/milvus_client/milvus_client.py b/pymilvus/milvus_client/milvus_client.py index 072044efd..4f593eef6 100644 --- a/pymilvus/milvus_client/milvus_client.py +++ b/pymilvus/milvus_client/milvus_client.py @@ -4,11 +4,13 @@ from typing import Dict, List, Optional, Union from uuid import uuid4 +from pymilvus.client.abstract import AnnSearchRequest, BaseRanker from pymilvus.client.constants import DEFAULT_CONSISTENCY_LEVEL from pymilvus.client.types import ( ExceptionsMessage, ExtraList, LoadState, + OmitZeroDict, construct_cost_extra, ) from pymilvus.exceptions import ( @@ -31,7 +33,7 @@ class MilvusClient: """The Milvus Client""" - # pylint: disable=logging-too-many-args, too-many-instance-attributes, import-outside-toplevel + # pylint: disable=logging-too-many-args, too-many-instance-attributes def __init__( self, @@ -221,11 +223,13 @@ def insert( ) except Exception as ex: raise ex from ex - return { - "insert_count": res.insert_count, - "ids": res.primary_keys, - "cost": res.cost, - } + return OmitZeroDict( + { + "insert_count": res.insert_count, + "ids": res.primary_keys, + "cost": res.cost, + } + ) def upsert( self, @@ -272,10 +276,83 @@ def upsert( except Exception as ex: raise ex from ex - return { - "upsert_count": res.upsert_count, - "cost": res.cost, - } + return OmitZeroDict( + { + "upsert_count": res.upsert_count, + "cost": res.cost, + } + ) + + def hybrid_search( + self, + collection_name: str, + reqs: List[AnnSearchRequest], + ranker: BaseRanker, + limit: int = 10, + output_fields: Optional[List[str]] = None, + timeout: Optional[float] = None, + partition_names: Optional[List[str]] = None, + **kwargs, + ) -> List[List[dict]]: + """Conducts multi vector similarity search with a rerank for rearrangement. + + Args: + collection_name(``string``): The name of collection. + reqs (``List[AnnSearchRequest]``): The vector search requests. + ranker (``BaseRanker``): The ranker for rearrange nummer of limit results. + limit (``int``): The max number of returned record, also known as `topk`. + + partition_names (``List[str]``, optional): The names of partitions to search on. + output_fields (``List[str]``, optional): + The name of fields to return in the search result. Can only get scalar fields. + round_decimal (``int``, optional): + The specified number of decimal places of returned distance. + Defaults to -1 means no round to returned distance. + timeout (``float``, optional): A duration of time in seconds to allow for the RPC. + If timeout is set to None, the client keeps waiting until the server + responds or an error occurs. + **kwargs (``dict``): Optional search params + + * *offset* (``int``, optinal) + offset for pagination. + + * *consistency_level* (``str/int``, optional) + Which consistency level to use when searching in the collection. + + Options of consistency level: Strong, Bounded, Eventually, Session, Customized. + + Note: this parameter overwrites the same one specified when creating collection, + if no consistency level was specified, search will use the + consistency level when you create the collection. + + Returns: + List[List[dict]]: A nested list of dicts containing the result data. 
+ + Raises: + MilvusException: If anything goes wrong + """ + + conn = self._get_connection() + try: + res = conn.hybrid_search( + collection_name, + reqs, + ranker, + limit=limit, + partition_names=partition_names, + output_fields=output_fields, + timeout=timeout, + **kwargs, + ) + except Exception as ex: + logger.error("Failed to hybrid search collection: %s", collection_name) + raise ex from ex + + ret = [] + for hits in res: + ret.append([hit.to_dict() for hit in hits]) + + return ExtraList(ret, extra=construct_cost_extra(res.cost)) def search( self, @@ -324,6 +401,7 @@ def search( output_fields=output_fields, partition_names=partition_names, timeout=timeout, + expr_params=kwargs.pop("filter_params", {}), **kwargs, ) except Exception as ex: @@ -375,20 +453,17 @@ def query( ids = [ids] conn = self._get_connection() - try: - schema_dict = conn.describe_collection(collection_name, timeout=timeout, **kwargs) - except Exception as ex: - logger.error("Failed to describe collection: %s", collection_name) - raise ex from ex if ids: + try: + schema_dict = conn.describe_collection(collection_name, timeout=timeout, **kwargs) + except Exception as ex: + logger.error("Failed to describe collection: %s", collection_name) + raise ex from ex filter = self._pack_pks_expr(schema_dict, ids) if not output_fields: output_fields = ["*"] - vec_field_name = self._get_vector_field_name(schema_dict) - if vec_field_name: - output_fields.append(vec_field_name) try: res = conn.query( @@ -397,6 +472,7 @@ def query( output_fields=output_fields, partition_names=partition_names, timeout=timeout, + expr_params=kwargs.pop("filter_params", {}), **kwargs, ) except Exception as ex: @@ -445,9 +521,6 @@ def get( if not output_fields: output_fields = ["*"] - vec_field_name = self._get_vector_field_name(schema_dict) - if vec_field_name: - output_fields.append(vec_field_name) expr = self._pack_pks_expr(schema_dict, ids) try: @@ -544,6 +617,7 @@ def delete( partition_name, timeout=timeout, param_name="filter or ids", + expr_params=kwargs.pop("filter_params", {}), **kwargs, ) if res.primary_keys: @@ -555,7 +629,7 @@ def delete( if ret_pks: return ret_pks - return {"delete_count": res.delete_count, "cost": res.cost} + return OmitZeroDict({"delete_count": res.delete_count, "cost": res.cost}) def get_collection_stats(self, collection_name: str, timeout: Optional[float] = None) -> Dict: conn = self._get_connection() @@ -664,16 +738,6 @@ def _extract_primary_field(self, schema_dict: Dict) -> dict: return {} - def _get_vector_field_name(self, schema_dict: Dict): - fields = schema_dict.get("fields", []) - if not fields: - return {} - - for field_dict in fields: - if field_dict.get("type", None) == DataType.FLOAT_VECTOR: - return field_dict.get("name", "") - return "" - def _pack_pks_expr(self, schema_dict: Dict, pks: List) -> str: primary_field = self._extract_primary_field(schema_dict) pk_field_name = primary_field["name"] @@ -887,9 +951,7 @@ def drop_role(self, role_name: str, timeout: Optional[float] = None, **kwargs): conn = self._get_connection() conn.drop_role(role_name, timeout=timeout, **kwargs) - def describe_role( - self, role_name: str, timeout: Optional[float] = None, **kwargs - ) -> List[Dict]: + def describe_role(self, role_name: str, timeout: Optional[float] = None, **kwargs) -> Dict: conn = self._get_connection() db_name = kwargs.pop("db_name", "") try: @@ -970,3 +1032,216 @@ def list_aliases( def using_database(self, db_name: str, **kwargs): conn = self._get_connection() conn.reset_db_name(db_name) + + def 
create_database(self, db_name: str, **kwargs): + conn = self._get_connection() + conn.create_database(db_name, **kwargs) + + def drop_database(self, db_name: str, **kwargs): + conn = self._get_connection() + conn.drop_database(db_name, **kwargs) + + def list_databases(self, **kwargs) -> List[str]: + conn = self._get_connection() + return conn.list_database(**kwargs) + + def flush( + self, + collection_name: str, + timeout: Optional[float] = None, + **kwargs, + ): + """Seal all segments in the collection. Inserts after flushing will be written into + new segments. + + Args: + collection_name(``string``): The name of collection. + timeout (float): an optional duration of time in seconds to allow for the RPCs. + If timeout is not set, the client keeps waiting until the server + responds or an error occurs. + + Raises: + MilvusException: If anything goes wrong. + """ + conn = self._get_connection() + conn.flush([collection_name], timeout=timeout, **kwargs) + + def compact( + self, + collection_name: str, + is_clustering: Optional[bool] = False, + timeout: Optional[float] = None, + **kwargs, + ) -> int: + """Compact the collection by merging its small segments + + Args: + timeout (``float``, optional): An optional duration of time in seconds to allow + for the RPC. When timeout is set to None, client waits until server response + or error occur. + + is_clustering (``bool``, optional): Option to trigger clustering compaction. + + Raises: + MilvusException: If anything goes wrong. + + Returns: + int: An integer representing the server's compaction job. You can use this job ID + for subsequent state inquiries. + """ + conn = self._get_connection() + return conn.compact(collection_name, is_clustering=is_clustering, timeout=timeout, **kwargs) + + def get_compaction_state( + self, + job_id: int, + timeout: Optional[float] = None, + **kwargs, + ) -> str: + """Get the state of a compaction job + + Args: + timeout (``float``, optional): An optional duration of time in seconds to allow + for the RPC. When timeout is set to None, client waits until server response + or error occur. + + Raises: + MilvusException: If anything goes wrong. + + Returns: + str: the state of this compaction job. Possible values are "UndefiedState", "Executing" + and "Completed". + """ + conn = self._get_connection() + result = conn.get_compaction_state(job_id, timeout=timeout, **kwargs) + return result.state_name + + def get_server_version( + self, + timeout: Optional[float] = None, + **kwargs, + ) -> str: + """Get the running server's version + + Args: + timeout (``float``, optional): A duration of time in seconds to allow for the RPC. + If timeout is set to None, the client keeps waiting until the server + responds or an error occurs. + + Returns: + str: A string representing the server's version. + + Raises: + MilvusException: If anything goes wrong + """ + conn = self._get_connection() + return conn.get_server_version(timeout=timeout, **kwargs) + + def create_privilege_group( + self, + group_name: str, + timeout: Optional[float] = None, + **kwargs, + ): + """Create a new privilege group. + + Args: + group_name (``str``): The name of the privilege group. + timeout (``float``, optional): An optional duration of time in seconds to allow + for the RPC. When timeout is set to None, client waits until server response + or error occur. + + Raises: + MilvusException: If anything goes wrong.
+ """ + conn = self._get_connection() + conn.create_privilege_group(group_name, timeout=timeout, **kwargs) + + def drop_privilege_group( + self, + group_name: str, + timeout: Optional[float] = None, + **kwargs, + ): + """Drop a privilege group. + + Args: + group_name (``str``): The name of the privilege group. + timeout (``float``, optional): An optional duration of time in seconds to allow + for the RPC. When timeout is set to None, client waits until server response + or error occur. + + Raises: + MilvusException: If anything goes wrong. + """ + conn = self._get_connection() + conn.drop_privilege_group(group_name, timeout=timeout, **kwargs) + + def list_privilege_groups( + self, + timeout: Optional[float] = None, + **kwargs, + ) -> Dict[str, List[str]]: + """List all privilege groups. + + Args: + timeout (``float``, optional): An optional duration of time in seconds to allow + for the RPC. When timeout is set to None, client waits until server response + or error occur. + + Returns: + Dict[str, List[str]]: A dictionary of privilege groups and their privileges. + + Raises: + MilvusException: If anything goes wrong. + """ + conn = self._get_connection() + pgs = conn.list_privilege_groups(timeout=timeout, **kwargs) + ret = {} + for pg in pgs: + ret[pg.group_name] = [p.name for p in pg.privileges] + return ret + + def add_privileges_to_group( + self, + group_name: str, + privileges: List[str], + timeout: Optional[float] = None, + **kwargs, + ): + """Add privileges to a privilege group. + + Args: + group_name (``str``): The name of the privilege group. + privileges (``List[str]``): A list of privileges to be added to the group. + timeout (``float``, optional): An optional duration of time in seconds to allow + for the RPC. When timeout is set to None, client waits until server response + or error occur. + + Raises: + MilvusException: If anything goes wrong. + """ + conn = self._get_connection() + conn.add_privileges_to_group(group_name, privileges, timeout=timeout, **kwargs) + + def remove_privileges_from_group( + self, + group_name: str, + privileges: List[str], + timeout: Optional[float] = None, + **kwargs, + ): + """Remove privileges from a privilege group. + + Args: + group_name (``str``): The name of the privilege group. + privileges (``List[str]``): A list of privileges to be removed from the group. + timeout (``float``, optional): An optional duration of time in seconds to allow + for the RPC. When timeout is set to None, client waits until server response + or error occur. + + Raises: + MilvusException: If anything goes wrong. 
+ """ + conn = self._get_connection() + conn.remove_privileges_from_group(group_name, privileges, timeout=timeout, **kwargs) diff --git a/pymilvus/orm/collection.py b/pymilvus/orm/collection.py index 27fdfee71..a31d374cf 100644 --- a/pymilvus/orm/collection.py +++ b/pymilvus/orm/collection.py @@ -16,7 +16,7 @@ import pandas as pd -from pymilvus.client import entity_helper +from pymilvus.client import utils from pymilvus.client.abstract import BaseRanker, SearchResult from pymilvus.client.constants import DEFAULT_CONSISTENCY_LEVEL from pymilvus.client.types import ( @@ -34,7 +34,6 @@ IndexNotExistException, PartitionAlreadyExistException, SchemaNotReadyException, - UpsertAutoIDTrueException, ) from pymilvus.grpc_gen import schema_pb2 from pymilvus.settings import Config @@ -186,7 +185,7 @@ def construct_from_dataframe(cls, name: str, dataframe: pd.DataFrame, **kwargs): pk_index = i if pk_index == -1: raise SchemaNotReadyException(message=ExceptionsMessage.PrimaryKeyNotExist) - if "auto_id" in kwargs and not isinstance(kwargs.get("auto_id", None), bool): + if "auto_id" in kwargs and not isinstance(kwargs.get("auto_id"), bool): raise AutoIDException(message=ExceptionsMessage.AutoIDType) auto_id = kwargs.pop("auto_id", False) if auto_id: @@ -234,10 +233,10 @@ def schema(self) -> CollectionSchema: return self._schema @property - def aliases(self, **kwargs) -> list: + def aliases(self) -> list: """List[str]: all the aliases of the collection.""" conn = self._get_connection() - resp = conn.describe_collection(self._name, **kwargs) + resp = conn.describe_collection(self._name) return resp["aliases"] @property @@ -256,14 +255,14 @@ def is_empty(self) -> bool: return self.num_entities == 0 @property - def num_shards(self, **kwargs) -> int: + def num_shards(self) -> int: """int: number of shards used by the collection.""" if self._num_shards is None: - self._num_shards = self.describe(timeout=kwargs.get("timeout")).get("num_shards") + self._num_shards = self.describe().get("num_shards") return self._num_shards @property - def num_entities(self, **kwargs) -> int: + def num_entities(self) -> int: """int: The number of entities in the collection, not real time. Examples: @@ -283,7 +282,7 @@ def num_entities(self, **kwargs) -> int: 2 """ conn = self._get_connection() - stats = conn.get_collection_stats(collection_name=self._name, **kwargs) + stats = conn.get_collection_stats(collection_name=self._name) result = {stat.key: stat.value for stat in stats} result["row_count"] = int(result["row_count"]) return result["row_count"] @@ -347,7 +346,9 @@ def set_properties(self, properties: dict, timeout: Optional[float] = None, **kw Args: properties (``dict``): collection properties. - only support collection TTL with key `collection.ttl.seconds` + support collection TTL with key `collection.ttl.seconds` + support collection replica number with key `collection.replica.number` + support collection resource groups with key `collection.resource_groups`. timeout (float, optional): an optional duration of time in seconds to allow for the RPCs. If timeout is not set, the client keeps waiting until the server responds or an error occurs. @@ -373,7 +374,7 @@ def set_properties(self, properties: dict, timeout: Optional[float] = None, **kw def load( self, partition_names: Optional[list] = None, - replica_number: int = 1, + replica_number: int = 0, timeout: Optional[float] = None, **kwargs, ): @@ -390,10 +391,14 @@ def load( * *_async*(``bool``) Indicate if invoke asynchronously. 
- * *_refresh*(``bool``) + * *refresh*(``bool``) Whether to renew the segment list of this collection before loading - * *_resource_groups(``List[str]``) + * *resource_groups(``List[str]``) Specify resource groups which can be used during loading. + * *load_fields(``List[str]``) + Specify the list of fields needed during this load + * *_skip_load_dynamic_field(``bool``) + Specify whether this load shall skip the dynamic schema field Raises: MilvusException: If anything goes wrong. @@ -454,7 +459,7 @@ def release(self, timeout: Optional[float] = None, **kwargs): def insert( self, - data: Union[List, pd.DataFrame, Dict, entity_helper.SparseMatrixInputType], + data: Union[List, pd.DataFrame, Dict, utils.SparseMatrixInputType], partition_name: Optional[str] = None, timeout: Optional[float] = None, **kwargs, @@ -509,7 +514,7 @@ def insert( ) check_insert_schema(self.schema, data) - entities = Prepare.prepare_insert_data(data, self.schema) + entities = Prepare.prepare_data(data, self.schema) return conn.batch_insert( self._name, entities, @@ -581,7 +586,7 @@ def delete( def upsert( self, - data: Union[List, pd.DataFrame, Dict, entity_helper.SparseMatrixInputType], + data: Union[List, pd.DataFrame, Dict, utils.SparseMatrixInputType], partition_name: Optional[str] = None, timeout: Optional[float] = None, **kwargs, @@ -620,9 +625,6 @@ def upsert( 10 """ - if self.schema.auto_id: - raise UpsertAutoIDTrueException(message=ExceptionsMessage.UpsertAutoIDTrue) - if not is_valid_insert_data(data): raise DataTypeNotSupportException( message="The type of data should be List, pd.DataFrame or Dict" ) @@ -641,7 +643,7 @@ def upsert( return MutationResult(res) check_upsert_schema(self.schema, data) - entities = Prepare.prepare_upsert_data(data, self.schema) + entities = Prepare.prepare_data(data, self.schema, False) res = conn.upsert( self._name, entities, @@ -655,7 +657,7 @@ def upsert( def search( self, - data: Union[List, entity_helper.SparseMatrixInputType], + data: Union[List, utils.SparseMatrixInputType], anns_field: str, param: Dict, limit: int, @@ -790,7 +792,7 @@ def search( if expr is not None and not isinstance(expr, str): raise DataTypeNotMatchException(message=ExceptionsMessage.ExprType % type(expr)) - empty_scipy_sparse = entity_helper.sparse_is_scipy_format(data) and (data.shape[0] == 0) + empty_scipy_sparse = utils.SciPyHelper.is_scipy_sparse(data) and (data.shape[0] == 0) if (isinstance(data, list) and len(data) == 0) or empty_scipy_sparse: resp = SearchResult(schema_pb2.SearchResultData()) return SearchFuture(None) if kwargs.get("_async", False) else resp @@ -957,7 +959,7 @@ def hybrid_search( def search_iterator( self, - data: Union[List, entity_helper.SparseMatrixInputType], + data: Union[List, utils.SparseMatrixInputType], anns_field: str, param: Dict, batch_size: Optional[int] = 1000, @@ -969,10 +971,6 @@ def search_iterator( round_decimal: int = -1, **kwargs, ): - if entity_helper.entity_is_sparse_matrix(data): - # search iterator is based on range_search, which is not yet supported for sparse. - raise DataTypeNotSupportException(message=ExceptionsMessage.DataTypeNotSupport) - if expr is not None and not isinstance(expr, str): raise DataTypeNotMatchException(message=ExceptionsMessage.ExprType % type(expr)) return SearchIterator( @@ -1111,7 +1109,7 @@ def query_iterator( ) @property - def partitions(self, **kwargs) -> List[Partition]: + def partitions(self) -> List[Partition]: """List[Partition]: List of Partition object.
Raises: @@ -1128,7 +1126,7 @@ def partitions(self, **kwargs) -> List[Partition]: [{"name": "_default", "description": "", "num_entities": 0}] """ conn = self._get_connection() - partition_strs = conn.list_partitions(self._name, **kwargs) + partition_strs = conn.list_partitions(self._name) partitions = [] for partition in partition_strs: partitions.append(Partition(self, partition, construct_only=True)) @@ -1252,7 +1250,7 @@ def drop_partition(self, partition_name: str, timeout: Optional[float] = None, * return conn.drop_partition(self._name, partition_name, timeout=timeout, **kwargs) @property - def indexes(self, **kwargs) -> List[Index]: + def indexes(self) -> List[Index]: """List[Index]: list of indexes of this collection. Examples: @@ -1267,7 +1265,7 @@ def indexes(self, **kwargs) -> List[Index]: """ conn = self._get_connection() indexes = [] - tmp_index = conn.list_indexes(self._name, **kwargs) + tmp_index = conn.list_indexes(self._name) for index in tmp_index: if index is not None: info_dict = {kv.key: kv.value for kv in index.params} @@ -1320,6 +1318,10 @@ def index(self, **kwargs) -> Index: if tmp_index is not None: field_name = tmp_index.pop("field_name", None) index_name = tmp_index.pop("index_name", index_name) + tmp_index.pop("total_rows") + tmp_index.pop("indexed_rows") + tmp_index.pop("pending_index_rows") + tmp_index.pop("state") return Index(self, field_name, tmp_index, construct_only=True, index_name=index_name) raise IndexNotExistException(message=ExceptionsMessage.IndexNotExist) @@ -1442,9 +1444,10 @@ def has_index(self, timeout: Optional[float] = None, **kwargs) -> bool: conn = self._get_connection() copy_kwargs = copy.deepcopy(kwargs) index_name = copy_kwargs.pop("index_name", Config.IndexName) - if conn.describe_index(self._name, index_name, timeout=timeout, **copy_kwargs) is None: - return False - return True + + return ( + conn.describe_index(self._name, index_name, timeout=timeout, **copy_kwargs) is not None + ) def drop_index(self, timeout: Optional[float] = None, **kwargs): """Drop index and its corresponding index files. @@ -1480,16 +1483,17 @@ def drop_index(self, timeout: Optional[float] = None, **kwargs): conn = self._get_connection() tmp_index = conn.describe_index(self._name, index_name, timeout=timeout, **copy_kwargs) if tmp_index is not None: - index = Index( - collection=self, + conn.drop_index( + collection_name=self._name, field_name=tmp_index["field_name"], - index_params=tmp_index, - construct_only=True, index_name=index_name, + timeout=timeout, + **copy_kwargs, ) - index.drop(timeout=timeout, **kwargs) - def compact(self, timeout: Optional[float] = None, **kwargs): + def compact( + self, is_clustering: Optional[bool] = False, timeout: Optional[float] = None, **kwargs + ): """Compact merge the small segments in a collection Args: @@ -1497,13 +1501,24 @@ def compact(self, timeout: Optional[float] = None, **kwargs): for the RPC. When timeout is set to None, client waits until server response or error occur. + is_clustering (``bool``, optional): Option to trigger clustering compaction. + Raises: MilvusException: If anything goes wrong. 
""" conn = self._get_connection() - self.compaction_id = conn.compact(self._name, timeout=timeout, **kwargs) + if is_clustering: + self.clustering_compaction_id = conn.compact( + self._name, is_clustering=is_clustering, timeout=timeout, **kwargs + ) + else: + self.compaction_id = conn.compact( + self._name, is_clustering=is_clustering, timeout=timeout, **kwargs + ) - def get_compaction_state(self, timeout: Optional[float] = None, **kwargs) -> CompactionState: + def get_compaction_state( + self, timeout: Optional[float] = None, is_clustering: Optional[bool] = False, **kwargs + ) -> CompactionState: """Get the current compaction state Args: @@ -1511,15 +1526,22 @@ def get_compaction_state(self, timeout: Optional[float] = None, **kwargs) -> Com for the RPC. When timeout is set to None, client waits until server response or error occur. + is_clustering (``bool``, optional): Option to get clustering compaction state. + Raises: MilvusException: If anything goes wrong. """ conn = self._get_connection() + if is_clustering: + return conn.get_compaction_state( + self.clustering_compaction_id, timeout=timeout, **kwargs + ) return conn.get_compaction_state(self.compaction_id, timeout=timeout, **kwargs) def wait_for_compaction_completed( self, timeout: Optional[float] = None, + is_clustering: Optional[bool] = False, **kwargs, ) -> CompactionState: """Block until the current collection's compaction completed @@ -1529,10 +1551,16 @@ def wait_for_compaction_completed( for the RPC. When timeout is set to None, client waits until server response or error occur. + is_clustering (``bool``, optional): Option to get clustering compaction state. + Raises: MilvusException: If anything goes wrong. """ conn = self._get_connection() + if is_clustering: + return conn.wait_for_compaction_completed( + self.clustering_compaction_id, timeout=timeout, **kwargs + ) return conn.wait_for_compaction_completed(self.compaction_id, timeout=timeout, **kwargs) def get_compaction_plans(self, timeout: Optional[float] = None, **kwargs) -> CompactionPlans: diff --git a/pymilvus/orm/connections.py b/pymilvus/orm/connections.py index af71d8df9..7855c72e4 100644 --- a/pymilvus/orm/connections.py +++ b/pymilvus/orm/connections.py @@ -12,6 +12,7 @@ import copy import logging +import pathlib import threading import time from typing import Callable, Tuple, Union @@ -357,6 +358,34 @@ def connect( >>> connections.connect("test", host="localhost", port="19530") """ + if kwargs.get("uri") and parse.urlparse(kwargs["uri"]).scheme.lower() not in [ + "unix", + "http", + "https", + "tcp", + "grpc", + ]: + # start and connect milvuslite + if not kwargs["uri"].endswith(".db"): + raise ConnectionConfigException( + message=f"uri: {kwargs['uri']} is illegal, needs start with [unix, http, https, tcp] or a local file endswith [.db]" + ) + logger.info(f"Pass in the local path {kwargs['uri']}, and run it using milvus-lite") + parent_path = pathlib.Path(kwargs["uri"]).parent + if not parent_path.is_dir(): + raise ConnectionConfigException( + message=f"Open local milvus failed, dir: {parent_path} not exists" + ) + + from milvus_lite.server_manager import ( + server_manager_instance, + ) + + local_uri = server_manager_instance.start_and_get_uri(kwargs["uri"]) + if local_uri is None: + raise ConnectionConfigException(message="Open local milvus failed") + kwargs["uri"] = local_uri + # kwargs_copy is used for auto reconnect kwargs_copy = copy.deepcopy(kwargs) kwargs_copy["user"] = user diff --git a/pymilvus/orm/constants.py b/pymilvus/orm/constants.py index 
b4980204d..6862ab75f 100644 --- a/pymilvus/orm/constants.py +++ b/pymilvus/orm/constants.py @@ -10,7 +10,14 @@ # or implied. See the License for the specific language governing permissions and limitations under # the License. -COMMON_TYPE_PARAMS = ("dim", "max_length", "max_capacity") +COMMON_TYPE_PARAMS = ( + "dim", + "max_length", + "max_capacity", + "enable_match", + "enable_analyzer", + "analyzer_params", +) CALC_DIST_IDS = "ids" CALC_DIST_FLOAT_VEC = "float_vectors" @@ -18,6 +25,7 @@ CALC_DIST_METRIC = "metric" CALC_DIST_L2 = "L2" CALC_DIST_IP = "IP" +CALC_DIST_BM25 = "BM25" CALC_DIST_HAMMING = "HAMMING" CALC_DIST_TANIMOTO = "TANIMOTO" CALC_DIST_JACCARD = "JACCARD" @@ -39,6 +47,8 @@ IS_PRIMARY = "is_primary" REDUCE_STOP_FOR_BEST = "reduce_stop_for_best" ITERATOR_FIELD = "iterator" +ITERATOR_SESSION_TS_FIELD = "iterator_session_ts" +PRINT_ITERATOR_CURSOR = "print_iterator_cursor" DEFAULT_MAX_L2_DISTANCE = 99999999.0 DEFAULT_MIN_IP_DISTANCE = -99999999.0 DEFAULT_MAX_HAMMING_DISTANCE = 99999999.0 @@ -51,3 +61,7 @@ DEFAULT_SEARCH_EXTENSION_RATE: int = 10 UNLIMITED: int = -1 MAX_TRY_TIME: int = 20 +GUARANTEE_TIMESTAMP = "guarantee_timestamp" +ITERATOR_SESSION_CP_FILE = "iterator_cp_file" +BM25_k1 = "bm25_k1" +BM25_b = "bm25_b" diff --git a/pymilvus/orm/db.py b/pymilvus/orm/db.py index 141b6fa60..5a9ea5002 100644 --- a/pymilvus/orm/db.py +++ b/pymilvus/orm/db.py @@ -17,14 +17,17 @@ def using_database(db_name: str, using: str = "default"): _get_connection(using).reset_db_name(db_name) -def create_database(db_name: str, using: str = "default", timeout: Optional[float] = None): +def create_database( + db_name: str, using: str = "default", timeout: Optional[float] = None, **kwargs +): """Create a database using provided database name - - :param db_name: Database name - :type db_name: str - + Args: + db_name (``str``): Database name + properties (``dict``): database properties. + support database replica number with key `database.replica.number` + support database resource groups with key `database.resource_groups` """ - _get_connection(using).create_database(db_name, timeout=timeout) + _get_connection(using).create_database(db_name, timeout=timeout, **kwargs) def drop_database(db_name: str, using: str = "default", timeout: Optional[float] = None): @@ -44,3 +47,32 @@ def list_database(using: str = "default", timeout: Optional[float] = None) -> li List of database names, return when operation is successful """ return _get_connection(using).list_database(timeout=timeout) + + +def set_properties( + db_name: str, + properties: dict, + using: str = "default", + timeout: Optional[float] = None, +): + """Set properties for a database using provided database name + Args: + db_name (``str``): Database name + properties (``dict``): database properties. 
+ support database replica number with key `database.replica.number` + support database resource groups with key `database.resource_groups` + """ + _get_connection(using).alter_database(db_name, properties=properties, timeout=timeout) + + +def describe_database(db_name: str, using: str = "default", timeout: Optional[float] = None): + """Describe a database using provided database name + + :param db_name: Database name + :type db_name: str + + :return dict: + Database information, return when operation is successful + + """ + return _get_connection(using).describe_database(db_name, timeout=timeout) diff --git a/pymilvus/orm/index.py b/pymilvus/orm/index.py index df151ad67..a158d673a 100644 --- a/pymilvus/orm/index.py +++ b/pymilvus/orm/index.py @@ -73,10 +73,8 @@ def __init__( self._collection = collection self._field_name = field_name self._index_params = index_params - index_name = kwargs.get("index_name", Config.IndexName) - self._index_name = index_name - self._kwargs = kwargs - if self._kwargs.pop("construct_only", False): + self._index_name = kwargs.get("index_name", Config.IndexName) + if kwargs.get("construct_only", False): return conn = self._get_connection() @@ -130,17 +128,11 @@ def drop(self, timeout: Optional[float] = None, **kwargs): timeout(float, optional): An optional duration of time in seconds to allow for the RPC. When timeout is set to None, client waits until server response or error occur - kwargs: - * *index_name* (``str``) -- - The name of index. If no index is specified, the default index name is used. """ - copy_kwargs = copy.deepcopy(kwargs) - index_name = copy_kwargs.pop("index_name", Config.IndexName) conn = self._get_connection() conn.drop_index( collection_name=self._collection.name, field_name=self.field_name, - index_name=index_name, + index_name=self.index_name, timeout=timeout, - **copy_kwargs, ) diff --git a/pymilvus/orm/iterator.py b/pymilvus/orm/iterator.py index ea25bf5f5..15118523f 100644 --- a/pymilvus/orm/iterator.py +++ b/pymilvus/orm/iterator.py @@ -1,8 +1,10 @@ +import datetime import logging from copy import deepcopy -from typing import Any, Dict, List, Optional, TypeVar, Union +from pathlib import Path +from typing import Any, Callable, Dict, List, Optional, TypeVar, Union -from pymilvus.client import entity_helper +from pymilvus.client import entity_helper, utils from pymilvus.client.abstract import Hits, LoopBase from pymilvus.exceptions import ( MilvusException, @@ -12,6 +14,7 @@ from .connections import Connections from .constants import ( BATCH_SIZE, + CALC_DIST_BM25, CALC_DIST_COSINE, CALC_DIST_HAMMING, CALC_DIST_IP, @@ -21,9 +24,12 @@ DEFAULT_SEARCH_EXTENSION_RATE, EF, FIELDS, + GUARANTEE_TIMESTAMP, INT64_MAX, IS_PRIMARY, ITERATOR_FIELD, + ITERATOR_SESSION_CP_FILE, + ITERATOR_SESSION_TS_FIELD, MAX_BATCH_SIZE, MAX_FILTERED_IDS_COUNT_ITERATION, MAX_TRY_TIME, @@ -31,6 +37,7 @@ MILVUS_LIMIT, OFFSET, PARAMS, + PRINT_ITERATOR_CURSOR, RADIUS, RANGE_FILTER, REDUCE_STOP_FOR_BEST, @@ -38,12 +45,32 @@ ) from .schema import CollectionSchema from .types import DataType +from .utility import mkts_from_datetime LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.ERROR) +LOGGER.setLevel(logging.INFO) QueryIterator = TypeVar("QueryIterator") SearchIterator = TypeVar("SearchIterator") +log = logging.getLogger(__name__) + + +def fall_back_to_latest_session_ts(): + d = datetime.datetime.now() + return mkts_from_datetime(d, milliseconds=1000.0) + + +def assert_info(condition: bool, message: str): + if not condition: + raise 
MilvusException(message) + + +def io_operation(io_func: Callable[[Any], None], message: str): + try: + io_func() + except OSError as ose: + raise MilvusException(message=message) from ose + def extend_batch_size(batch_size: int, next_param: dict, to_extend_batch_size: bool) -> int: extend_rate = 1 @@ -51,12 +78,15 @@ def extend_batch_size(batch_size: int, next_param: dict, to_extend_batch_size: b extend_rate = DEFAULT_SEARCH_EXTENSION_RATE if EF in next_param[PARAMS]: real_batch = min(MAX_BATCH_SIZE, batch_size * extend_rate, next_param[PARAMS][EF]) - if next_param[PARAMS][EF] > real_batch: - next_param[PARAMS][EF] = real_batch + next_param[PARAMS][EF] = min(next_param[PARAMS][EF], real_batch) return real_batch return min(MAX_BATCH_SIZE, batch_size * extend_rate) +def check_set_flag(obj: Any, flag_name: str, kwargs: Dict[str, Any], key: str): + setattr(obj, flag_name, kwargs.get(key, False)) + + class QueryIterator: def __init__( self, @@ -77,22 +107,89 @@ def __init__( self._partition_names = partition_names self._schema = schema self._timeout = timeout + self._session_ts = 0 self._kwargs = kwargs - self.__set_up_iteration_states() + self._kwargs[ITERATOR_FIELD] = "True" self.__check_set_batch_size(batch_size) self._limit = limit self.__check_set_reduce_stop_for_best() + check_set_flag(self, "_print_iterator_cursor", self._kwargs, PRINT_ITERATOR_CURSOR) self._returned_count = 0 self.__setup__pk_prop() self.__set_up_expr(expr) - self.__seek() + self._next_id = None self._cache_id_in_use = NO_CACHE_ID + self._cp_file_handler = None + self.__set_up_ts_cp() + self.__seek_to_offset() - def __set_up_iteration_states(self): - self._kwargs[ITERATOR_FIELD] = "True" + def __seek_to_offset(self): + # read pk cursor from cp file, no need to seek offset + if self._next_id is not None: + return + offset = self._kwargs.get(OFFSET, 0) + if offset > 0: + seek_params = self._kwargs.copy() + seek_params[OFFSET] = 0 + seek_params[MILVUS_LIMIT] = offset + res = self._conn.query( + collection_name=self._collection_name, + expr=self._expr, + output_field=self._output_fields, + partition_name=self._partition_names, + timeout=self._timeout, + **seek_params, + ) + result_index = min(len(res), offset) + self.__update_cursor(res[:result_index]) + self._kwargs[OFFSET] = 0 + + def __init_cp_file_handler(self) -> bool: + mode = "w" + if self._cp_file_path.exists(): + mode = "r+" + try: + self._cp_file_handler = self._cp_file_path.open(mode) + except OSError as ose: + raise MilvusException( + message=f"Failed to open cp file for iterator:{self._cp_file_path_str}" + ) from ose + return mode == "r+" + + def __save_mvcc_ts(self): + assert_info( + self._cp_file_handler is not None, + "Must init cp file handler before saving session_ts", + ) + self._cp_file_handler.writelines(str(self._session_ts) + "\n") + + def __save_pk_cursor(self): + if self._need_save_cp and self._next_id is not None: + if not self._cp_file_path.exists(): + self._cp_file_handler.close() + self._cp_file_handler = self._cp_file_path.open("w") + self._buffer_cursor_lines_number = 0 + self.__save_mvcc_ts() + log.warning( + "iterator cp file is not existed any more, recreate for iteration, " + "do not remove this file manually!" 
+ ) + if self._buffer_cursor_lines_number >= 100: + self._cp_file_handler.seek(0) + self._cp_file_handler.truncate() + log.info( + "cursor lines in cp file has exceeded 100 lines, truncate the file and rewrite" + ) + self._buffer_cursor_lines_number = 0 + self._cp_file_handler.writelines(str(self._next_id) + "\n") + self._cp_file_handler.flush() + self._buffer_cursor_lines_number += 1 def __check_set_reduce_stop_for_best(self): - self._kwargs[REDUCE_STOP_FOR_BEST] = "True" + if self._kwargs.get(REDUCE_STOP_FOR_BEST, True): + self._kwargs[REDUCE_STOP_FOR_BEST] = "True" + else: + self._kwargs[REDUCE_STOP_FOR_BEST] = "False" def __check_set_batch_size(self, batch_size: int): if batch_size < 0: @@ -102,7 +199,7 @@ def __check_set_batch_size(self, batch_size: int): self._kwargs[BATCH_SIZE] = batch_size self._kwargs[MILVUS_LIMIT] = batch_size - # rely on pk prop, so this method should be called after __set_up_expr + # rely on pk prop, so this method should be called after __setup__pk_prop def __set_up_expr(self, expr: str): if expr is not None: self._expr = expr @@ -111,27 +208,67 @@ def __set_up_expr(self, expr: str): else: self._expr = self._pk_field_name + " < " + str(INT64_MAX) - def __seek(self): - self._cache_id_in_use = NO_CACHE_ID - if self._kwargs.get(OFFSET, 0) == 0: - self._next_id = None - return - - first_cursor_kwargs = self._kwargs.copy() - first_cursor_kwargs[OFFSET] = 0 - # offset may be too large, needed to seek in multiple times - first_cursor_kwargs[MILVUS_LIMIT] = self._kwargs[OFFSET] - + def __setup_ts_by_request(self): + init_ts_kwargs = self._kwargs.copy() + init_ts_kwargs[OFFSET] = 0 + init_ts_kwargs[MILVUS_LIMIT] = 1 + # just to set up mvccTs for iterator, no need correct limit res = self._conn.query( collection_name=self._collection_name, expr=self._expr, output_field=self._output_fields, partition_name=self._partition_names, timeout=self._timeout, - **first_cursor_kwargs, + **init_ts_kwargs, ) - self.__update_cursor(res) - self._kwargs[OFFSET] = 0 + if res is None: + raise MilvusException( + message="failed to connect to milvus for setting up " + "mvccTs, check milvus servers' status" + ) + if res.extra is not None: + self._session_ts = res.extra.get(ITERATOR_SESSION_TS_FIELD, 0) + if self._session_ts <= 0: + log.warning("failed to get mvccTs from milvus server, use client-side ts instead") + self._session_ts = fall_back_to_latest_session_ts() + self._kwargs[GUARANTEE_TIMESTAMP] = self._session_ts + + def __set_up_ts_cp(self): + self._buffer_cursor_lines_number = 0 + self._cp_file_path_str = self._kwargs.get(ITERATOR_SESSION_CP_FILE, None) + self._cp_file_path = None + # no input cp_file, set up mvccTs by query request + if self._cp_file_path_str is None: + self._need_save_cp = False + self.__setup_ts_by_request() + else: + self._need_save_cp = True + self._cp_file_path = Path(self._cp_file_path_str) + if not self.__init_cp_file_handler(): + # input cp file is empty, set up mvccTs by query request + self.__setup_ts_by_request() + io_operation(self.__save_mvcc_ts, "Failed to save mvcc ts") + else: + try: + # input cp file is not emtpy, init mvccTs by reading cp file + lines = self._cp_file_handler.readlines() + line_count = len(lines) + if line_count < 2: + raise ParamError( + message=f"input cp file:{self._cp_file_path_str} should contain " + f"at least two lines, but only:{line_count} lines" + ) + self._session_ts = int(lines[0]) + self._kwargs[GUARANTEE_TIMESTAMP] = self._session_ts + if line_count > 1: + self._buffer_cursor_lines_number = line_count - 1 + 
self._next_id = lines[self._buffer_cursor_lines_number].strip() + except OSError as ose: + raise MilvusException( + message=f"Failed to read cp info from file:{self._cp_file_path_str}" + ) from ose + except ValueError as e: + raise ParamError(message=f"cannot parse input cp session_ts:{lines[0]}") from e def __maybe_cache(self, result: List): if len(result) < 2 * self._kwargs[BATCH_SIZE]: @@ -154,6 +291,8 @@ def next(self): else: iterator_cache.release_cache(self._cache_id_in_use) current_expr = self.__setup_next_expr() + if self._print_iterator_cursor: + log.info(f"query_iterator_next_expr:{current_expr}") res = self._conn.query( collection_name=self._collection_name, expr=current_expr, @@ -167,6 +306,7 @@ def next(self): ret = self.__check_reached_limit(ret) self.__update_cursor(ret) + io_operation(self.__save_pk_cursor, "failed to save pk cursor") self._returned_count += len(ret) return ret @@ -192,7 +332,7 @@ def __setup__pk_prop(self): if self._pk_field_name is None or self._pk_field_name == "": raise MilvusException(message="schema must contain pk field, broke") - def __setup_next_expr(self) -> None: + def __setup_next_expr(self) -> str: current_expr = self._expr if self._next_id is None: return current_expr @@ -203,7 +343,7 @@ def __setup_next_expr(self) -> None: filtered_pk_str = f"{self._pk_field_name} > {self._next_id}" if current_expr is None or len(current_expr) == 0: return filtered_pk_str - return current_expr + " and " + filtered_pk_str + return "(" + current_expr + ")" + " and " + filtered_pk_str def __update_cursor(self, res: List) -> None: if len(res) == 0: @@ -213,12 +353,22 @@ def __update_cursor(self, res: List) -> None: def close(self) -> None: # release cache in use iterator_cache.release_cache(self._cache_id_in_use) + if self._cp_file_handler is not None: + + def inner_close(): + self._cp_file_handler.close() + self._cp_file_path.unlink() + log.info(f"removed cp file:{self._cp_file_path_str} for query iterator") + + io_operation( + inner_close, f"failed to clear cp file:{self._cp_file_path_str} for query iterator" + ) def metrics_positive_related(metrics: str) -> bool: if metrics in [CALC_DIST_L2, CALC_DIST_JACCARD, CALC_DIST_HAMMING, CALC_DIST_TANIMOTO]: return True - if metrics in [CALC_DIST_IP, CALC_DIST_COSINE]: + if metrics in [CALC_DIST_IP, CALC_DIST_COSINE, CALC_DIST_BM25]: return False raise MilvusException(message=f"unsupported metrics type for search iteration: {metrics}") @@ -227,12 +377,16 @@ class SearchPage(LoopBase): """Since we only support nq=1 in search iteration, so search iteration response should be different from raw response of search operation""" - def __init__(self, res: Hits): + def __init__(self, res: Hits, session_ts: Optional[int] = 0): super().__init__() + self._session_ts = session_ts self._results = [] if res is not None: self._results.append(res) + def get_session_ts(self): + return self._session_ts + def get_res(self): return self._results @@ -283,7 +437,7 @@ def __init__( self, connection: Connections, collection_name: str, - data: Union[List, entity_helper.SparseMatrixInputType], + data: Union[List, utils.SparseMatrixInputType], ann_field: str, param: Dict, batch_size: Optional[int] = 1000, @@ -318,7 +472,7 @@ def __init__( self.__check_set_params(param) self.__check_for_special_index_param() self._kwargs = kwargs - self.__set_up_iteration_states() + self._kwargs[ITERATOR_FIELD] = "True" self._filtered_ids = [] self._filtered_distance = None self._schema = schema @@ -328,10 +482,16 @@ def __init__( self.__check_offset() 
self.__check_rm_range_search_parameters() self.__setup__pk_prop() + check_set_flag(self, "_print_iterator_cursor", self._kwargs, PRINT_ITERATOR_CURSOR) self.__init_search_iterator() def __init_search_iterator(self): init_page = self.__execute_next_search(self._param, self._expr, False) + self._session_ts = init_page.get_session_ts() + if self._session_ts <= 0: + log.warning("failed to set up mvccTs from milvus server, use client-side ts instead") + self._session_ts = fall_back_to_latest_session_ts() + self._kwargs[GUARANTEE_TIMESTAMP] = self._session_ts if len(init_page) == 0: message = ( "Cannot init search iterator because init page contains no matched rows, " @@ -346,9 +506,6 @@ def __init_search_iterator(self): self.__update_filtered_ids(init_page) self._init_success = True - def __set_up_iteration_states(self): - self._kwargs[ITERATOR_FIELD] = "True" - def __update_width(self, page: SearchPage): first_hit, last_hit = page[0], page[-1] if metrics_positive_related(self._param[METRIC_TYPE]): @@ -455,9 +612,7 @@ def __update_filtered_ids(self, res: SearchPage): def __is_cache_enough(self, count: int) -> bool: cached_page = iterator_cache.fetch_cache(self._cache_id) - if cached_page is None or len(cached_page) < count: - return False - return True + return cached_page is not None and len(cached_page) >= count def __extract_page_from_cache(self, count: int) -> SearchPage: cached_page = iterator_cache.fetch_cache(self._cache_id) @@ -538,6 +693,8 @@ def __try_search_fill(self) -> SearchPage: def __execute_next_search( self, next_params: dict, next_expr: str, to_extend_batch: bool ) -> SearchPage: + if self._print_iterator_cursor: + log.info(f"search_iterator_next_expr:{next_expr}, next_params:{next_params}") res = self._conn.search( self._iterator_params["collection_name"], self._iterator_params["data"], @@ -552,7 +709,7 @@ def __execute_next_search( schema=self._schema, **self._kwargs, ) - return SearchPage(res[0]) + return SearchPage(res[0], res.get_session_ts()) # at present, the range_filter parameter means 'larger/less and equal', # so there would be vectors with same distances returned multiple times in different pages @@ -572,7 +729,7 @@ def __filtered_duplicated_result_expr(self, expr: str): if len(filtered_ids_str) > 0: if expr is not None and len(expr) > 0: filter_expr = f" and {self._pk_field_name} not in [{filtered_ids_str}]" - return expr + filter_expr + return "(" + expr + ")" + filter_expr return f"{self._pk_field_name} not in [{filtered_ids_str}]" return expr diff --git a/pymilvus/orm/partition.py b/pymilvus/orm/partition.py index cdcc6d6e2..e860f4cad 100644 --- a/pymilvus/orm/partition.py +++ b/pymilvus/orm/partition.py @@ -15,7 +15,7 @@ import pandas as pd import ujson -from pymilvus.client import entity_helper +from pymilvus.client import utils from pymilvus.client.abstract import BaseRanker, SearchResult from pymilvus.client.types import Replica from pymilvus.exceptions import MilvusException @@ -109,7 +109,7 @@ def is_empty(self) -> bool: return self.num_entities == 0 @property - def num_entities(self, **kwargs) -> int: + def num_entities(self) -> int: """int: number of entities in the partition Examples: @@ -132,7 +132,7 @@ def num_entities(self, **kwargs) -> int: """ conn = self._get_connection() stats = conn.get_partition_stats( - collection_name=self._collection.name, partition_name=self.name, **kwargs + collection_name=self._collection.name, partition_name=self.name ) result = {stat.key: stat.value for stat in stats} result["row_count"] = int(result["row_count"]) @@ 
-172,7 +172,7 @@ def drop(self, timeout: Optional[float] = None, **kwargs): conn = self._get_connection() return conn.drop_partition(self._collection.name, self.name, timeout=timeout, **kwargs) - def load(self, replica_number: int = 1, timeout: Optional[float] = None, **kwargs): + def load(self, replica_number: int = 0, timeout: Optional[float] = None, **kwargs): """Load the partition data into memory. Args: @@ -239,7 +239,7 @@ def release(self, timeout: Optional[float] = None, **kwargs): def insert( self, - data: Union[List, pd.DataFrame, entity_helper.SparseMatrixInputType], + data: Union[List, pd.DataFrame, utils.SparseMatrixInputType], timeout: Optional[float] = None, **kwargs, ) -> MutationResult: @@ -317,7 +317,7 @@ def delete(self, expr: str, timeout: Optional[float] = None, **kwargs): def upsert( self, - data: Union[List, pd.DataFrame, entity_helper.SparseMatrixInputType], + data: Union[List, pd.DataFrame, utils.SparseMatrixInputType], timeout: Optional[float] = None, **kwargs, ) -> MutationResult: @@ -357,7 +357,7 @@ def upsert( def search( self, - data: Union[List, entity_helper.SparseMatrixInputType], + data: Union[List, utils.SparseMatrixInputType], anns_field: str, param: Dict, limit: int, diff --git a/pymilvus/orm/prepare.py b/pymilvus/orm/prepare.py index 799fcad04..44080b68d 100644 --- a/pymilvus/orm/prepare.py +++ b/pymilvus/orm/prepare.py @@ -10,20 +10,17 @@ # or implied. See the License for the specific language governing permissions and limitations under # the License. -import copy from typing import List, Tuple, Union import numpy as np import pandas as pd -from pymilvus.client import entity_helper from pymilvus.client.types import DataType from pymilvus.exceptions import ( DataNotMatchException, DataTypeNotSupportException, ExceptionsMessage, ParamError, - UpsertAutoIDTrueException, ) from .schema import CollectionSchema @@ -31,10 +28,11 @@ class Prepare: @classmethod - def prepare_insert_data( + def prepare_data( cls, data: Union[List, Tuple, pd.DataFrame], schema: CollectionSchema, + is_insert: bool = True, ) -> List: if not isinstance(data, (list, tuple, pd.DataFrame)): raise DataTypeNotSupportException(message=ExceptionsMessage.DataTypeNotSupport) @@ -46,12 +44,15 @@ def prepare_insert_data( if ( schema.auto_id and schema.primary_field.name in data + and is_insert and not data[schema.primary_field.name].isnull().all() ): raise DataNotMatchException(message=ExceptionsMessage.AutoIDWithData) # TODO(SPARSE): support pd.SparseDtype for sparse float vector field for field in fields: - if field.is_primary and field.auto_id: + if field.is_primary and field.auto_id and is_insert: + continue + if field.is_function_output: continue values = [] if field.name in list(data.columns): @@ -59,12 +60,13 @@ def prepare_insert_data( entities.append({"name": field.name, "type": field.dtype, "values": values}) return entities - tmp_fields = copy.deepcopy(fields) - for i, field in enumerate(tmp_fields): - # TODO Goose: Checking auto_id and is_primary only, maybe different than - # schema.is_primary, schema.auto_id, need to check why and how schema is built. 
- if field.is_primary and field.auto_id: - tmp_fields.pop(i) + tmp_fields = list( + filter( + lambda field: not (field.is_primary and field.auto_id and is_insert) + and not field.is_function_output, + fields, + ) + ) vec_dtype_checker = { DataType.FLOAT_VECTOR: lambda ndarr: ndarr.dtype in ("float32", "float64"), @@ -152,14 +154,3 @@ def prepare_insert_data( entities.append({"name": field.name, "type": field.dtype, "values": d}) return entities - - @classmethod - def prepare_upsert_data( - cls, - data: Union[List, Tuple, pd.DataFrame, entity_helper.SparseMatrixInputType], - schema: CollectionSchema, - ) -> List: - if schema.auto_id: - raise UpsertAutoIDTrueException(message=ExceptionsMessage.UpsertAutoIDTrue) - - return cls.prepare_insert_data(data, schema) diff --git a/pymilvus/orm/schema.py b/pymilvus/orm/schema.py index 3d90c0d82..b2faa4d31 100644 --- a/pymilvus/orm/schema.py +++ b/pymilvus/orm/schema.py @@ -11,11 +11,13 @@ # the License. import copy +import json from typing import Any, Dict, List, Optional, Union import pandas as pd -from pandas.api.types import is_list_like +from pandas.api.types import is_list_like, is_scalar +from pymilvus.client.types import FunctionType from pymilvus.exceptions import ( AutoIDException, CannotInferSchemaException, @@ -25,15 +27,18 @@ ExceptionsMessage, FieldsTypeException, FieldTypeException, + FunctionsTypeException, + ParamError, PartitionKeyException, PrimaryKeyException, SchemaNotReadyException, - UpsertAutoIDTrueException, ) +from pymilvus.grpc_gen import schema_pb2 as schema_types from .constants import COMMON_TYPE_PARAMS from .types import ( DataType, + infer_dtype_by_scalar_data, infer_dtype_bydata, map_numpy_dtype_to_datatype, ) @@ -63,12 +68,8 @@ def validate_partition_key( ) -def validate_clustering_key( - clustering_key_field_name: Any, clustering_key_field: Any, primary_field_name: Any -): +def validate_clustering_key(clustering_key_field_name: Any, clustering_key_field: Any): if clustering_key_field is not None: - if clustering_key_field.name == primary_field_name: - raise ClusteringKeyException(message=ExceptionsMessage.ClusteringKeyNotPrimary) if clustering_key_field.dtype not in [ DataType.INT8, DataType.INT16, @@ -82,12 +83,14 @@ def validate_clustering_key( raise ClusteringKeyException(message=ExceptionsMessage.ClusteringKeyType) elif clustering_key_field_name is not None: raise ClusteringKeyException( - message=ExceptionsMessage.PartitionKeyFieldNotExist % clustering_key_field_name + message=ExceptionsMessage.ClusteringKeyFieldNotExist % clustering_key_field_name ) class CollectionSchema: - def __init__(self, fields: List, description: str = "", **kwargs): + def __init__( + self, fields: List, description: str = "", functions: Optional[List] = None, **kwargs + ): self._kwargs = copy.deepcopy(kwargs) self._fields = [] self._description = description @@ -97,10 +100,25 @@ def __init__(self, fields: List, description: str = "", **kwargs): self._partition_key_field = None self._clustering_key_field = None + if functions is None: + functions = [] + + if not isinstance(functions, list): + raise FunctionsTypeException(message=ExceptionsMessage.FunctionsType) + for function in functions: + if not isinstance(function, Function): + raise SchemaNotReadyException(message=ExceptionsMessage.FunctionIncorrectType) + self._functions = [copy.deepcopy(function) for function in functions] + if not isinstance(fields, list): raise FieldsTypeException(message=ExceptionsMessage.FieldsType) + for field in fields: + if not isinstance(field, 
FieldSchema): + raise FieldTypeException(message=ExceptionsMessage.FieldType) self._fields = [copy.deepcopy(field) for field in fields] + self._mark_output_fields() + self._check_kwargs() if kwargs.get("check_fields", True): self._check_fields() @@ -116,10 +134,6 @@ def _check_kwargs(self): if clustering_key_field_name is not None and not isinstance(clustering_key_field_name, str): raise ClusteringKeyException(message=ExceptionsMessage.ClusteringKeyFieldType) - for field in self._fields: - if not isinstance(field, FieldSchema): - raise FieldTypeException(message=ExceptionsMessage.FieldType) - if "auto_id" in self._kwargs and not isinstance(self._kwargs["auto_id"], bool): raise AutoIDException(0, ExceptionsMessage.AutoIDType) @@ -171,17 +185,54 @@ def _check_fields(self): validate_partition_key( partition_key_field_name, self._partition_key_field, self._primary_field.name ) - validate_clustering_key( - clustering_key_field_name, self._clustering_key_field, self._primary_field.name - ) + validate_clustering_key(clustering_key_field_name, self._clustering_key_field) auto_id = self._kwargs.get("auto_id", False) if auto_id: self._primary_field.auto_id = auto_id + def _check_functions(self): + for function in self._functions: + for output_field_name in function.output_field_names: + output_field = next( + (field for field in self._fields if field.name == output_field_name), None + ) + if output_field is None: + raise ParamError( + message=f"{ExceptionsMessage.FunctionMissingOutputField}: {output_field_name}" + ) + + if output_field is not None and ( + output_field.is_primary + or output_field.is_partition_key + or output_field.is_clustering_key + ): + raise ParamError(message=ExceptionsMessage.FunctionInvalidOutputField) + + for input_field_name in function.input_field_names: + input_field = next( + (field for field in self._fields if field.name == input_field_name), None + ) + if input_field is None: + raise ParamError( + message=f"{ExceptionsMessage.FunctionMissingInputField}: {input_field_name}" + ) + + function.verify(self) + + def _mark_output_fields(self): + for function in self._functions: + for output_field_name in function.output_field_names: + output_field = next( + (field for field in self._fields if field.name == output_field_name), None + ) + if output_field is not None: + output_field.is_function_output = True + def _check(self): self._check_kwargs() self._check_fields() + self._check_functions() def __repr__(self) -> str: return str(self.to_dict()) @@ -196,9 +247,15 @@ def __eq__(self, other: object): @classmethod def construct_from_dict(cls, raw: Dict): fields = [FieldSchema.construct_from_dict(field_raw) for field_raw in raw["fields"]] + if "functions" in raw: + functions = [ + Function.construct_from_dict(function_raw) for function_raw in raw["functions"] + ] + else: + functions = [] enable_dynamic_field = raw.get("enable_dynamic_field", False) return CollectionSchema( - fields, raw.get("description", ""), enable_dynamic_field=enable_dynamic_field + fields, raw.get("description", ""), functions, enable_dynamic_field=enable_dynamic_field ) @property @@ -226,6 +283,16 @@ def fields(self): """ return self._fields + @property + def functions(self): + """ + Returns the functions of the CollectionSchema. + + :return list: + List of Function, return when operation is successful. 
+ """ + return self._functions + @property def description(self): """ @@ -277,12 +344,15 @@ def enable_dynamic_field(self, value: bool): self._enable_dynamic_field = bool(value) def to_dict(self): - return { + res = { "auto_id": self.auto_id, "description": self._description, "fields": [s.to_dict() for s in self._fields], "enable_dynamic_field": self.enable_dynamic_field, } + if self._functions is not None and len(self._functions) > 0: + res["functions"] = [s.to_dict() for s in self._functions] + return res def verify(self): # final check, detect obvious problems @@ -291,6 +361,14 @@ def verify(self): def add_field(self, field_name: str, datatype: DataType, **kwargs): field = FieldSchema(field_name, datatype, **kwargs) self._fields.append(field) + self._mark_output_fields() + return self + + def add_function(self, function: "Function"): + if not isinstance(function, Function): + raise ParamError(message=ExceptionsMessage.FunctionIncorrectType) + self._functions.append(function) + self._mark_output_fields() return self @@ -311,6 +389,7 @@ def __init__(self, name: str, dtype: DataType, description: str = "", **kwargs) raise PrimaryKeyException(message=ExceptionsMessage.IsPrimaryType) self.is_primary = kwargs.get("is_primary", False) self.is_dynamic = kwargs.get("is_dynamic", False) + self.nullable = kwargs.get("nullable", False) self.auto_id = kwargs.get("auto_id", False) if "auto_id" in kwargs: if not isinstance(self.auto_id, bool): @@ -324,8 +403,19 @@ def __init__(self, name: str, dtype: DataType, description: str = "", **kwargs) raise ClusteringKeyException(message=ExceptionsMessage.IsClusteringKeyType) self.is_partition_key = kwargs.get("is_partition_key", False) self.is_clustering_key = kwargs.get("is_clustering_key", False) - self.element_type = kwargs.get("element_type", None) + self.default_value = kwargs.get("default_value") + if "default_value" in kwargs and self.default_value is None and not self.nullable: + raise ParamError(message=ExceptionsMessage.DefaultValueInvalid) + if isinstance(self.default_value, schema_types.ValueField): + if self.default_value.WhichOneof("data") is None: + self.default_value = None + else: + self.default_value = infer_default_value_bydata(kwargs.get("default_value")) + self.element_type = kwargs.get("element_type") + if "mmap_enabled" in kwargs: + self._type_params["mmap_enabled"] = kwargs["mmap_enabled"] self._parse_type_params() + self.is_function_output = False def __repr__(self) -> str: return str(self.to_dict()) @@ -349,13 +439,25 @@ def _parse_type_params(self): return if not self._kwargs: return - # currently only support "dim", "max_length", "max_capacity" if self._kwargs: for k in COMMON_TYPE_PARAMS: if k in self._kwargs: if self._type_params is None: self._type_params = {} - self._type_params[k] = int(self._kwargs[k]) + if isinstance(self._kwargs[k], str): + if self._kwargs[k].lower() == "true": + self._type_params[k] = True + continue + if self._kwargs[k].lower() == "false": + self._type_params[k] = False + continue + if k == "analyzer_params": + # TODO: a more complicate json may be reordered which + # can still cause server_schema == schema to be False. + # need a better approach. 
+ self._type_params[k] = json.loads(self._kwargs[k]) + continue + self._type_params[k] = self._kwargs[k] @classmethod def construct_from_dict(cls, raw: Dict): @@ -366,9 +468,15 @@ def construct_from_dict(cls, raw: Dict): kwargs["auto_id"] = raw.get("auto_id") kwargs["is_partition_key"] = raw.get("is_partition_key", False) kwargs["is_clustering_key"] = raw.get("is_clustering_key", False) + if raw.get("default_value") is not None: + kwargs["default_value"] = raw.get("default_value") kwargs["is_dynamic"] = raw.get("is_dynamic", False) + kwargs["nullable"] = raw.get("nullable", False) kwargs["element_type"] = raw.get("element_type") - return FieldSchema(raw["name"], raw["type"], raw.get("description", ""), **kwargs) + is_function_output = raw.get("is_function_output", False) + fs = FieldSchema(raw["name"], raw["type"], raw.get("description", ""), **kwargs) + fs.is_function_output = is_function_output + return fs def to_dict(self): _dict = { @@ -383,12 +491,20 @@ def to_dict(self): _dict["auto_id"] = self.auto_id if self.is_partition_key: _dict["is_partition_key"] = True + if self.default_value is not None: + if self.default_value.WhichOneof("data") is None: + self.default_value = None + _dict["default_value"] = self.default_value if self.is_dynamic: _dict["is_dynamic"] = self.is_dynamic + if self.nullable: + _dict["nullable"] = self.nullable if self.dtype == DataType.ARRAY and self.element_type: _dict["element_type"] = self.element_type if self.is_clustering_key: _dict["is_clustering_key"] = True + if self.is_function_output: + _dict["is_function_output"] = True return _dict def __getattr__(self, item: str): @@ -441,6 +557,135 @@ def dtype(self) -> DataType: return self._dtype +class Function: + def __init__( + self, + name: str, + function_type: FunctionType, + input_field_names: Union[str, List[str]], + output_field_names: Union[str, List[str]], + description: str = "", + params: Optional[Dict] = None, + ): + self._name = name + self._description = description + input_field_names = ( + [input_field_names] if isinstance(input_field_names, str) else input_field_names + ) + output_field_names = ( + [output_field_names] if isinstance(output_field_names, str) else output_field_names + ) + try: + self._type = FunctionType(function_type) + except ValueError as err: + raise ParamError(message=ExceptionsMessage.UnknownFunctionType) from err + + for field_name in list(input_field_names) + list(output_field_names): + if not isinstance(field_name, str): + raise ParamError(message=ExceptionsMessage.FunctionIncorrectInputOutputType) + if len(input_field_names) != len(set(input_field_names)): + raise ParamError(message=ExceptionsMessage.FunctionDuplicateInputs) + if len(output_field_names) != len(set(output_field_names)): + raise ParamError(message=ExceptionsMessage.FunctionDuplicateOutputs) + + if set(input_field_names) & set(output_field_names): + raise ParamError(message=ExceptionsMessage.FunctionCommonInputOutput) + + self._input_field_names = input_field_names + self._output_field_names = output_field_names + self._params = params if params is not None else {} + + @property + def name(self): + return self._name + + @property + def description(self): + return self._description + + @property + def type(self): + return self._type + + @property + def input_field_names(self): + return self._input_field_names + + @property + def output_field_names(self): + return self._output_field_names + + @property + def params(self): + return self._params + + def _check_bm25_function(self, schema: CollectionSchema): + 
if len(self._input_field_names) != 1 or len(self._output_field_names) != 1: + raise ParamError(message=ExceptionsMessage.BM25FunctionIncorrectInputOutputCount) + + for field in schema.fields: + if field.name == self._input_field_names[0] and field.dtype != DataType.VARCHAR: + raise ParamError(message=ExceptionsMessage.BM25FunctionIncorrectInputFieldType) + if ( + field.name == self._output_field_names[0] + and field.dtype != DataType.SPARSE_FLOAT_VECTOR + ): + raise ParamError(message=ExceptionsMessage.BM25FunctionIncorrectOutputFieldType) + + def _check_text_embedding_function(self, schema: CollectionSchema): + if len(self._input_field_names) != 1 or len(self._output_field_names) != 1: + raise ParamError( + message=ExceptionsMessage.TextEmbeddingFunctionIncorrectInputOutputCount + ) + + for field in schema.fields: + if field.name == self._input_field_names[0] and field.dtype != DataType.VARCHAR: + raise ParamError( + message=ExceptionsMessage.TextEmbeddingFunctionIncorrectInputFieldType + ) + if field.name == self._output_field_names[0] and field.dtype != DataType.FLOAT_VECTOR: + raise ParamError( + message=ExceptionsMessage.TextEmbeddingFunctionIncorrectOutputFieldType + ) + + def verify(self, schema: CollectionSchema): + if self._type == FunctionType.BM25: + self._check_bm25_function(schema) + elif self._type == FunctionType.TEXTEMBEDDING: + self._check_text_embedding_function(schema) + elif self._type == FunctionType.UNKNOWN: + raise ParamError(message=ExceptionsMessage.UnknownFunctionType) + + @classmethod + def construct_from_dict(cls, raw: Dict): + return Function( + raw["name"], + raw["type"], + raw["input_field_names"], + raw["output_field_names"], + raw["description"], + raw["params"], + ) + + def __repr__(self) -> str: + return str(self.to_dict()) + + def to_dict(self): + return { + "name": self._name, + "description": self._description, + "type": self._type, + "input_field_names": self._input_field_names, + "output_field_names": self._output_field_names, + "params": self._params, + } + + def __eq__(self, value: object) -> bool: + if not isinstance(value, Function): + return False + return self.to_dict() == value.to_dict() + + def is_valid_insert_data(data: Union[pd.DataFrame, list, dict]) -> bool: """DataFrame, list, dict are valid insert data""" return isinstance(data, (pd.DataFrame, list, dict)) @@ -482,34 +727,29 @@ def _check_insert_data(data: Union[List[List], pd.DataFrame]): is_dataframe = isinstance(data, pd.DataFrame) for col in data: if not is_dataframe and not is_list_like(col): - raise DataTypeNotSupportException(message="data should be a list of list") - + raise DataTypeNotSupportException(message="The data should be a list of list") -def _check_data_schema_cnt(schema: CollectionSchema, data: Union[List[List], pd.DataFrame]): - tmp_fields = copy.deepcopy(schema.fields) - for i, field in enumerate(tmp_fields): - if field.is_primary and field.auto_id: - tmp_fields.pop(i) - field_cnt = len(tmp_fields) +def _check_data_schema_cnt(fields: List, data: Union[List[List], pd.DataFrame]): + field_cnt = len([f for f in fields if not f.is_function_output]) is_dataframe = isinstance(data, pd.DataFrame) data_cnt = len(data.columns) if is_dataframe else len(data) if field_cnt != data_cnt: message = ( - f"The data don't match with schema fields, expect {field_cnt} list, got {len(data)}" + f"The data doesn't match with schema fields, expect {field_cnt} list, got {len(data)}" ) if is_dataframe: - i_name = [f.name for f in tmp_fields] + i_name = [f.name for f in fields] t_name = 
list(data.columns) message = f"The fields don't match with schema fields, expected: {i_name}, got {t_name}" raise DataNotMatchException(message=message) if is_dataframe: - for x, y in zip(list(data.columns), tmp_fields): + for x, y in zip(list(data.columns), fields): if x != y.name: raise DataNotMatchException( - message=f"The name of field don't match, expected: {y.name}, got {x}" + message=f"The name of field doesn't match, expected: {y.name}, got {x}" ) @@ -522,19 +762,29 @@ def check_insert_schema(schema: CollectionSchema, data: Union[List[List], pd.Dat raise DataNotMatchException(message=msg) columns = list(data.columns) columns.remove(schema.primary_field) - data = data[[columns]] + data = data[columns] + + tmp_fields = list( + filter( + lambda field: not (field.is_primary and field.auto_id) and not field.is_function_output, + schema.fields, + ) + ) - _check_data_schema_cnt(schema, data) + _check_data_schema_cnt(tmp_fields, data) _check_insert_data(data) def check_upsert_schema(schema: CollectionSchema, data: Union[List[List], pd.DataFrame]): if schema is None: raise SchemaNotReadyException(message="Schema shouldn't be None") - if schema.auto_id: - raise UpsertAutoIDTrueException(message=ExceptionsMessage.UpsertAutoIDTrue) + if isinstance(data, pd.DataFrame): + if schema.primary_field.name not in data or data[schema.primary_field.name].isnull().all(): + raise DataNotMatchException(message=ExceptionsMessage.UpsertPrimaryKeyEmpty) + columns = list(data.columns) + data = data[columns] - _check_data_schema_cnt(schema, data) + _check_data_schema_cnt(copy.deepcopy(schema.fields), data) _check_insert_data(data) @@ -603,3 +853,27 @@ def check_schema(schema: CollectionSchema): vector_fields.append(field.name) if len(vector_fields) < 1: raise SchemaNotReadyException(message=ExceptionsMessage.NoVector) + + +def infer_default_value_bydata(data: Any): + if data is None: + return None + default_data = schema_types.ValueField() + d_type = DataType.UNKNOWN + if is_scalar(data): + d_type = infer_dtype_by_scalar_data(data) + if d_type is DataType.BOOL: + default_data.bool_data = data + elif d_type in (DataType.INT8, DataType.INT16, DataType.INT32): + default_data.int_data = data + elif d_type is DataType.INT64: + default_data.long_data = data + elif d_type is DataType.FLOAT: + default_data.float_data = data + elif d_type is DataType.DOUBLE: + default_data.double_data = data + elif d_type is DataType.VARCHAR: + default_data.string_data = data + else: + raise ParamError(message=f"Default value unsupported data type: {d_type}") + return default_data diff --git a/pymilvus/orm/types.py b/pymilvus/orm/types.py index eebbb50bc..5c6a375d1 100644 --- a/pymilvus/orm/types.py +++ b/pymilvus/orm/types.py @@ -73,6 +73,8 @@ def is_numeric_datatype(data_type: DataType): # pylint: disable=too-many-return-statements def infer_dtype_by_scalar_data(data: Any): + if isinstance(data, list): + return DataType.ARRAY if isinstance(data, float): return DataType.DOUBLE if isinstance(data, bool): diff --git a/pymilvus/orm/utility.py b/pymilvus/orm/utility.py index 9b522b8bb..7844dbb66 100644 --- a/pymilvus/orm/utility.py +++ b/pymilvus/orm/utility.py @@ -1299,7 +1299,7 @@ def list_indexes( :rtype: str list """ indexes = _get_connection(using).list_indexes(collection_name, timeout, **kwargs) - field_name = kwargs.get("field_name", None) + field_name = kwargs.get("field_name") index_name_list = [] for index in indexes: if index is not None: diff --git a/pymilvus/settings.py b/pymilvus/settings.py index b1f89d428..c4e745073 100644 
--- a/pymilvus/settings.py +++ b/pymilvus/settings.py @@ -1,21 +1,18 @@ -import contextlib import logging.config +import os -import environs +from dotenv import load_dotenv -env = environs.Env() - -with contextlib.suppress(Exception): - env.read_env(".env") +load_dotenv() class Config: # legacy env MILVUS_DEFAULT_CONNECTION, not recommended - LEGACY_URI = env.str("MILVUS_DEFAULT_CONNECTION", "") - MILVUS_URI = env.str("MILVUS_URI", LEGACY_URI) + LEGACY_URI = str(os.getenv("MILVUS_DEFAULT_CONNECTION", "")) + MILVUS_URI = str(os.getenv("MILVUS_URI", LEGACY_URI)) - MILVUS_CONN_ALIAS = env.str("MILVUS_CONN_ALIAS", "default") - MILVUS_CONN_TIMEOUT = env.float("MILVUS_CONN_TIMEOUT", 10) + MILVUS_CONN_ALIAS = str(os.getenv("MILVUS_CONN_ALIAS", "default")) + MILVUS_CONN_TIMEOUT = float(os.getenv("MILVUS_CONN_TIMEOUT", "10.0")) # legacy configs: DEFAULT_USING = MILVUS_CONN_ALIAS @@ -29,7 +26,7 @@ class Config: DEFAULT_HOST = "localhost" DEFAULT_PORT = "19530" - WaitTimeDurationWhenLoad = 0.5 # in seconds + WaitTimeDurationWhenLoad = 0.2 # in seconds MaxVarCharLengthKey = "max_length" MaxVarCharLength = 65535 EncodeProtocol = "utf-8" diff --git a/pyproject.toml b/pyproject.toml index c237b5dff..1357ef11e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,22 +17,24 @@ requires-python = '>=3.8' description = "Python Sdk for Milvus" readme = "README.md" dependencies=[ - "setuptools >= 67", # python3.12 pkg_resources - "grpcio>=1.49.1,<=1.60.0", + "setuptools>69", + "setuptools<70.1;python_version<='3.8'", + "grpcio>=1.49.1", "protobuf>=3.20.0", - "environs<=9.5.0", + "python-dotenv>=1.0.1, <2.0.0", "ujson>=2.0.0", "pandas>=1.2.4", "numpy<1.25.0;python_version<='3.8'", - "requests", - "minio>=7.0.0", - "pyarrow>=12.0.0", - "azure-storage-blob", - "scipy", + "milvus-lite>=2.4.0;sys_platform!='win32'", ] classifiers=[ "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "License :: OSI Approved :: Apache Software License", ] @@ -42,16 +44,27 @@ dynamic = ["version"] "repository" = 'https://github.com/milvus-io/pymilvus' [project.optional-dependencies] +bulk_writer = [ + "requests", + "minio>=7.0.0", + "pyarrow>=12.0.0", + "azure-storage-blob", +] + model = [ "milvus-model>=0.1.0", ] -test = [ +dev = [ + # The generated codes of 1.63.0 is incompatible with versions < 1.63.0, + # so we use fixed grpcio version to develop. + "grpcio==1.62.2", + "grpcio-tools==1.62.2", + "grpcio-testing==1.62.2", "pytest>=5.3.4", "pytest-cov>=2.8.1", "pytest-timeout>=1.3.4", - "grpcio-testing", - "ruff>=0.3.3", + "ruff>0.4.0", "black", ] @@ -113,6 +126,7 @@ lint.ignore = [ "PLR0915", # To many statements TODO "C901", # TODO "PYI041", # TODO + "E402", ] # Allow autofix for all enabled rules (when `--fix`) is provided. 
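Reviewer note, not part of the patch: a minimal, hypothetical sketch of how the new dotenv-based settings above behave, assuming a local `.env` file with the values shown in the comments (the file contents and URI are illustrative only).

# Hypothetical `.env` picked up by load_dotenv() when pymilvus.settings is first imported:
#     MILVUS_URI=http://localhost:19530
#     MILVUS_CONN_TIMEOUT=5.0
from pymilvus.settings import Config

# Values are now plain os.getenv lookups with the defaults shown in the diff.
print(Config.MILVUS_URI)           # "http://localhost:19530" if set; otherwise falls back to MILVUS_DEFAULT_CONNECTION, else ""
print(Config.MILVUS_CONN_TIMEOUT)  # 5.0 if set; otherwise 10.0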
diff --git a/requirements.txt b/requirements.txt index 1c4cbd485..f576391d7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,10 @@ build==0.4.0 -certifi==2023.7.22 +certifi==2024.7.4 chardet==4.0.0 -environs==9.5.0 -grpcio==1.60.0 -grpcio-testing==1.60.0 -grpcio-tools==1.60.0 +python-dotenv>=1.0.1, <2.0.0 +grpcio==1.62.2 +grpcio-testing==1.62.2 +grpcio-tools==1.62.2 protobuf==4.25.2 idna==3.7 packaging==20.9 @@ -13,9 +13,8 @@ pyparsing==2.4.7 six==1.16.0 toml==0.10.2 ujson>=2.0.0 -urllib3==1.26.18 +urllib3==1.26.19 m2r==0.3.1 -scipy>=1.9.3 Sphinx==4.0.0 sphinx-copybutton sphinx-rtd-theme @@ -27,7 +26,7 @@ sphinxcontrib-qthelp sphinxcontrib-serializinghtml sphinxcontrib-napoleon sphinxcontrib-prettyspecialmethods -tqdm==4.65.0 +tqdm==4.66.3 pyarrow>=12.0.0 pytest>=5.3.4 pytest-cov>=2.8.1 @@ -38,3 +37,4 @@ black requests minio azure-storage-blob +milvus-lite>=2.4.0 diff --git a/tests/test_create_collection.py b/tests/test_create_collection.py index 11fc1d502..1b6b23425 100644 --- a/tests/test_create_collection.py +++ b/tests/test_create_collection.py @@ -202,3 +202,45 @@ def test_create_bf16_collection(self, collection_name): return_value = future.result() assert return_value.code == 0 assert return_value.reason == "success" + + def test_create_clustering_key_collection(self, collection_name): + id_field = { + "name": "my_id", + "type": DataType.INT64, + "auto_id": True, + "is_primary": True, + "is_clustering_key": True, + } + vector_field = { + "name": "embedding", + "type": DataType.FLOAT_VECTOR, + "metric_type": "L2", + "params": {"dim": "4"}, + } + fields = {"fields": [id_field, vector_field]} + future = self._milvus.create_collection( + collection_name=collection_name, fields=fields, _async=True + ) + + invocation_metadata, request, rpc = self._real_time_channel.take_unary_unary( + self._servicer.methods_by_name["CreateCollection"] + ) + rpc.send_initial_metadata(()) + rpc.terminate( + common_pb2.Status( + code=ErrorCode.SUCCESS, error_code=common_pb2.Success, reason="success" + ), + (), + grpc.StatusCode.OK, + "", + ) + + request_schema = schema_pb2.CollectionSchema() + request_schema.ParseFromString(request.schema) + + assert request.collection_name == collection_name + assert Fields.equal(request_schema.fields, fields["fields"]) + + return_value = future.result() + assert return_value.code == 0 + assert return_value.reason == "success" diff --git a/tests/test_milvus_lite.py b/tests/test_milvus_lite.py new file mode 100644 index 000000000..544da9f17 --- /dev/null +++ b/tests/test_milvus_lite.py @@ -0,0 +1,66 @@ +import os +import sys +from tempfile import TemporaryDirectory +import numpy as np +import pytest + +from pymilvus.milvus_client import MilvusClient +from pymilvus.exceptions import ConnectionConfigException + + +@pytest.mark.skipif(sys.platform.startswith('win'), reason="Milvus Lite is not supported on Windows") +class TestMilvusLite: + def test_milvus_lite(self): + with TemporaryDirectory(dir='./') as root: + db_file = os.path.join(root, 'test.db') + client = MilvusClient(db_file) + client.create_collection( + collection_name="demo_collection", + dimension=3 + ) + + # Text strings to search from. 
+ docs = [ + "Artificial intelligence was founded as an academic discipline in 1956.", + "Alan Turing was the first person to conduct substantial research in AI.", + "Born in Maida Vale, London, Turing was raised in southern England.", + ] + + vectors = [[np.random.uniform(-1, 1) for _ in range(3) ] for _ in range(len(docs))] + data = [{"id": i, "vector": vectors[i], "text": docs[i], "subject": "history"} for i in range(len(vectors))] + res = client.insert( + collection_name="demo_collection", + data=data + ) + assert res["insert_count"] == 3 + + res = client.search( + collection_name="demo_collection", + data=[vectors[0]], + filter="subject == 'history'", + limit=2, + output_fields=["text", "subject"], + ) + assert len(res[0]) == 2 + + # a query that retrieves all entities matching filter expressions. + res = client.query( + collection_name="demo_collection", + filter="subject == 'history'", + output_fields=["text", "subject"], + ) + assert len(res) == 3 + + # delete + res = client.delete( + collection_name="demo_collection", + filter="subject == 'history'", + ) + assert len(res) == 3 + + def test_illegal_name(self): + try: + MilvusClient("localhost") + assert False + except ConnectionConfigException as e: + assert e.message == "uri: localhost is illegal, needs start with [unix, http, https, tcp] or a local file endswith [.db]" diff --git a/tests/test_prepare.py b/tests/test_prepare.py index 363d316c0..222347b12 100644 --- a/tests/test_prepare.py +++ b/tests/test_prepare.py @@ -1,5 +1,7 @@ import pytest +import json +from pymilvus.client.constants import PAGE_RETAIN_ORDER_FIELD from pymilvus.client.prepare import Prepare from pymilvus import DataType, MilvusException, CollectionSchema, FieldSchema from pymilvus import DefaultConfig @@ -24,17 +26,26 @@ def test_search_requests_with_expr_offset(self): search_params = { "metric_type": "L2", "offset": 10, + "params": {"page_retain_order": True} } ret = Prepare.search_requests_with_expr("name", data, "v", search_params, 100) offset_exists = False + page_retain_order_exists = False + print(ret.search_params) for p in ret.search_params: if p.key == "offset": offset_exists = True assert p.value == "10" + elif p.key == "params": + params = json.loads(p.value) + if PAGE_RETAIN_ORDER_FIELD in params: + page_retain_order_exists = True + assert params[PAGE_RETAIN_ORDER_FIELD] == True assert offset_exists is True + assert page_retain_order_exists is True class TestCreateCollectionRequest: @@ -179,7 +190,58 @@ def test_row_insert_param_with_auto_id(self): ] Prepare.row_insert_param("", rows, "", fields_info=schema.to_dict()["fields"], enable_dynamic=True) + + def test_row_insert_param_with_none(self): + import numpy as np + rng = np.random.default_rng(seed=19530) + dim = 8 + schema = CollectionSchema([ + FieldSchema("float_vector", DataType.FLOAT_VECTOR, dim=dim), + FieldSchema("nullable_field", DataType.INT64, nullable=True), + FieldSchema("default_field", DataType.FLOAT, default_value=10), + FieldSchema("pk_field", DataType.INT64, is_primary=True, auto_id=True), + FieldSchema("float", DataType.DOUBLE), + ]) + rows = [ + {"float": 1.0,"nullable_field": None, "default_field": None,"float_vector": rng.random((1, dim))[0], "a": 1}, + {"float": 1.0, "float_vector": rng.random((1, dim))[0], "b": 1}, + ] + + Prepare.row_insert_param("", rows, "", fields_info=schema.to_dict()["fields"], enable_dynamic=True) + + def test_row_upsert_param_with_auto_id(self): + import numpy as np + rng = np.random.default_rng(seed=19530) + dim = 8 + schema = CollectionSchema([ + 
FieldSchema("float_vector", DataType.FLOAT_VECTOR, dim=dim), + FieldSchema("pk_field", DataType.INT64, is_primary=True, auto_id=True), + FieldSchema("float", DataType.DOUBLE) + ]) + rows = [ + {"pk_field":1, "float": 1.0, "float_vector": rng.random((1, dim))[0], "a": 1}, + {"pk_field":2, "float": 1.0, "float_vector": rng.random((1, dim))[0], "b": 1}, + ] + + Prepare.row_upsert_param("", rows, "", fields_info=schema.to_dict()["fields"], enable_dynamic=True) + + def test_upsert_param_with_none(self): + import numpy as np + rng = np.random.default_rng(seed=19530) + dim = 8 + schema = CollectionSchema([ + FieldSchema("float_vector", DataType.FLOAT_VECTOR, dim=dim), + FieldSchema("nullable_field", DataType.INT64, nullable=True), + FieldSchema("default_field", DataType.FLOAT, default_value=10), + FieldSchema("pk_field", DataType.INT64, is_primary=True, auto_id=True), + FieldSchema("float", DataType.DOUBLE), + ]) + rows = [ + {"pk_field":1, "float": 1.0,"nullable_field": None, "default_field": None,"float_vector": rng.random((1, dim))[0], "a": 1}, + {"pk_field":2, "float": 1.0, "float_vector": rng.random((1, dim))[0], "b": 1}, + ] + Prepare.row_upsert_param("", rows, "", fields_info=schema.to_dict()["fields"], enable_dynamic=True) class TestAlterCollectionRequest: def test_alter_collection_request(self):