diff --git a/packages/rs-drive-abci/src/query/document_query/v0/mod.rs b/packages/rs-drive-abci/src/query/document_query/v0/mod.rs index 8af82e39cc0..d4177878bce 100644 --- a/packages/rs-drive-abci/src/query/document_query/v0/mod.rs +++ b/packages/rs-drive-abci/src/query/document_query/v0/mod.rs @@ -180,9 +180,20 @@ impl Platform { #[cfg(test)] mod tests { use super::*; - use crate::query::tests::{assert_invalid_identifier, setup_platform, store_data_contract}; + use crate::query::tests::{ + assert_invalid_identifier, setup_platform, store_data_contract, store_document, + }; + use assert_matches::assert_matches; + use ciborium::value::Value as CborValue; use dpp::dashcore::Network; + use dpp::data_contract::document_type::random_document::CreateRandomDocument; + use dpp::document::{Document, DocumentV0, DocumentV0Getters}; use dpp::tests::fixtures::get_data_contract_fixture; + use drive::query::{InternalClauses, OrderClause, WhereClause, WhereOperator}; + use indexmap::IndexMap; + use rand::rngs::StdRng; + use rand::SeedableRng; + use std::collections::BTreeMap; #[test] fn test_invalid_document_id() { @@ -479,4 +490,1087 @@ mod tests { }) )); } + + #[test] + fn test_documents_single_item_proof() { + let (platform, state, version) = setup_platform(None, Network::Testnet, None); + + let platform_version = PlatformVersion::latest(); + let created_data_contract = get_data_contract_fixture(None, 0, version.protocol_version); + store_data_contract(&platform, created_data_contract.data_contract(), version); + + let data_contract_id = created_data_contract.data_contract().id(); + let document_type_name = "niceDocument"; + let document_type = created_data_contract + .data_contract() + .document_type_for_name(document_type_name) + .expect("expected document type"); + + let random_document = document_type + .random_document(Some(4), platform_version) + .expect("expected to get random document"); + + store_document( + &platform, + created_data_contract.data_contract(), + document_type, + &random_document, + platform_version, + ); + + let drive_document_query = DriveDocumentQuery { + contract: &created_data_contract.data_contract(), + document_type, + internal_clauses: Default::default(), + offset: None, + limit: Some(1), + order_by: Default::default(), + start_at: None, + start_at_included: false, + block_time_ms: None, + }; + + let request = GetDocumentsRequestV0 { + data_contract_id: data_contract_id.to_vec(), + document_type: document_type_name.to_string(), + r#where: vec![], + limit: 1, + order_by: vec![], + prove: true, + start: None, + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + let Some(GetDocumentsResponseV0 { + result: Some(get_documents_response_v0::Result::Proof(proof)), + metadata: Some(_), + }) = result.data + else { + panic!("expected proof") + }; + + let (_, documents) = drive_document_query + .verify_proof(&proof.grovedb_proof, platform_version) + .expect("expected to verify proof"); + + assert_eq!(documents.len(), 1); + assert_eq!(documents.get(0).expect("first"), &random_document); + } + + #[test] + fn test_documents_range_proof() { + let (platform, state, version) = setup_platform(None, Network::Testnet, None); + + let platform_version = PlatformVersion::latest(); + let created_data_contract = get_data_contract_fixture(None, 0, version.protocol_version); + store_data_contract(&platform, created_data_contract.data_contract(), version); + + let data_contract_id = created_data_contract.data_contract().id(); + 
let document_type_name = "niceDocument"; + let document_type = created_data_contract + .data_contract() + .document_type_for_name(document_type_name) + .expect("expected document type"); + + let mut std_rng = StdRng::seed_from_u64(393); + let mut documents_by_id = BTreeMap::new(); + for _i in 0..20 { + let random_document = document_type + .random_document_with_rng(&mut std_rng, platform_version) + .expect("expected to get random document"); + store_document( + &platform, + created_data_contract.data_contract(), + document_type, + &random_document, + platform_version, + ); + documents_by_id.insert(random_document.id(), random_document); + } + + let drive_document_query = DriveDocumentQuery { + contract: &created_data_contract.data_contract(), + document_type, + internal_clauses: Default::default(), + offset: None, + limit: Some(10), + order_by: Default::default(), + start_at: None, + start_at_included: false, + block_time_ms: None, + }; + + let request = GetDocumentsRequestV0 { + data_contract_id: data_contract_id.to_vec(), + document_type: document_type_name.to_string(), + r#where: vec![], + limit: 10, + order_by: vec![], + prove: true, + start: None, + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + let Some(GetDocumentsResponseV0 { + result: Some(get_documents_response_v0::Result::Proof(proof)), + metadata: Some(_), + }) = result.data + else { + panic!("expected proof") + }; + + let (_, queried_documents) = drive_document_query + .verify_proof(&proof.grovedb_proof, platform_version) + .expect("expected to verify proof"); + + assert_eq!(queried_documents.len(), 10); + assert_eq!( + queried_documents.get(9).expect("first"), + documents_by_id + .values() + .nth(9) + .expect("expected to get 9th document") + ); + } + + #[test] + fn test_documents_start_after_proof_primary_index() { + let (platform, state, version) = setup_platform(None, Network::Testnet, None); + + let platform_version = PlatformVersion::latest(); + let created_data_contract = get_data_contract_fixture(None, 0, version.protocol_version); + store_data_contract(&platform, created_data_contract.data_contract(), version); + + let data_contract_id = created_data_contract.data_contract().id(); + let document_type_name = "niceDocument"; + let document_type = created_data_contract + .data_contract() + .document_type_for_name(document_type_name) + .expect("expected document type"); + + let mut std_rng = StdRng::seed_from_u64(393); + let mut documents_by_id = BTreeMap::new(); + for _i in 0..20 { + let random_document = document_type + .random_document_with_rng(&mut std_rng, platform_version) + .expect("expected to get random document"); + store_document( + &platform, + created_data_contract.data_contract(), + document_type, + &random_document, + platform_version, + ); + documents_by_id.insert(random_document.id(), random_document); + } + + let after = documents_by_id + .keys() + .nth(9) + .expect("expected to get 9th document") + .to_buffer(); + + let drive_document_query = DriveDocumentQuery { + contract: &created_data_contract.data_contract(), + document_type, + internal_clauses: Default::default(), + offset: None, + limit: Some(10), + order_by: Default::default(), + start_at: Some(after), + start_at_included: false, + block_time_ms: None, + }; + + let request = GetDocumentsRequestV0 { + data_contract_id: data_contract_id.to_vec(), + document_type: document_type_name.to_string(), + r#where: vec![], + limit: 10, + order_by: vec![], + prove: true, + start: 
Some(Start::StartAfter(after.to_vec())), + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + let Some(GetDocumentsResponseV0 { + result: Some(get_documents_response_v0::Result::Proof(proof)), + metadata: Some(_), + }) = result.data + else { + panic!("expected proof") + }; + + let (_, queried_documents) = drive_document_query + .verify_proof(&proof.grovedb_proof, platform_version) + .expect("expected to verify proof"); + + assert_eq!(queried_documents.len(), 10); + assert_eq!( + queried_documents.get(9).expect("last"), + documents_by_id + .values() + .nth(19) + .expect("expected to get 9th document") + ); + } + + fn serialize_vec_to_cbor<T: Into<Value>>(input: Vec<T>) -> Result<Vec<u8>, Error> { + let values = Value::Array( + input + .into_iter() + .map(|v| v.into()) + .collect::<Vec<Value>>(), + ); + + let cbor_values: CborValue = TryInto::<CborValue>::try_into(values) + .map_err(|e| Error::Protocol(dpp::ProtocolError::EncodingError(e.to_string())))?; + + let mut serialized = Vec::new(); + ciborium::ser::into_writer(&cbor_values, &mut serialized) + .map_err(|e| Error::Protocol(dpp::ProtocolError::EncodingError(e.to_string())))?; + + Ok(serialized) + } + + #[test] + fn test_documents_start_after_proof_secondary_index() { + let (platform, state, version) = setup_platform(Some((1, 1)), Network::Testnet, None); + + let platform_version = PlatformVersion::latest(); + let withdrawals = platform + .drive + .cache + .system_data_contracts + .load_withdrawals(); + + let data_contract_id = withdrawals.id(); + let document_type_name = "withdrawal"; + let document_type = withdrawals + .document_type_for_name(document_type_name) + .expect("expected document type"); + + let mut std_rng = StdRng::seed_from_u64(393); + let mut documents_by_created_at = BTreeMap::new(); + + // Define the base time as the current system time + let base_time = 1730028481000; + + for i in 0..20 { + let created_at = base_time + i * 20000; + // Create a Document with the desired properties + let random_document: Document = DocumentV0 { + id: Identifier::random_with_rng(&mut std_rng), + owner_id: Identifier::random_with_rng(&mut std_rng), + properties: { + let mut properties = BTreeMap::new(); + properties.insert("status".to_string(), Value::I64(0)); // Always queued + properties.insert("pooling".to_string(), Value::I64(0)); // Always 0 + properties.insert("coreFeePerByte".to_string(), Value::I64(1)); // Always 1 + properties.insert("amount".to_string(), Value::I64(1000)); // Set a minimum amount of 1000 + properties.insert("outputScript".to_string(), Value::Bytes(vec![])); // Set an empty output script + properties + }, + revision: Some(1), // Example revision + created_at: Some(created_at), // Set created_at + updated_at: Some(created_at), // Set updated_at + transferred_at: None, + created_at_block_height: None, + updated_at_block_height: None, + transferred_at_block_height: None, + created_at_core_block_height: None, + updated_at_core_block_height: None, + transferred_at_core_block_height: None, + } + .into(); + store_document( + &platform, + &withdrawals, + document_type, + &random_document, + platform_version, + ); + documents_by_created_at.insert(created_at, random_document); + } + + let after = documents_by_created_at + .values() + .nth(9) + .expect("expected to get 9th document") + .id(); + + let drive_document_query = DriveDocumentQuery { + contract: &withdrawals, + document_type, + internal_clauses: InternalClauses { + primary_key_in_clause: None, + primary_key_equal_clause: None, +
in_clause: None, + range_clause: None, + equal_clauses: BTreeMap::from([ + ( + "status".to_string(), + WhereClause { + field: "status".to_string(), + operator: WhereOperator::Equal, + value: Value::I64(0), + }, + ), + ( + "pooling".to_string(), + WhereClause { + field: "pooling".to_string(), + operator: WhereOperator::Equal, + value: Value::I64(0), + }, + ), + ( + "coreFeePerByte".to_string(), + WhereClause { + field: "coreFeePerByte".to_string(), + operator: WhereOperator::Equal, + value: Value::I64(1), + }, + ), + ]), + }, + offset: None, + limit: Some(10), + order_by: IndexMap::from([( + "$updatedAt".to_string(), + OrderClause { + field: "$updatedAt".to_string(), + ascending: true, + }, + )]), + start_at: Some(after.to_buffer()), + start_at_included: false, + block_time_ms: None, + }; + + let where_clauses = serialize_vec_to_cbor( + drive_document_query + .internal_clauses + .equal_clauses + .values() + .cloned() + .collect(), + ) + .expect("where clauses serialization should never fail"); + let order_by = + serialize_vec_to_cbor(drive_document_query.order_by.values().cloned().collect()) + .expect("order by clauses serialization should never fail"); + + let request = GetDocumentsRequestV0 { + data_contract_id: data_contract_id.to_vec(), + document_type: document_type_name.to_string(), + r#where: where_clauses, + limit: 10, + order_by, + prove: true, + start: Some(Start::StartAfter(after.to_vec())), + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + let Some(GetDocumentsResponseV0 { + result: Some(get_documents_response_v0::Result::Proof(proof)), + metadata: Some(_), + }) = result.data + else { + panic!("expected proof") + }; + + let (_, queried_documents) = drive_document_query + .verify_proof(&proof.grovedb_proof, platform_version) + .expect("expected to verify proof"); + + assert_eq!(queried_documents.len(), 10); + assert_eq!( + queried_documents.get(9).expect("last"), + documents_by_created_at + .values() + .nth(19) + .expect("expected to get 9th document") + ); + } + + #[test] + fn test_documents_start_after_proof_secondary_index_many_statuses() { + let (platform, state, version) = setup_platform(Some((1, 1)), Network::Testnet, None); + + let platform_version = PlatformVersion::latest(); + let withdrawals = platform + .drive + .cache + .system_data_contracts + .load_withdrawals(); + + let data_contract_id = withdrawals.id(); + let document_type_name = "withdrawal"; + let document_type = withdrawals + .document_type_for_name(document_type_name) + .expect("expected document type"); + + let mut std_rng = StdRng::seed_from_u64(393); + let mut documents_by_created_at = BTreeMap::new(); + + // Define the base time as the current system time + let base_time = 1730028481000; + + for i in 0..20 { + let created_at = base_time + i * 20000; + // Create a Document with the desired properties + let random_document: Document = DocumentV0 { + id: Identifier::random_with_rng(&mut std_rng), + owner_id: Identifier::random_with_rng(&mut std_rng), + properties: { + let mut properties = BTreeMap::new(); + properties.insert("status".to_string(), Value::I64(i as i64 % 4)); // Always queued + properties.insert("pooling".to_string(), Value::I64(0)); // Always 0 + properties.insert("coreFeePerByte".to_string(), Value::I64(1)); // Always 1 + properties.insert("amount".to_string(), Value::I64(1000)); // Set a minimum amount of 1000 + properties.insert("outputScript".to_string(), Value::Bytes(vec![])); // Set an empty output script + 
properties + }, + revision: Some(1), // Example revision + created_at: Some(created_at), // Set created_at + updated_at: Some(created_at), // Set updated_at + transferred_at: None, + created_at_block_height: None, + updated_at_block_height: None, + transferred_at_block_height: None, + created_at_core_block_height: None, + updated_at_core_block_height: None, + transferred_at_core_block_height: None, + } + .into(); + store_document( + &platform, + &withdrawals, + document_type, + &random_document, + platform_version, + ); + documents_by_created_at.insert(created_at, random_document); + } + + let after = documents_by_created_at + .values() + .nth(9) + .expect("expected to get 9th document") + .id(); + + let drive_document_query = DriveDocumentQuery { + contract: &withdrawals, + document_type, + internal_clauses: InternalClauses { + primary_key_in_clause: None, + primary_key_equal_clause: None, + in_clause: None, + range_clause: None, + equal_clauses: BTreeMap::from([ + ( + "status".to_string(), + WhereClause { + field: "status".to_string(), + operator: WhereOperator::Equal, + value: Value::I64(0), + }, + ), + ( + "pooling".to_string(), + WhereClause { + field: "pooling".to_string(), + operator: WhereOperator::Equal, + value: Value::I64(0), + }, + ), + ( + "coreFeePerByte".to_string(), + WhereClause { + field: "coreFeePerByte".to_string(), + operator: WhereOperator::Equal, + value: Value::I64(1), + }, + ), + ]), + }, + offset: None, + limit: Some(3), + order_by: IndexMap::from([( + "$updatedAt".to_string(), + OrderClause { + field: "$updatedAt".to_string(), + ascending: true, + }, + )]), + start_at: Some(after.to_buffer()), + start_at_included: false, + block_time_ms: None, + }; + + let where_clauses = serialize_vec_to_cbor( + drive_document_query + .internal_clauses + .equal_clauses + .values() + .cloned() + .collect(), + ) + .expect("where clauses serialization should never fail"); + let order_by = + serialize_vec_to_cbor(drive_document_query.order_by.values().cloned().collect()) + .expect("order by clauses serialization should never fail"); + + let request = GetDocumentsRequestV0 { + data_contract_id: data_contract_id.to_vec(), + document_type: document_type_name.to_string(), + r#where: where_clauses, + limit: 3, + order_by, + prove: true, + start: Some(Start::StartAfter(after.to_vec())), + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + let Some(GetDocumentsResponseV0 { + result: Some(get_documents_response_v0::Result::Proof(proof)), + metadata: Some(_), + }) = result.data + else { + panic!("expected proof") + }; + + let (_, queried_documents) = drive_document_query + .verify_proof(&proof.grovedb_proof, platform_version) + .expect("expected to verify proof"); + + assert_eq!(queried_documents.len(), 2); + assert_eq!( + queried_documents.get(1).expect("last"), + documents_by_created_at + .values() + .nth(16) + .expect("expected to get 2nd document") + ); + } + + #[test] + fn test_documents_proof_secondary_index_in_query() { + let (platform, state, version) = setup_platform(Some((1, 1)), Network::Testnet, None); + + let platform_version = PlatformVersion::latest(); + let withdrawals = platform + .drive + .cache + .system_data_contracts + .load_withdrawals(); + + let data_contract_id = withdrawals.id(); + let document_type_name = "withdrawal"; + let document_type = withdrawals + .document_type_for_name(document_type_name) + .expect("expected document type"); + + let mut std_rng = StdRng::seed_from_u64(393); + let mut 
documents_by_id = BTreeMap::new(); + + // Define the base time as the current system time + let base_time = 1730028481000; + + for i in 0..20 { + let created_at = base_time + i * 20000; + // Create a Document with the desired properties + let random_document: Document = DocumentV0 { + id: Identifier::random_with_rng(&mut std_rng), + owner_id: Identifier::random_with_rng(&mut std_rng), + properties: { + let mut properties = BTreeMap::new(); + properties.insert("status".to_string(), Value::I64(i as i64 % 4)); // Always queued + properties.insert("pooling".to_string(), Value::I64(0)); // Always 0 + properties.insert("coreFeePerByte".to_string(), Value::I64(1)); // Always 1 + properties.insert("amount".to_string(), Value::I64(1000)); // Set a minimum amount of 1000 + properties.insert("outputScript".to_string(), Value::Bytes(vec![])); // Set an empty output script + properties + }, + revision: Some(1), // Example revision + created_at: Some(created_at), // Set created_at + updated_at: Some(created_at), // Set updated_at + transferred_at: None, + created_at_block_height: None, + updated_at_block_height: None, + transferred_at_block_height: None, + created_at_core_block_height: None, + updated_at_core_block_height: None, + transferred_at_core_block_height: None, + } + .into(); + store_document( + &platform, + &withdrawals, + document_type, + &random_document, + platform_version, + ); + documents_by_id.insert(random_document.id(), random_document); + } + + let drive_document_query = DriveDocumentQuery { + contract: &withdrawals, + document_type, + internal_clauses: InternalClauses { + primary_key_in_clause: None, + primary_key_equal_clause: None, + in_clause: Some(WhereClause { + field: "status".to_string(), + operator: WhereOperator::In, + value: Value::Array(vec![ + Value::I64(0), + Value::I64(1), + Value::I64(2), + Value::I64(3), + Value::I64(4), + ]), + }), + range_clause: None, + equal_clauses: BTreeMap::default(), + }, + offset: None, + limit: Some(3), + order_by: IndexMap::from([ + ( + "status".to_string(), + OrderClause { + field: "status".to_string(), + ascending: true, + }, + ), + ( + "transactionIndex".to_string(), + OrderClause { + field: "transactionIndex".to_string(), + ascending: true, + }, + ), + ]), + start_at: None, + start_at_included: false, + block_time_ms: None, + }; + + let mut where_clauses: Vec<_> = drive_document_query + .internal_clauses + .equal_clauses + .values() + .cloned() + .collect(); + + where_clauses.insert( + 0, + drive_document_query + .internal_clauses + .in_clause + .clone() + .unwrap(), + ); + + let where_clauses_serialized = serialize_vec_to_cbor(where_clauses) + .expect("where clauses serialization should never fail"); + let order_by = + serialize_vec_to_cbor(drive_document_query.order_by.values().cloned().collect()) + .expect("order by clauses serialization should never fail"); + + let request = GetDocumentsRequestV0 { + data_contract_id: data_contract_id.to_vec(), + document_type: document_type_name.to_string(), + r#where: where_clauses_serialized, + limit: 3, + order_by, + prove: true, + start: None, + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + assert!(result.errors.is_empty(), "errors are {:?}", result.errors); + + let Some(GetDocumentsResponseV0 { + result: Some(get_documents_response_v0::Result::Proof(proof)), + metadata: Some(_), + }) = result.data + else { + panic!("expected proof") + }; + + let (_, queried_documents) = drive_document_query + 
.verify_proof(&proof.grovedb_proof, platform_version) + .expect("expected to verify proof"); + + assert_eq!(queried_documents.len(), 3); + } + + #[test] + fn test_documents_start_after_proof_secondary_index_in_query() { + let (platform, state, version) = setup_platform(Some((1, 1)), Network::Testnet, None); + + let platform_version = PlatformVersion::latest(); + let withdrawals = platform + .drive + .cache + .system_data_contracts + .load_withdrawals(); + + let data_contract_id = withdrawals.id(); + let document_type_name = "withdrawal"; + let document_type = withdrawals + .document_type_for_name(document_type_name) + .expect("expected document type"); + + let mut std_rng = StdRng::seed_from_u64(393); + let mut documents_by_created_at = BTreeMap::new(); + + // Define the base time as the current system time + let base_time = 1730028481000; + + for i in 0..20 { + let created_at = base_time + i * 20000; + // Create a Document with the desired properties + let random_document: Document = DocumentV0 { + id: Identifier::random_with_rng(&mut std_rng), + owner_id: Identifier::random_with_rng(&mut std_rng), + properties: { + let mut properties = BTreeMap::new(); + properties.insert("status".to_string(), Value::I64(i as i64 % 4)); // Always queued + properties.insert("pooling".to_string(), Value::I64(0)); // Always 0 + properties.insert("coreFeePerByte".to_string(), Value::I64(1)); // Always 1 + properties.insert("amount".to_string(), Value::I64(1000)); // Set a minimum amount of 1000 + properties.insert("outputScript".to_string(), Value::Bytes(vec![])); // Set an empty output script + properties + }, + revision: Some(1), // Example revision + created_at: Some(created_at), // Set created_at + updated_at: Some(created_at), // Set updated_at + transferred_at: None, + created_at_block_height: None, + updated_at_block_height: None, + transferred_at_block_height: None, + created_at_core_block_height: None, + updated_at_core_block_height: None, + transferred_at_core_block_height: None, + } + .into(); + store_document( + &platform, + &withdrawals, + document_type, + &random_document, + platform_version, + ); + documents_by_created_at.insert(created_at, random_document); + } + + let after = documents_by_created_at + .values() + .nth(4) + .expect("expected to get 9th document") + .id(); + + let drive_document_query = DriveDocumentQuery { + contract: &withdrawals, + document_type, + internal_clauses: InternalClauses { + primary_key_in_clause: None, + primary_key_equal_clause: None, + in_clause: Some(WhereClause { + field: "status".to_string(), + operator: WhereOperator::In, + value: Value::Array(vec![ + Value::I64(0), + Value::I64(1), + Value::I64(2), + Value::I64(3), + Value::I64(4), + ]), + }), + range_clause: None, + equal_clauses: BTreeMap::default(), + }, + offset: None, + limit: Some(3), + order_by: IndexMap::from([ + ( + "status".to_string(), + OrderClause { + field: "status".to_string(), + ascending: true, + }, + ), + ( + "transactionIndex".to_string(), + OrderClause { + field: "transactionIndex".to_string(), + ascending: true, + }, + ), + ]), + start_at: Some(after.to_buffer()), + start_at_included: false, + block_time_ms: None, + }; + + let mut where_clauses: Vec<_> = drive_document_query + .internal_clauses + .equal_clauses + .values() + .cloned() + .collect(); + + where_clauses.insert( + 0, + drive_document_query + .internal_clauses + .in_clause + .clone() + .unwrap(), + ); + + let where_clauses_serialized = serialize_vec_to_cbor(where_clauses) + .expect("where clauses serialization should never 
fail"); + let order_by = + serialize_vec_to_cbor(drive_document_query.order_by.values().cloned().collect()) + .expect("order by clauses serialization should never fail"); + + let request = GetDocumentsRequestV0 { + data_contract_id: data_contract_id.to_vec(), + document_type: document_type_name.to_string(), + r#where: where_clauses_serialized, + limit: 3, + order_by, + prove: true, + start: Some(Start::StartAfter(after.to_vec())), + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + assert!(result.errors.is_empty(), "errors are {:?}", result.errors); + + let Some(GetDocumentsResponseV0 { + result: Some(get_documents_response_v0::Result::Proof(proof)), + metadata: Some(_), + }) = result.data + else { + panic!("expected proof") + }; + + let (_, queried_documents) = drive_document_query + .verify_proof(&proof.grovedb_proof, platform_version) + .expect("expected to verify proof"); + + assert_eq!(queried_documents.len(), 3); + assert_eq!( + queried_documents.get(1).expect("last"), + documents_by_created_at + .values() + .nth(16) + .expect("expected to get 2nd document") + ); + } + + //todo: this should be possible + #[test] + #[ignore] + fn test_documents_start_after_proof_secondary_index_in_query_2() { + let (platform, state, version) = setup_platform(Some((1, 1)), Network::Testnet, None); + + let platform_version = PlatformVersion::latest(); + let withdrawals = platform + .drive + .cache + .system_data_contracts + .load_withdrawals(); + + let data_contract_id = withdrawals.id(); + let document_type_name = "withdrawal"; + let document_type = withdrawals + .document_type_for_name(document_type_name) + .expect("expected document type"); + + let mut std_rng = StdRng::seed_from_u64(393); + let mut documents_by_created_at = BTreeMap::new(); + + // Define the base time as the current system time + let base_time = 1730028481000; + + for i in 0..20 { + let created_at = base_time + i * 20000; + // Create a Document with the desired properties + let random_document: Document = DocumentV0 { + id: Identifier::random_with_rng(&mut std_rng), + owner_id: Identifier::random_with_rng(&mut std_rng), + properties: { + let mut properties = BTreeMap::new(); + properties.insert("status".to_string(), Value::I64(i as i64 % 4)); // Always queued + properties.insert("pooling".to_string(), Value::I64(0)); // Always 0 + properties.insert("coreFeePerByte".to_string(), Value::I64(1)); // Always 1 + properties.insert("amount".to_string(), Value::I64(1000)); // Set a minimum amount of 1000 + properties.insert("outputScript".to_string(), Value::Bytes(vec![])); // Set an empty output script + properties + }, + revision: Some(1), // Example revision + created_at: Some(created_at), // Set created_at + updated_at: Some(created_at), // Set updated_at + transferred_at: None, + created_at_block_height: None, + updated_at_block_height: None, + transferred_at_block_height: None, + created_at_core_block_height: None, + updated_at_core_block_height: None, + transferred_at_core_block_height: None, + } + .into(); + store_document( + &platform, + &withdrawals, + document_type, + &random_document, + platform_version, + ); + documents_by_created_at.insert(created_at, random_document); + } + + let after = documents_by_created_at + .values() + .nth(9) + .expect("expected to get 9th document") + .id(); + + let drive_document_query = DriveDocumentQuery { + contract: &withdrawals, + document_type, + internal_clauses: InternalClauses { + primary_key_in_clause: None, + 
primary_key_equal_clause: None, + in_clause: Some(WhereClause { + field: "status".to_string(), + operator: WhereOperator::In, + value: Value::Array(vec![ + Value::I64(0), + Value::I64(1), + Value::I64(2), + Value::I64(3), + Value::I64(4), + ]), + }), + range_clause: None, + equal_clauses: BTreeMap::from([ + ( + "pooling".to_string(), + WhereClause { + field: "pooling".to_string(), + operator: WhereOperator::Equal, + value: Value::I64(0), + }, + ), + ( + "coreFeePerByte".to_string(), + WhereClause { + field: "coreFeePerByte".to_string(), + operator: WhereOperator::Equal, + value: Value::I64(1), + }, + ), + ]), + }, + offset: None, + limit: Some(3), + order_by: IndexMap::from([( + "$updatedAt".to_string(), + OrderClause { + field: "$updatedAt".to_string(), + ascending: true, + }, + )]), + start_at: Some(after.to_buffer()), + start_at_included: false, + block_time_ms: None, + }; + + let mut where_clauses: Vec<_> = drive_document_query + .internal_clauses + .equal_clauses + .values() + .cloned() + .collect(); + + where_clauses.insert( + 0, + drive_document_query + .internal_clauses + .in_clause + .clone() + .unwrap(), + ); + + let where_clauses_serialized = serialize_vec_to_cbor(where_clauses) + .expect("where clauses serialization should never fail"); + let order_by = + serialize_vec_to_cbor(drive_document_query.order_by.values().cloned().collect()) + .expect("order by clauses serialization should never fail"); + + let request = GetDocumentsRequestV0 { + data_contract_id: data_contract_id.to_vec(), + document_type: document_type_name.to_string(), + r#where: where_clauses_serialized, + limit: 3, + order_by, + prove: true, + start: Some(Start::StartAfter(after.to_vec())), + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + assert!(result.errors.is_empty(), "errors are {:?}", result.errors); + + let Some(GetDocumentsResponseV0 { + result: Some(get_documents_response_v0::Result::Proof(proof)), + metadata: Some(_), + }) = result.data + else { + panic!("expected proof") + }; + + let (_, queried_documents) = drive_document_query + .verify_proof(&proof.grovedb_proof, platform_version) + .expect("expected to verify proof"); + + assert_eq!(queried_documents.len(), 2); + assert_eq!( + queried_documents.get(1).expect("last"), + documents_by_created_at + .values() + .nth(16) + .expect("expected to get 2nd document") + ); + } } diff --git a/packages/rs-drive-abci/src/query/mod.rs b/packages/rs-drive-abci/src/query/mod.rs index a7d58c7358d..6be97cc1cfa 100644 --- a/packages/rs-drive-abci/src/query/mod.rs +++ b/packages/rs-drive-abci/src/query/mod.rs @@ -32,9 +32,15 @@ pub(crate) mod tests { use crate::config::PlatformConfig; use dpp::dashcore::Network; + use dpp::data_contract::document_type::DocumentTypeRef; + use dpp::document::Document; use dpp::prelude::{CoreBlockHeight, TimestampMillis}; - use drive::util::batch::DataContractOperationType; - use drive::util::batch::DriveOperation::DataContractOperation; + use drive::util::batch::DriveOperation::{DataContractOperation, DocumentOperation}; + use drive::util::batch::{DataContractOperationType, DocumentOperationType}; + use drive::util::object_size_info::{ + DataContractInfo, DocumentInfo, DocumentTypeInfo, OwnedDocumentInfo, + }; + use drive::util::storage_flags::StorageFlags; use platform_version::version::{PlatformVersion, ProtocolVersion}; use std::borrow::Cow; use std::sync::Arc; @@ -108,6 +114,40 @@ pub(crate) mod tests { .expect("expected to apply drive operations"); } + pub fn 
store_document( + platform: &Platform<MockCoreRPCLike>, + data_contract: &DataContract, + document_type: DocumentTypeRef, + document: &Document, + platform_version: &PlatformVersion, + ) { + let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + + let operation = DocumentOperation(DocumentOperationType::AddDocument { + owned_document_info: OwnedDocumentInfo { + document_info: DocumentInfo::DocumentRefInfo((document, storage_flags)), + owner_id: None, + }, + contract_info: DataContractInfo::BorrowedDataContract(data_contract), + document_type_info: DocumentTypeInfo::DocumentTypeRef(document_type), + override_document: false, + }); + + let block_info = BlockInfo::genesis(); + + platform + .drive + .apply_drive_operations( + vec![operation], + true, + &block_info, + None, + platform_version, + None, + ) + .expect("expected to apply drive operations"); + } + + pub fn assert_invalid_identifier<TData: Clone>( validation_result: QueryValidationResult<TData>, ) { diff --git a/packages/rs-drive/src/query/mod.rs b/packages/rs-drive/src/query/mod.rs index 1d7cf24371f..7491454a647 100644 --- a/packages/rs-drive/src/query/mod.rs +++ b/packages/rs-drive/src/query/mod.rs @@ -1275,23 +1275,39 @@ impl<'a> DriveDocumentQuery<'a> { #[cfg(any(feature = "server", feature = "verify"))] /// Returns a `Query` that either starts at or after the given key. fn inner_query_starts_from_key( - start_at_key: Vec<u8>, + start_at_key: Option<Vec<u8>>, left_to_right: bool, included: bool, ) -> Query { // We only need items after the start at document let mut inner_query = Query::new_with_direction(left_to_right); + if left_to_right { - if included { - inner_query.insert_range_from(start_at_key..); + if let Some(start_at_key) = start_at_key { + if included { + inner_query.insert_range_from(start_at_key..); + } else { + inner_query.insert_range_after(start_at_key..); + } } else { - inner_query.insert_range_after(start_at_key..); + inner_query.insert_all(); } } else if included { - inner_query.insert_range_to_inclusive(..=start_at_key); + if let Some(start_at_key) = start_at_key { + inner_query.insert_range_to_inclusive(..=start_at_key); + } else { + inner_query.insert_key(vec![]); + } } else { - inner_query.insert_range_to(..start_at_key); + if let Some(start_at_key) = start_at_key { + inner_query.insert_range_to(..start_at_key); + } else { + //todo: really not sure if this is correct + // Should investigate more + inner_query.insert_key(vec![]); + } } + inner_query } @@ -1434,11 +1450,11 @@ impl<'a> DriveDocumentQuery<'a> { platform_version, ) .ok() - .flatten() - .unwrap_or_default(); + .flatten(); // We should always include if we have left_over - let non_conditional_included = !left_over.is_empty() | *included; + let non_conditional_included = + !left_over.is_empty() | *included | start_at_key.is_none(); + let mut non_conditional_query = Self::inner_query_starts_from_key( start_at_key, @@ -1849,6 +1865,7 @@ impl<'a> DriveDocumentQuery<'a> { drive_operations, platform_version, )?; + let query_result = drive.grove_get_path_query_serialized_results( &path_query, transaction, @@ -1976,6 +1993,8 @@ mod tests { use dpp::data_contract::document_type::accessors::DocumentTypeV0Getters; use dpp::prelude::Identifier; + use grovedb::Query; + use indexmap::IndexMap; use rand::prelude::StdRng; use rand::SeedableRng; use serde_json::json; @@ -1995,11 +2014,16 @@ mod tests { use serde_json::Value::Null; use crate::config::DriveConfig; - use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; + use crate::util::test_helpers::setup::{ +
setup_drive_with_initial_state_structure, setup_system_data_contract, + }; use dpp::block::block_info::BlockInfo; use dpp::data_contract::accessors::v0::DataContractV0Getters; + use dpp::data_contracts::SystemDataContract; + use dpp::document::DocumentV0; use dpp::platform_value::string_encoding::Encoding; use dpp::platform_value::Value; + use dpp::system_data_contracts::load_system_data_contract; use dpp::tests::fixtures::{get_data_contract_fixture, get_dpns_data_contract_fixture}; use dpp::tests::json_document::json_document_to_contract; use dpp::util::cbor_serializer; @@ -2037,6 +2061,36 @@ mod tests { (drive, contract) } + fn setup_withdrawal_contract() -> (Drive, DataContract) { + let tmp_dir = TempDir::new().unwrap(); + + let (drive, _) = Drive::open(tmp_dir, None).expect("expected to open Drive successfully"); + + let platform_version = PlatformVersion::latest(); + + drive + .create_initial_state_structure(None, platform_version) + .expect("expected to create root tree successfully"); + + // let's construct the grovedb structure for the dashpay data contract + let contract = load_system_data_contract(SystemDataContract::Withdrawals, platform_version) + .expect("load system contact"); + + let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + drive + .apply_contract( + &contract, + BlockInfo::default(), + true, + storage_flags, + None, + platform_version, + ) + .expect("expected to apply contract successfully"); + + (drive, contract) + } + fn setup_family_birthday_contract() -> (Drive, DataContract) { let drive = setup_drive_with_initial_state_structure(None); @@ -2667,4 +2721,99 @@ mod tests { ) .expect_err("starts with can not start with an empty string"); } + + #[test] + fn test_withdrawal_query_with_missing_transaction_index() { + // Setup the withdrawal contract + let (_, contract) = setup_withdrawal_contract(); + let platform_version = PlatformVersion::latest(); + + let document_type_name = "withdrawal"; + let document_type = contract + .document_type_for_name(document_type_name) + .expect("expected to get document type"); + + // Create a DriveDocumentQuery that simulates missing 'transactionIndex' in documents + let drive_document_query = DriveDocumentQuery { + contract: &contract, + document_type, + internal_clauses: InternalClauses { + primary_key_in_clause: None, + primary_key_equal_clause: None, + in_clause: Some(WhereClause { + field: "status".to_string(), + operator: WhereOperator::In, + value: Value::Array(vec![ + Value::U64(0), + Value::U64(1), + Value::U64(2), + Value::U64(3), + Value::U64(4), + ]), + }), + range_clause: None, + equal_clauses: BTreeMap::default(), + }, + offset: None, + limit: Some(3), + order_by: IndexMap::from([ + ( + "status".to_string(), + OrderClause { + field: "status".to_string(), + ascending: true, + }, + ), + ( + "transactionIndex".to_string(), + OrderClause { + field: "transactionIndex".to_string(), + ascending: true, + }, + ), + ]), + start_at: Some([3u8; 32]), + start_at_included: false, + block_time_ms: None, + }; + + // Create a document that we are starting at, which may be missing 'transactionIndex' + let mut properties = BTreeMap::new(); + properties.insert("status".to_string(), Value::U64(0)); + // We intentionally omit 'transactionIndex' to simulate missing field + + let starts_at_document = DocumentV0 { + id: Identifier::from([3u8; 32]), // The same as start_at + owner_id: Identifier::random(), + properties, + revision: None, + created_at: None, + updated_at: None, + transferred_at: None, + created_at_block_height: 
None, + updated_at_block_height: None, + transferred_at_block_height: None, + created_at_core_block_height: None, + updated_at_core_block_height: None, + transferred_at_core_block_height: None, + } + .into(); + + // Attempt to construct the path query + let result = drive_document_query + .construct_path_query(Some(starts_at_document), platform_version) + .expect("expected to construct a path query"); + + assert_eq!( + result + .clone() + .query + .query + .default_subquery_branch + .subquery + .expect("expected subquery") + .items, + Query::new_range_full().items + ); + } } diff --git a/packages/rs-drive/src/verify/document/verify_proof_keep_serialized/v0/mod.rs b/packages/rs-drive/src/verify/document/verify_proof_keep_serialized/v0/mod.rs index dc362c7ead9..51c1bad4c44 100644 --- a/packages/rs-drive/src/verify/document/verify_proof_keep_serialized/v0/mod.rs +++ b/packages/rs-drive/src/verify/document/verify_proof_keep_serialized/v0/mod.rs @@ -39,6 +39,7 @@ impl<'a> DriveDocumentQuery<'a> { } else { self.construct_path_query(None, platform_version) }?; + let (root_hash, proved_key_values) = if self.start_at.is_some() { GroveDb::verify_subset_query(proof, &path_query, &platform_version.drive.grove_version)? } else { diff --git a/packages/rs-drive/tests/query_tests.rs b/packages/rs-drive/tests/query_tests.rs index 5c792aa8733..23d84918859 100644 --- a/packages/rs-drive/tests/query_tests.rs +++ b/packages/rs-drive/tests/query_tests.rs @@ -4342,6 +4342,8 @@ fn test_dpns_query_start_after_with_null_id() { let domain1_id = Identifier::random_with_rng(&mut rng); + assert!(domain0_id > domain1_id); + let domain1 = Domain { id: domain1_id, owner_id: Identifier::random_with_rng(&mut rng), @@ -4428,7 +4430,7 @@ fn test_dpns_query_start_after_with_null_id() { ["normalizedParentDomainName", "==", "dash"] ], "startAfter": encoded_start_at, - "limit": 2, + "limit": 3, "orderBy": [ ["normalizedLabel", "asc"] ] diff --git a/packages/rs-sdk/src/platform/document_query.rs b/packages/rs-sdk/src/platform/document_query.rs index a6b1f7c7738..c8136cf0f18 100644 --- a/packages/rs-sdk/src/platform/document_query.rs +++ b/packages/rs-sdk/src/platform/document_query.rs @@ -19,7 +19,7 @@ use dpp::{ document::Document, platform_value::{platform_value, Value}, prelude::{DataContract, Identifier}, - ProtocolError, + InvalidVectorSizeError, ProtocolError, }; use drive::query::{DriveDocumentQuery, InternalClauses, OrderClause, WhereClause, WhereOperator}; use drive_proof_verifier::{types::Documents, ContextProvider, FromProof}; @@ -326,6 +326,26 @@ impl<'a> TryFrom<&'a DocumentQuery> for DriveDocumentQuery<'a> { } else { None }; + + let (start_at, start_at_included) = match request.start.as_ref() { + None => (None, false), + Some(Start::StartAt(at)) => ( + Some(at.clone().try_into().map_err(|_| { + ProtocolError::InvalidVectorSizeError(InvalidVectorSizeError::new(32, at.len())) + })?), + true, + ), + Some(Start::StartAfter(after)) => ( + Some(after.clone().try_into().map_err(|_| { + ProtocolError::InvalidVectorSizeError(InvalidVectorSizeError::new( + 32, + after.len(), + )) + })?), + true, + ), + }; + let query = Self { contract: &request.data_contract, document_type, @@ -338,8 +358,8 @@ impl<'a> TryFrom<&'a DocumentQuery> for DriveDocumentQuery<'a> { .into_iter() .map(|v| (v.field.clone(), v)) .collect(), - start_at: None, - start_at_included: false, + start_at, + start_at_included, block_time_ms: None, };
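For reference, the request assembly exercised by the new rs-drive-abci tests is small: where and order-by clauses are CBOR-encoded with the serialize_vec_to_cbor helper added above and placed directly into GetDocumentsRequestV0, while Start::StartAfter carries the id of the last document already seen. A minimal sketch of that flow, assuming the test module's imports; the function name build_start_after_request is illustrative only, not part of the patch:

    // Sketch only: mirrors how the tests above build a proved "start after"
    // query against the withdrawals contract.
    fn build_start_after_request(
        data_contract_id: [u8; 32],
        after: [u8; 32],
    ) -> Result<GetDocumentsRequestV0, Error> {
        // Equality clause on "status", CBOR-encoded the same way the tests fill r#where.
        let where_clauses = serialize_vec_to_cbor(vec![WhereClause {
            field: "status".to_string(),
            operator: WhereOperator::Equal,
            value: Value::I64(0),
        }])?;
        // Ascending order on "$updatedAt", CBOR-encoded the same way as order_by.
        let order_by = serialize_vec_to_cbor(vec![OrderClause {
            field: "$updatedAt".to_string(),
            ascending: true,
        }])?;
        Ok(GetDocumentsRequestV0 {
            data_contract_id: data_contract_id.to_vec(),
            document_type: "withdrawal".to_string(),
            r#where: where_clauses,
            limit: 10,
            order_by,
            prove: true,
            // The start document itself is excluded from the proved result set.
            start: Some(Start::StartAfter(after.to_vec())),
        })
    }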
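The reworked inner_query_starts_from_key in rs-drive now takes Option<Vec<u8>> instead of Vec<u8>, so callers no longer have to substitute an empty key when there is no start document (the removed .unwrap_or_default() did exactly that). Written out flat, the six cases it covers are easier to scan; this is a sketch under the assumption that Query is grovedb::Query (the insert_* calls are the same ones used in the hunk), and equivalent_query is an illustrative name only:

    // Case-by-case restatement of the new behaviour. With no start key the
    // ascending query selects everything; the descending fallbacks insert a
    // single empty key, which the in-diff todo still flags for investigation.
    fn equivalent_query(start_at_key: Option<Vec<u8>>, left_to_right: bool, included: bool) -> Query {
        let mut inner_query = Query::new_with_direction(left_to_right);
        match (start_at_key, left_to_right, included) {
            (Some(key), true, true) => inner_query.insert_range_from(key..), // ascending, start included
            (Some(key), true, false) => inner_query.insert_range_after(key..), // ascending, start excluded
            (None, true, _) => inner_query.insert_all(),                     // ascending, no start key
            (Some(key), false, true) => inner_query.insert_range_to_inclusive(..=key), // descending, included
            (Some(key), false, false) => inner_query.insert_range_to(..key), // descending, excluded
            (None, false, _) => inner_query.insert_key(vec![]),              // descending, no start key
        }
        inner_query
    }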