diff --git a/Cargo.lock b/Cargo.lock index 9c4b5a0dd9d..e1724b3f6dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1370,6 +1370,8 @@ dependencies = [ "sanitize-filename", "serde", "serde_json", + "sha2", + "test-case 3.3.1", "thiserror", "tokio", "tokio-test", @@ -1631,7 +1633,7 @@ dependencies = [ "serde_repr", "sha2", "strum 0.25.0", - "test-case", + "test-case 2.2.2", "thiserror", "tokio", ] @@ -5075,8 +5077,8 @@ dependencies = [ [[package]] name = "tenderdash-abci" -version = "1.0.0" -source = "git+https://github.com/dashpay/rs-tenderdash-abci#1f29dbc549dc1abe5de295e55728576b1a0c8a6b" +version = "0.14.0-dev.12" +source = "git+https://github.com/dashpay/rs-tenderdash-abci?tag=v0.14.0-dev.12#17e57f509c2bb7015042d69976bd1f056fef923d" dependencies = [ "bytes", "futures", @@ -5097,8 +5099,8 @@ dependencies = [ [[package]] name = "tenderdash-proto" -version = "1.0.0" -source = "git+https://github.com/dashpay/rs-tenderdash-abci#1f29dbc549dc1abe5de295e55728576b1a0c8a6b" +version = "0.14.0-dev.12" +source = "git+https://github.com/dashpay/rs-tenderdash-abci?tag=v0.14.0-dev.12#17e57f509c2bb7015042d69976bd1f056fef923d" dependencies = [ "bytes", "chrono", @@ -5117,8 +5119,8 @@ dependencies = [ [[package]] name = "tenderdash-proto-compiler" -version = "1.0.0" -source = "git+https://github.com/dashpay/rs-tenderdash-abci#1f29dbc549dc1abe5de295e55728576b1a0c8a6b" +version = "0.14.0-dev.12" +source = "git+https://github.com/dashpay/rs-tenderdash-abci?tag=v0.14.0-dev.12#17e57f509c2bb7015042d69976bd1f056fef923d" dependencies = [ "fs_extra", "prost-build 0.12.6", @@ -5151,7 +5153,28 @@ version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21d6cf5a7dffb3f9dceec8e6b8ca528d9bd71d36c9f074defb548ce161f598c0" dependencies = [ - "test-case-macros", + "test-case-macros 2.2.2", +] + +[[package]] +name = "test-case" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb2550dd13afcd286853192af8601920d959b14c401fcece38071d53bf0768a8" +dependencies = [ + "test-case-macros 3.3.1", +] + +[[package]] +name = "test-case-core" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adcb7fd841cd518e279be3d5a3eb0636409487998a4aff22f3de87b81e88384f" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 2.0.68", ] [[package]] @@ -5167,6 +5190,18 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "test-case-macros" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", + "test-case-core", +] + [[package]] name = "textwrap" version = "0.11.0" diff --git a/packages/dapi-grpc/Cargo.toml b/packages/dapi-grpc/Cargo.toml index 085300789d1..ba95f370d87 100644 --- a/packages/dapi-grpc/Cargo.toml +++ b/packages/dapi-grpc/Cargo.toml @@ -42,7 +42,9 @@ tonic = { version = "0.11", features = [ serde = { version = "1.0.197", optional = true, features = ["derive"] } serde_bytes = { version = "0.11.12", optional = true } serde_json = { version = "1.0", optional = true } -tenderdash-proto = { git = "https://github.com/dashpay/rs-tenderdash-abci", version = "1.0.0-dev.1", default-features = false } +tenderdash-proto = { git = "https://github.com/dashpay/rs-tenderdash-abci", version = "0.14.0-dev.12", tag = "v0.14.0-dev.12", default-features = false, features = [ + "grpc", +] } dapi-grpc-macros = { path = "../rs-dapi-grpc-macros" } 
platform-version = { path = "../rs-platform-version" } diff --git a/packages/rs-drive-abci/Cargo.toml b/packages/rs-drive-abci/Cargo.toml index 5d759599614..5b95221b011 100644 --- a/packages/rs-drive-abci/Cargo.toml +++ b/packages/rs-drive-abci/Cargo.toml @@ -55,7 +55,7 @@ tracing-subscriber = { version = "0.3.16", default-features = false, features = "tracing-log", ], optional = false } atty = { version = "0.2.14", optional = false } -tenderdash-abci = { git = "https://github.com/dashpay/rs-tenderdash-abci", version = "1.0.0-dev.1", features = [ +tenderdash-abci = { git = "https://github.com/dashpay/rs-tenderdash-abci", version = "0.14.0-dev.12", tag = "v0.14.0-dev.12", features = [ "grpc", ] } lazy_static = "1.4.0" diff --git a/packages/rs-drive-proof-verifier/Cargo.toml b/packages/rs-drive-proof-verifier/Cargo.toml index d8647821b8c..5b29db2a529 100644 --- a/packages/rs-drive-proof-verifier/Cargo.toml +++ b/packages/rs-drive-proof-verifier/Cargo.toml @@ -32,7 +32,7 @@ dpp = { path = "../rs-dpp", features = [ bincode = { version = "2.0.0-rc.3", features = ["serde"], optional = true } platform-serialization-derive = { path = "../rs-platform-serialization-derive", optional = true } platform-serialization = { path = "../rs-platform-serialization", optional = true } -tenderdash-abci = { git = "https://github.com/dashpay/rs-tenderdash-abci", version = "1.0.0-dev.1", features = [ +tenderdash-abci = { git = "https://github.com/dashpay/rs-tenderdash-abci", version = "0.14.0-dev.12", tag = "v0.14.0-dev.12", features = [ "crypto", ], default-features = false } tracing = { version = "0.1.37" } diff --git a/packages/rs-drive-proof-verifier/src/proof.rs b/packages/rs-drive-proof-verifier/src/proof.rs index 5d1a96ce0bf..618e9b1e468 100644 --- a/packages/rs-drive-proof-verifier/src/proof.rs +++ b/packages/rs-drive-proof-verifier/src/proof.rs @@ -1460,7 +1460,7 @@ impl FromProof for VotePollsGroupedByTim let mtd = response.metadata().or(Err(Error::EmptyResponseMetadata))?; let (root_hash, vote_polls) = drive_query - .verify_vote_polls_by_end_date_proof::>( + .verify_vote_polls_by_end_date_proof::>( &proof.grovedb_proof, platform_version, ) @@ -1470,7 +1470,7 @@ impl FromProof for VotePollsGroupedByTim verify_tenderdash_proof(proof, mtd, &root_hash, provider)?; - let response = VotePollsGroupedByTimestamp(vote_polls); + let response = VotePollsGroupedByTimestamp(vote_polls).sorted(drive_query.order_ascending); Ok((response.into_option(), mtd.clone())) } @@ -1655,7 +1655,7 @@ define_length!(Contenders, |x: &Contenders| x.contenders.len()); define_length!(Voters, |x: &Voters| x.0.len()); define_length!( VotePollsGroupedByTimestamp, - |x: &VotePollsGroupedByTimestamp| x.0.values().map(|v| v.len()).sum() + |x: &VotePollsGroupedByTimestamp| x.0.iter().map(|v| v.1.len()).sum() ); trait IntoOption where diff --git a/packages/rs-drive-proof-verifier/src/types.rs b/packages/rs-drive-proof-verifier/src/types.rs index 5955c040de9..df390d3cd2c 100644 --- a/packages/rs-drive-proof-verifier/src/types.rs +++ b/packages/rs-drive-proof-verifier/src/types.rs @@ -26,6 +26,7 @@ use dpp::{ }; use drive::grovedb::Element; use std::collections::{BTreeMap, BTreeSet}; + #[cfg(feature = "mocks")] use { bincode::{Decode, Encode}, @@ -293,19 +294,35 @@ impl From<&PrefundedSpecializedBalance> for Credits { derive(Encode, Decode, PlatformSerialize, PlatformDeserialize), platform_serialize(unversioned) )] -pub struct VotePollsGroupedByTimestamp(pub BTreeMap>); +pub struct VotePollsGroupedByTimestamp(pub Vec<(TimestampMillis, Vec)>); 
+impl VotePollsGroupedByTimestamp { + /// Sort the vote polls by timestamp. + pub fn sorted(mut self, ascending: bool) -> Self { + self.0.sort_by(|a, b| { + if ascending { + a.0.cmp(&b.0) + } else { + b.0.cmp(&a.0) + } + }); + + self + } +} /// Insert items into the map, appending them to the existing values for the same key. impl FromIterator<(u64, Vec)> for VotePollsGroupedByTimestamp { fn from_iter)>>(iter: T) -> Self { - let mut map = BTreeMap::new(); - - for (timestamp, vote_poll) in iter { - let entry = map.entry(timestamp).or_insert_with(Vec::new); - entry.extend(vote_poll); - } - - Self(map) + // collect all vote polls for the same timestamp into a single vector + let data = iter + .into_iter() + .fold(BTreeMap::new(), |mut acc, (timestamp, vote_poll)| { + let entry: &mut Vec = acc.entry(timestamp).or_default(); + entry.extend(vote_poll); + acc + }); + + Self(data.into_iter().collect()) } } @@ -329,7 +346,7 @@ impl FromIterator<(u64, Option)> for VotePollsGroupedByTimestamp { impl IntoIterator for VotePollsGroupedByTimestamp { type Item = (u64, Vec); - type IntoIter = std::collections::btree_map::IntoIter>; + type IntoIter = std::vec::IntoIter<(u64, Vec)>; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() diff --git a/packages/rs-sdk/Cargo.toml b/packages/rs-sdk/Cargo.toml index fd1a5498a6e..b37f5848a70 100644 --- a/packages/rs-sdk/Cargo.toml +++ b/packages/rs-sdk/Cargo.toml @@ -36,6 +36,7 @@ dashcore-rpc = { git = "https://github.com/dashpay/rust-dashcore-rpc", tag = "v0 lru = { version = "0.12.3", optional = true } bip37-bloom-filter = { git = "https://github.com/dashpay/rs-bip37-bloom-filter", branch = "develop" } pollster = { version = "0.3.0" } +sha2 = "0.10.8" [dev-dependencies] tokio = { version = "1.36.0", features = ["macros", "rt-multi-thread"] } @@ -52,6 +53,7 @@ tokio-test = { version = "0.4.4" } clap = { version = "4.5.4", features = ["derive"] } sanitize-filename = { version = "0.5.0" } chrono = { version = "0.4.38" } +test-case = { version = "3.3.1" } [features] default = ["mocks", "offline-testing"] diff --git a/packages/rs-sdk/scripts/generate_test_vectors.sh b/packages/rs-sdk/scripts/generate_test_vectors.sh index d02f193687e..a5530e7d93e 100755 --- a/packages/rs-sdk/scripts/generate_test_vectors.sh +++ b/packages/rs-sdk/scripts/generate_test_vectors.sh @@ -23,6 +23,6 @@ pushd "$CARGO_DIR" cargo test -p dash-sdk \ --no-default-features \ --features generate-test-vectors \ - "$1" + "$@" popd diff --git a/packages/rs-sdk/src/platform/query.rs b/packages/rs-sdk/src/platform/query.rs index 87500822006..2d21fa5f8ca 100644 --- a/packages/rs-sdk/src/platform/query.rs +++ b/packages/rs-sdk/src/platform/query.rs @@ -520,3 +520,27 @@ impl Query for VoteQuery { .into()) } } + +impl Query for LimitQuery { + fn query(self, prove: bool) -> Result { + if !prove { + unimplemented!("queries without proofs are not supported yet"); + } + use proto::get_contested_resource_identity_votes_request::{ + get_contested_resource_identity_votes_request_v0::StartAtVotePollIdInfo, Version, + }; + + Ok(match self.query.query(prove)?.version { + None => return Err(Error::Protocol(dpp::ProtocolError::NoProtocolVersionError)), + Some(Version::V0(v0)) => GetContestedResourceIdentityVotesRequestV0 { + limit: self.limit, + start_at_vote_poll_id_info: self.start_info.map(|v| StartAtVotePollIdInfo { + start_at_poll_identifier: v.start_key.to_vec(), + start_poll_identifier_included: v.start_included, + }), + ..v0 + }, + } + .into()) + } +} diff --git 
a/packages/rs-sdk/src/platform/transition/vote.rs b/packages/rs-sdk/src/platform/transition/vote.rs index f03e0a1f692..56864a760ca 100644 --- a/packages/rs-sdk/src/platform/transition/vote.rs +++ b/packages/rs-sdk/src/platform/transition/vote.rs @@ -5,6 +5,8 @@ use crate::platform::transition::put_settings::PutSettings; use crate::platform::Fetch; use crate::{Error, Sdk}; use dapi_grpc::platform::VersionedGrpcResponse; +use dpp::identifier::MasternodeIdentifiers; +use dpp::identity::hash::IdentityPublicKeyHashMethodsV0; use dpp::identity::signer::Signer; use dpp::identity::IdentityPublicKey; use dpp::prelude::Identifier; @@ -50,8 +52,10 @@ impl PutVote for Vote { signer: &S, settings: Option, ) -> Result<(), Error> { + let voting_identity_id = get_voting_identity_id(voter_pro_tx_hash, voting_public_key)?; + let new_masternode_voting_nonce = sdk - .get_identity_nonce(voter_pro_tx_hash, true, settings) + .get_identity_nonce(voting_identity_id, true, settings) .await?; let settings = settings.unwrap_or_default(); @@ -80,8 +84,10 @@ impl PutVote for Vote { signer: &S, settings: Option, ) -> Result { + let voting_identity_id = get_voting_identity_id(voter_pro_tx_hash, voting_public_key)?; + let new_masternode_voting_nonce = sdk - .get_identity_nonce(voter_pro_tx_hash, true, settings) + .get_identity_nonce(voting_identity_id, true, settings) .await?; let settings = settings.unwrap_or_default(); @@ -147,3 +153,15 @@ impl PutVote for Vote { } } } + +fn get_voting_identity_id( + voter_pro_tx_hash: Identifier, + voting_public_key: &IdentityPublicKey, +) -> Result { + let pub_key_hash = voting_public_key.public_key_hash()?; + + Ok(Identifier::create_voter_identifier( + voter_pro_tx_hash.as_bytes(), + &pub_key_hash, + )) +} diff --git a/packages/rs-sdk/tests/.env.example b/packages/rs-sdk/tests/.env.example index 23b48e84df8..1a9222032eb 100644 --- a/packages/rs-sdk/tests/.env.example +++ b/packages/rs-sdk/tests/.env.example @@ -4,6 +4,9 @@ DASH_SDK_PLATFORM_HOST="127.0.0.1" DASH_SDK_PLATFORM_PORT=2443 DASH_SDK_PLATFORM_SSL=false +# ProTxHash of a masternode that has at least 1 vote cast for DPNS name `testname` +DASH_SDK_MASTERNODE_OWNER_PRO_REG_TX_HASH="6ac88f64622d9bc0cb79ad0f69657aa9488b213157d20ae0ca371fa5f04fb222" + DASH_SDK_CORE_PORT=20002 DASH_SDK_CORE_USER="someuser" DASH_SDK_CORE_PASSWORD="verysecretpassword" diff --git a/packages/rs-sdk/tests/fetch/common.rs b/packages/rs-sdk/tests/fetch/common.rs index 12bfeb77617..da109c4da0b 100644 --- a/packages/rs-sdk/tests/fetch/common.rs +++ b/packages/rs-sdk/tests/fetch/common.rs @@ -1,4 +1,12 @@ +use dash_sdk::{mock::Mockable, platform::Query, Sdk}; use dpp::{data_contract::DataContractFactory, prelude::Identifier}; +use hex::ToHex; +use rs_dapi_client::transport::TransportRequest; + +use super::config::Config; + +/// Test DPNS name used in SDK tests; at least 3 identities should request this name to be reserved +pub(crate) const TEST_DPNS_NAME: &str = "testname"; /// Create a mock document type for testing of mock API pub fn mock_document_type() -> dpp::data_contract::document_type::DocumentType { @@ -80,3 +88,24 @@ pub fn setup_logs() { .try_init() .ok(); } + +/// Configure a test case generated with the [::test_case] crate. +/// +/// This function is intended to be used with multiple test cases in a single function. +/// As test cases share the function body, we need to generate a unique name for each of them to isolate generated +/// test vectors. This is done by hashing the query and using the hash as a suffix of the test case name.
+/// +/// ## Returns +/// +/// Returns unique name of test case (generated from `name_prefix` and hash of query) and configured SDK. +pub(crate) async fn setup_sdk_for_test_case>( + cfg: Config, + query: Q, + name_prefix: &str, +) -> (String, Sdk) { + let key = rs_dapi_client::mock::Key::new(&query.query(true).expect("valid query")); + let test_case_id = format!("{}_{}", name_prefix, key.encode_hex::()); + + // create new sdk to ensure that test cases don't interfere with each other + (test_case_id.clone(), cfg.setup_api(&test_case_id).await) +} diff --git a/packages/rs-sdk/tests/fetch/config.rs b/packages/rs-sdk/tests/fetch/config.rs index 839f3dec9d2..27738cb552a 100644 --- a/packages/rs-sdk/tests/fetch/config.rs +++ b/packages/rs-sdk/tests/fetch/config.rs @@ -3,7 +3,10 @@ //! This module contains [Config] struct that can be used to configure dash-platform-sdk. //! It's mainly used for testing. -use dpp::prelude::Identifier; +use dpp::{ + dashcore::{hashes::Hash, ProTxHash}, + prelude::Identifier, +}; use rs_dapi_client::AddressList; use serde::Deserialize; use std::{path::PathBuf, str::FromStr}; @@ -68,6 +71,9 @@ pub struct Config { /// in [`existing_data_contract_id`](Config::existing_data_contract_id). #[serde(default = "Config::default_document_id")] pub existing_document_id: Identifier, + // Hex-encoded ProTxHash of the existing HP masternode + #[serde(default)] + pub masternode_owner_pro_reg_tx_hash: String, } impl Config { @@ -147,7 +153,11 @@ impl Config { pub async fn setup_api(&self, namespace: &str) -> dash_sdk::Sdk { let dump_dir = match namespace.is_empty() { true => self.dump_dir.clone(), - false => self.dump_dir.join(sanitize_filename::sanitize(namespace)), + false => { + // looks like spaces are not replaced by sanitize_filename, and we don't want them as they are confusing + let namespace = namespace.replace(' ', "_"); + self.dump_dir.join(sanitize_filename::sanitize(namespace)) + } }; if dump_dir.is_relative() { @@ -180,9 +190,12 @@ impl Config { if let Err(err) = std::fs::remove_dir_all(&dump_dir) { tracing::warn!(?err, ?dump_dir, "failed to remove dump dir"); } - std::fs::create_dir_all(&dump_dir).expect("create dump dir"); + std::fs::create_dir_all(&dump_dir) + .expect(format!("create dump dir {}", dump_dir.display()).as_str()); // ensure dump dir is committed to git - std::fs::write(dump_dir.join(".gitkeep"), "").expect("create .gitkeep file") + let gitkeep = dump_dir.join(".gitkeep"); + std::fs::write(&gitkeep, "") + .expect(format!("create {} file", gitkeep.display()).as_str()); } builder.with_dump_dir(&dump_dir) @@ -231,6 +244,21 @@ impl Config { .join("tests") .join("vectors") } + + /// Return ProTxHash of an existing evo node, or None if not set + pub fn existing_protxhash(&self) -> Result { + hex::decode(&self.masternode_owner_pro_reg_tx_hash) + .map_err(|e| e.to_string()) + .and_then(|b| ProTxHash::from_slice(&b).map_err(|e| e.to_string())) + .map_err(|e| { + format!( + "Invalid {}MASTERNODE_OWNER_PRO_REG_TX_HASH {}: {}", + Self::CONFIG_PREFIX, + self.masternode_owner_pro_reg_tx_hash, + e + ) + }) + } } impl Default for Config { diff --git a/packages/rs-sdk/tests/fetch/contested_resource.rs b/packages/rs-sdk/tests/fetch/contested_resource.rs index f21257cef66..9faf4b41461 100644 --- a/packages/rs-sdk/tests/fetch/contested_resource.rs +++ b/packages/rs-sdk/tests/fetch/contested_resource.rs @@ -1,8 +1,10 @@ //! 
Tests of ContestedResource object -use crate::fetch::{common::setup_logs, config::Config}; -use core::panic; -use dash_sdk::platform::FetchMany; +use crate::fetch::{ + common::{setup_logs, setup_sdk_for_test_case, TEST_DPNS_NAME}, + config::Config, +}; +use dash_sdk::{platform::FetchMany, Error}; use dpp::{ platform_value::Value, voting::{ @@ -17,8 +19,7 @@ use drive::query::{ vote_polls_by_document_type_query::VotePollsByDocumentTypeQuery, }; use drive_proof_verifier::types::ContestedResource; - -pub(crate) const INDEX_VALUE: &str = "dada"; +use std::panic::catch_unwind; /// Test that we can fetch contested resources /// @@ -40,24 +41,26 @@ async fn test_contested_resources_ok() { .await .expect("prerequisities"); - let index_name = "parentNameAndLabel"; + let query = base_query(&cfg); - let query = VotePollsByDocumentTypeQuery { + let rss = ContestedResource::fetch_many(&sdk, query) + .await + .expect("fetch contested resources"); + tracing::debug!(contested_resources=?rss, "Contested resources"); + assert!(!rss.0.is_empty()); +} + +fn base_query(cfg: &Config) -> VotePollsByDocumentTypeQuery { + VotePollsByDocumentTypeQuery { contract_id: cfg.existing_data_contract_id, document_type_name: cfg.existing_document_type_name.clone(), - index_name: index_name.to_string(), + index_name: "parentNameAndLabel".to_string(), start_at_value: None, - start_index_values: vec![Value::Text("dash".into())], + start_index_values: vec![Value::Text("dash".to_string())], end_index_values: vec![], limit: None, order_ascending: false, - }; - - let rss = ContestedResource::fetch_many(&sdk, query) - .await - .expect("fetch contested resources"); - tracing::debug!(contested_resources=?rss, "Contested resources"); - assert!(!rss.0.is_empty()); + } } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -140,13 +143,14 @@ async fn contested_resources_start_at_value() { /// ## Preconditions /// /// 1. At least 3 contested resources (eg. 
different DPNS names) exist -// TODO: fails due to PLAN-656, not tested enough so it can be faulty #[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[cfg_attr( feature = "network-testing", ignore = "requires manual DPNS names setup for masternode voting tests; see fn check_mn_voting_prerequisities()" )] -async fn contested_resources_limit() { +#[allow(non_snake_case)] +async fn contested_resources_limit_PLAN_656() { + // TODO: fails due to PLAN-656, not tested enough so it can be faulty setup_logs(); let cfg = Config::new(); @@ -222,202 +226,130 @@ async fn contested_resources_limit() { assert_eq!(i, count_all, "all contested resources fetched"); } } - /// Check various queries for [ContestedResource] that contain invalid field values /// /// ## Preconditions /// /// None +#[test_case::test_case(|_q| {}, Ok("ContestedResources([Value(Text(".into()); "unmodified base query is Ok")] +#[test_case::test_case(|q| q.start_index_values = vec![Value::Text("".to_string())], Ok("".into()); "index value empty string is Ok")] +#[test_case::test_case(|q| q.document_type_name = "some random non-existing name".to_string(), Err(r#"code: InvalidArgument, message: "document type some random non-existing name not found"#); "non existing document type returns InvalidArgument")] +#[test_case::test_case(|q| q.index_name = "nx index".to_string(), Err(r#"code: InvalidArgument, message: "index with name nx index is not the contested index"#); "non existing index returns InvalidArgument")] +#[test_case::test_case(|q| q.index_name = "dashIdentityId".to_string(), Err(r#"code: InvalidArgument, message: "index with name dashIdentityId is not the contested index"#); "existing non-contested index returns InvalidArgument")] +#[test_case::test_case(|q| q.start_at_value = Some((Value::Array(vec![]), true)), Err(r#"code: InvalidArgument"#); "start_at_value wrong index type returns InvalidArgument PLAN-653")] +#[test_case::test_case(|q| q.start_index_values = vec![], Ok(r#"ContestedResources([Value(Text("dash"))])"#.into()); "start_index_values empty vec returns top-level keys")] +#[test_case::test_case(|q| q.start_index_values = vec![Value::Text("".to_string())], Ok(r#"ContestedResources([])"#.into()); "start_index_values empty string returns zero results")] +#[test_case::test_case(|q| { + q.start_index_values = vec![ + Value::Text("dash".to_string()), + Value::Text(TEST_DPNS_NAME.to_string()), + ] +}, Err("incorrect index values error: too many start index values were provided, since no end index values were provided, the start index values must be less than the amount of properties in the contested index"); "start_index_values with two values returns error")] +#[test_case::test_case(|q| { + q.start_index_values = vec![]; + q.end_index_values = vec![Value::Text(TEST_DPNS_NAME.to_string())]; +}, Ok(r#"ContestedResources([Value(Text("dash"))])"#.into()); "end_index_values one value with empty start_index_values returns 'dash'")] +#[test_case::test_case(|q| { + q.start_index_values = vec![]; + q.end_index_values = vec![Value::Text(TEST_DPNS_NAME.to_string()), Value::Text("non existing".to_string())]; +}, Err("too many end index values were provided"); "end_index_values two values (1 nx) with empty start_index_values returns error")] +#[test_case::test_case(|q| { + q.start_index_values = vec![]; + q.end_index_values = vec![Value::Text("aaa non existing".to_string())]; +}, Ok(r#"ContestedResources([])"#.into()); "end_index_values with 1 nx value 'aaa*' and empty start_index_values returns zero objects")] 
+#[test_case::test_case(|q| { + q.start_index_values = vec![]; + q.end_index_values = vec![Value::Text("zzz non existing".to_string())]; +}, Ok(r#"ContestedResources([])"#.into()); "end_index_values with 1 nx value 'zzz*' and empty start_index_values returns zero objects")] +#[test_case::test_case(|q| { + q.start_index_values = vec![ + Value::Text("dash".to_string()), + Value::Text(TEST_DPNS_NAME.to_string()), + Value::Text("eee".to_string()), + ] +}, Err("incorrect index values error: too many start index values were provided, since no end index values were provided, the start index values must be less than the amount of properties in the contested index"); "too many items in start_index_values returns error")] +#[test_case::test_case(|q| { + q.end_index_values = vec![Value::Text("zzz non existing".to_string())] +}, Err("incorrect index values error: too many end index values were provided"); "Both start_ and end_index_values returns error")] +#[test_case::test_case(|q| { + q.start_index_values = vec![]; + q.end_index_values = vec![Value::Text("zzz non existing".to_string())] +}, Ok("ContestedResources([])".into()); "Non-existing end_index_values returns error")] +#[test_case::test_case(|q| q.end_index_values = vec![Value::Array(vec![0.into(), 1.into()])], Err("incorrect index values error: too many end index values were provided"); "wrong type of end_index_values should return InvalidArgument")] +#[test_case::test_case(|q| q.limit = Some(0), Err(r#"code: InvalidArgument"#); "limit 0 returns InvalidArgument")] +#[test_case::test_case(|q| q.limit = Some(std::u16::MAX), Err(r#"code: InvalidArgument"#); "limit std::u16::MAX returns InvalidArgument")] +#[test_case::test_case(|q| { + q.start_index_values = vec![Value::Text("dash".to_string())]; + q.start_at_value = Some((Value::Text(TEST_DPNS_NAME.to_string()), true)); + q.limit = Some(1); +}, Ok(format!(r#"ContestedResources([Value(Text({}))])"#, TEST_DPNS_NAME)); "exact match query returns one object PLAN-656")] #[tokio::test(flavor = "multi_thread", worker_threads = 1)] -async fn contested_resources_fields() { +#[cfg_attr( + feature = "network-testing", + ignore = "requires manual DPNS names setup for masternode voting tests; see fn check_mn_voting_prerequisities()" +)] +async fn contested_resources_fields( + query_mut_fn: fn(&mut VotePollsByDocumentTypeQuery), + expect: Result, +) -> Result<(), String> { setup_logs(); - type MutFn = fn(&mut VotePollsByDocumentTypeQuery); - struct TestCase { - name: &'static str, - query_mut_fn: MutFn, - expect: Result<&'static str, &'static str>, - } - - let test_cases: Vec = vec![ - TestCase { - name: "index value empty string is Ok", - query_mut_fn: |q| q.start_index_values = vec![Value::Text("".to_string())], - expect: Ok(""), - }, - TestCase { - name: "non existing document type returns InvalidArgument", - query_mut_fn: |q| q.document_type_name = "some random non-existing name".to_string(), - expect: Err( - r#"code: InvalidArgument, message: "document type some random non-existing name not found"#, - ), - }, - TestCase { - name: "non existing index returns InvalidArgument", - query_mut_fn: |q| q.index_name = "nx index".to_string(), - expect: Err( - r#"code: InvalidArgument, message: "index with name nx index is not the contested index"#, - ), - }, - TestCase { - name: "existing non-contested index returns InvalidArgument", - query_mut_fn: |q| q.index_name = "dashIdentityId".to_string(), - expect: Err( - r#"code: InvalidArgument, message: "index with name dashIdentityId is not the contested index"#, - 
), - }, - TestCase { - // this fails with code: Internal, see PLAN-563 - name: "start_at_value wrong index type returns InvalidArgument PLAN-563", - query_mut_fn: |q| q.start_at_value = Some((Value::Array(vec![]), true)), - expect: Err(r#"code: InvalidArgument"#), - }, - TestCase { - name: "start_index_values empty vec returns top-level keys", - query_mut_fn: |q| q.start_index_values = vec![], - expect: Ok(r#"ContestedResources([Value(Text("dash"))])"#), - }, - TestCase { - name: "start_index_values empty string returns zero results", - query_mut_fn: |q| q.start_index_values = vec![Value::Text("".to_string())], - expect: Ok(r#"ContestedResources([])"#), - }, - TestCase { - // fails due to PLAN-662 - name: "start_index_values with two values PLAN-662", - query_mut_fn: |q| { - q.start_index_values = vec![ - Value::Text("dash".to_string()), - Value::Text("dada".to_string()), - ] - }, - expect: Ok(r#"ContestedResources([Value(Text("dash"))])"#), - }, - TestCase { - // fails due to PLAN-662 - name: "too many items in start_index_values PLAN-662", - query_mut_fn: |q| { - q.start_index_values = vec![ - Value::Text("dash".to_string()), - Value::Text("dada".to_string()), - Value::Text("eee".to_string()), - ] - }, - expect: Ok( - r#"code: InvalidArgument, message: "incorrect index values error: the start index values and the end index"#, - ), - }, - TestCase { - // fails due to PLAN-663 - name: "Non existing end_index_values PLAN-663", - query_mut_fn: |q| q.end_index_values = vec![Value::Text("non existing".to_string())], - expect: Ok(r#"ContestedResources([Value(Text("dash"))])"#), - }, - TestCase { - // fails due to PLAN-663 - name: "wrong type of end_index_values should return InvalidArgument PLAN-663", - query_mut_fn: |q| q.end_index_values = vec![Value::Array(vec![0.into(), 1.into()])], - expect: Ok(r#"code: InvalidArgument"#), - }, - TestCase { - // fails due to PLAN-664 - name: "limit 0 returns InvalidArgument PLAN-664", - query_mut_fn: |q| q.limit = Some(0), - expect: Ok(r#"code: InvalidArgument"#), - }, - TestCase { - name: "limit std::u16::MAX returns InvalidArgument PLAN-664", - query_mut_fn: |q| q.limit = Some(std::u16::MAX), - expect: Ok(r#"code: InvalidArgument"#), - }, - ]; - let cfg = Config::new(); check_mn_voting_prerequisities(&cfg) .await .expect("prerequisities"); - let base_query = VotePollsByDocumentTypeQuery { - contract_id: cfg.existing_data_contract_id, - document_type_name: cfg.existing_document_type_name.clone(), - index_name: "parentNameAndLabel".to_string(), - start_at_value: None, - // start_index_values: vec![], // Value(Text("dash")), Value(Text(""))]) - start_index_values: vec![Value::Text("dash".to_string())], - end_index_values: vec![], - limit: None, - order_ascending: false, + tracing::debug!(?expect, "Running test case"); + // handle panics to not stop other test cases from running + let unwinded = catch_unwind(|| { + { + pollster::block_on(async { + let mut query = base_query(&cfg); + query_mut_fn(&mut query); + + let (test_case_id, sdk) = + setup_sdk_for_test_case(cfg, query.clone(), "contested_resources_fields").await; + tracing::debug!(test_case_id, ?query, "Executing query"); + + ContestedResource::fetch_many(&sdk, query).await + }) + } + }); + let result = match unwinded { + Ok(r) => r, + Err(e) => { + let msg = if let Some(s) = e.downcast_ref::<&str>() { + s.to_string() + } else if let Some(s) = e.downcast_ref::() { + s.to_string() + } else { + format!("unknown panic type: {:?}", std::any::type_name_of_val(&e)) + }; + + tracing::error!("PANIC: {}", msg); 
+ Err(Error::Generic(msg)) + } }; - // check if the base query works - let base_query_sdk = cfg.setup_api("contested_resources_fields_base_query").await; - let result = ContestedResource::fetch_many(&base_query_sdk, base_query.clone()).await; - assert!( - result.is_ok_and(|v| !v.0.is_empty()), - "base query should return some results" - ); - - let mut failures: Vec<(&'static str, String)> = Default::default(); - - for test_case in test_cases { - tracing::debug!("Running test case: {}", test_case.name); - // create new sdk to ensure that test cases don't interfere with each other - let sdk = cfg - .setup_api(&format!("contested_resources_fields_{}", test_case.name)) - .await; - - let mut query = base_query.clone(); - (test_case.query_mut_fn)(&mut query); - - let result = ContestedResource::fetch_many(&sdk, query).await; - match test_case.expect { - Ok(expected) if result.is_ok() => { - let result_string = format!("{:?}", result.as_ref().expect("result")); - if !result_string.contains(expected) { - failures.push(( - test_case.name, - format!("expected: {:#?}\ngot: {:?}\n", expected, result), - )); - } - } - Err(expected) if result.is_err() => { - let result = result.expect_err("error"); - if !result.to_string().contains(expected) { - failures.push(( - test_case.name, - format!("expected: {:#?}\ngot {:?}\n", expected, result), - )); - } - } - expected => { - failures.push(( - test_case.name, - format!("expected: {:#?}\ngot: {:?}\n", expected, result), - )); + match expect { + Ok(expected) if result.is_ok() => { + let result_string = format!("{:?}", result.as_ref().expect("result")); + if !result_string.contains(&expected) { + Err(format!("EXPECTED: {} GOT: {:?}\n", expected, result)) + } else { + Ok(()) } } - } - if !failures.is_empty() { - for failure in &failures { - tracing::error!(?failure, "Failed: {}", failure.0); + Err(expected) if result.is_err() => { + let result = result.expect_err("error"); + if !result.to_string().contains(expected) { + Err(format!("EXPECTED: {} GOT: {:?}\n", expected, result)) + } else { + Ok(()) + } } - let failed_cases = failures - .iter() - .map(|(name, _)| name.to_string()) - .collect::>() - .join("\n* "); - - panic!( - "{} test cases failed:\n{}\n\n{}\n", - failures.len(), - failed_cases, - failures - .iter() - .map(|(name, msg)| format!("===========================\n{}:\n\n{:?}", name, msg)) - .collect::>() - .join("\n") - ); + expected => Err(format!("EXPECTED: {:?} GOT: {:?}\n", expected, result)), } } @@ -460,7 +392,7 @@ pub async fn check_mn_voting_prerequisities(cfg: &Config) -> Result<(), Vec Result<(), Vec `s - Fetch system contract` -> `n - Fetch DPNS contract` -> `q - Back to Contracts ` +/// * press ENTER to enter the fetched contract, then select `domain` -> `c - Query Contested Resources` +/// * Select one of displayed names, use `v - Vote`, select some identity. +/// +/// Now, vote should be casted and you can run this test. 
+/// #[cfg_attr( feature = "network-testing", - ignore = "requires manual DPNS names setup for masternode voting tests; see fn check_mn_voting_prerequisities()" + ignore = "requires manual DPNS names setup for masternode voting tests; see docs of contested_resource_identity_votes_ok()" )] #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn contested_resource_identity_votes_ok() { @@ -52,18 +80,18 @@ async fn contested_resource_identity_votes_ok() { let cfg = Config::new(); let sdk = cfg.setup_api("contested_resource_identity_votes_ok").await; - // Given some existing identity ID, that is, proTxHash of some Validator - - // TODO: Fetch proTxHash from the network instead of hardcoding; it's not so trivial as it must support our mocking - // mechanisms - let protx_hex = "7624E7D0D7C8837D4D02A19700F4116091A8AD145352420193DE8828F6D00BBF"; - let protx = ProTxHash::from_hex(protx_hex).expect("ProTxHash from hex"); + // Given some existing proTxHash of some Validator that already voted + let protx = cfg.existing_protxhash().expect( + "contested_resource_identity_votes_ok requires existing_protxhash to be set in config", + ); // When I query for votes given by this identity let votes = ResourceVote::fetch_many(&sdk, protx) .await .expect("fetch votes for identity"); + tracing::debug!(?protx, ?votes, "votes of masternode"); + // Then I get some votes assert!(!votes.is_empty(), "votes expected for this query"); } diff --git a/packages/rs-sdk/tests/fetch/contested_resource_polls_by_ts.rs b/packages/rs-sdk/tests/fetch/contested_resource_polls_by_ts.rs index 3f970fa6ef6..7554af5ddc3 100644 --- a/packages/rs-sdk/tests/fetch/contested_resource_polls_by_ts.rs +++ b/packages/rs-sdk/tests/fetch/contested_resource_polls_by_ts.rs @@ -1,8 +1,9 @@ //! Test VotePollsByEndDateDriveQuery use crate::fetch::{common::setup_logs, config::Config}; +use chrono::{DateTime, TimeZone, Utc}; use dash_sdk::platform::FetchMany; -use dpp::{identity::TimestampMillis, voting::vote_polls::VotePoll}; +use dpp::voting::vote_polls::VotePoll; use drive::query::VotePollsByEndDateDriveQuery; use std::collections::BTreeMap; @@ -51,8 +52,8 @@ async fn vote_polls_by_ts_ok() { feature = "network-testing", ignore = "requires manual DPNS names setup for masternode voting tests; see fn check_mn_voting_prerequisities()" )] -// fails due to PLAN-661 -async fn vote_polls_by_ts_order() { +#[allow(non_snake_case)] +async fn vote_polls_by_ts_order_PLAN_661() { setup_logs(); let cfg = Config::new(); @@ -86,8 +87,8 @@ async fn vote_polls_by_ts_order() { let (prev_ts, _) = &enumerated[&(i - 1)]; if order_ascending { assert!( - ts >= prev_ts, - "ascending order: item {} ({}) must be >= than item {} ({})", + ts > prev_ts, + "ascending order: item {} ({}) must be > item {} ({})", ts, i, prev_ts, ); } else { assert!( - ts <= prev_ts, - "descending order: item {} ({}) must be >= than item {} ({})", + ts < prev_ts, + "descending order: item {} ({}) must be < item {} ({})", ts, i, prev_ts, ); } @@ -129,18 +130,18 @@ async fn vote_polls_by_ts_limit() { .await .expect("prerequisities"); - // Given index with more than 2 contested resources + // Given index with more than 2 contested resources; note LIMIT must be > 1 const LIMIT: usize = 2; const LIMIT_ALL: usize = 100; - let test_start_time: TimestampMillis = chrono::Utc::now().timestamp_millis() as u64; + let end_time: DateTime = Utc.with_ymd_and_hms(2035, 12, 24, 13, 59, 30).unwrap(); let query_all = VotePollsByEndDateDriveQuery { 
limit: Some(LIMIT_ALL as u16), offset: None, order_ascending: true, start_time: None, - end_time: Some((test_start_time, true)), + end_time: Some((end_time.timestamp_millis() as u64, true)), // 1 month in future }; let all = VotePoll::fetch_many(&sdk, query_all.clone()) @@ -152,13 +153,18 @@ async fn vote_polls_by_ts_limit() { let all_values = all.0.into_iter().collect::>(); - tracing::debug!(count_all_timestamps, "Count all"); - // When we query for 2 contested values at a time, we get all of them - let mut checked_count: usize = 0; - let mut start_time = None; + tracing::debug!( + count = count_all_timestamps, + all = ?all_values, + "All results" + ); for inclusive in [true, false] { - while checked_count < LIMIT_ALL { + // When we query for 2 contested values at a time, we get all of them + let mut checked_count: usize = 0; + let mut start_time = None; + + loop { let query = VotePollsByEndDateDriveQuery { limit: Some(LIMIT as u16), start_time, @@ -169,7 +175,7 @@ async fn vote_polls_by_ts_limit() { .await .expect("fetch vote polls"); - let Some(last) = rss.0.keys().last().copied() else { + let Some(last) = rss.0.last() else { // no more vote polls break; }; @@ -178,31 +184,43 @@ async fn vote_polls_by_ts_limit() { let length = rss.0.len(); for (j, current) in rss.0.iter().enumerate() { - let all_idx = if inclusive && (j + checked_count > 0) { + let all_idx = if inclusive && (checked_count > 0) { j + checked_count - 1 } else { j + checked_count }; let expected = &all_values[all_idx]; - assert_eq!(*current.0, expected.0, "timestamp should match"); - assert_eq!(current.1, &expected.1, "vote polls should match"); + assert_eq!( + current.0, expected.0, + "inclusive {}: timestamp should match", + inclusive + ); + assert_eq!( + ¤t.1, &expected.1, + "inclusive {}: vote polls should match", + inclusive + ); } - let expected = if checked_count + LIMIT > count_all_timestamps { - count_all_timestamps - checked_count + tracing::debug!(polls=?rss, checked_count, ?start_time, "Vote polls"); + + start_time = Some((last.0, inclusive)); + // when inclusive, we include the first item in checked_count only on first iteration + checked_count += if inclusive && checked_count != 0 { + length - 1 } else { - LIMIT + length }; - assert_eq!(length, expected as usize); - tracing::debug!(polls=?rss, checked_count, "Vote polls"); - start_time = Some((last, inclusive)); - checked_count += if inclusive { length - 1 } else { length }; + if (inclusive && length == 1) || (!inclusive && length == 0) { + break; + } } + + assert_eq!( + checked_count, count_all_timestamps, + "all vote polls should be checked when inclusive is {}", + inclusive + ); } - assert_eq!( - checked_count, - count_all_timestamps * 2, - "all vote polls should be checked twice (inclusive and exclusive)" - ); } diff --git a/packages/rs-sdk/tests/fetch/contested_resource_vote_state.rs b/packages/rs-sdk/tests/fetch/contested_resource_vote_state.rs index 6d056a38d7a..f792a9b5ba3 100644 --- a/packages/rs-sdk/tests/fetch/contested_resource_vote_state.rs +++ b/packages/rs-sdk/tests/fetch/contested_resource_vote_state.rs @@ -1,6 +1,8 @@ //! Tests for SDK requests that return one or more [Contender] objects. 
use crate::fetch::{ - common::setup_logs, config::Config, contested_resource::check_mn_voting_prerequisities, + common::{setup_logs, setup_sdk_for_test_case, TEST_DPNS_NAME}, + config::Config, + contested_resource::check_mn_voting_prerequisities, }; use dash_sdk::platform::{Fetch, FetchMany}; use dpp::{ @@ -19,6 +21,7 @@ use dpp::{ use drive::query::vote_poll_vote_state_query::{ ContestedDocumentVotePollDriveQuery, ContestedDocumentVotePollDriveQueryResultType, }; +use test_case::test_case; /// Ensure we get proof of non-existence when querying for a non-existing index value. #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -40,12 +43,11 @@ async fn contested_resource_vote_states_not_found() { start_at: None, vote_poll: ContestedDocumentResourceVotePoll { index_name: "parentNameAndLabel".to_string(), - index_values: vec![label.into()], + index_values: vec!["nx".into(), label.into()], document_type_name: cfg.existing_document_type_name, contract_id: data_contract_id, }, allow_include_locked_and_abstaining_vote_tally: true, - // TODO test other result types result_type: ContestedDocumentVotePollDriveQueryResultType::DocumentsAndVoteTally, }; @@ -118,7 +120,12 @@ async fn contested_resource_vote_states_nx_contract() { /// /// ## Preconditions /// -/// 1. There must be at least one contender for name "dash" and value "dada". +/// 1. There must be at least one contender for name "dash" and value "[TEST_DPNS_NAME]". +/// +#[cfg_attr( + feature = "network-testing", + ignore = "equires manual DPNS names setup for masternode voting tests; see fn check_mn_voting_prerequisities()" +)] #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn contested_resource_vote_states_ok() { setup_logs(); @@ -126,32 +133,21 @@ async fn contested_resource_vote_states_ok() { let cfg = Config::new(); let sdk = cfg.setup_api("contested_resource_vote_states_ok").await; // Given some existing data contract and existing label - let data_contract_id = cfg.existing_data_contract_id; - let label = Value::Text(convert_to_homograph_safe_chars("dada")); - let document_type_name = "domain".to_string(); + + let query = base_query(&cfg); + + let data_contract_id = query.vote_poll.contract_id; + let document_type_name = &query.vote_poll.document_type_name; let data_contract = DataContract::fetch_by_identifier(&sdk, data_contract_id) .await .expect("fetch data contract") .expect("found data contract"); let document_type = data_contract - .document_type_for_name(&document_type_name) + .document_type_for_name(document_type_name) .expect("found document type"); // When I query for vote poll states with existing index values - let query = ContestedDocumentVotePollDriveQuery { - limit: None, - offset: None, - start_at: None, - vote_poll: ContestedDocumentResourceVotePoll { - index_name: "parentNameAndLabel".to_string(), - index_values: vec![Value::Text("dash".into()), label], - document_type_name, - contract_id: data_contract_id, - }, - allow_include_locked_and_abstaining_vote_tally: true, - result_type: ContestedDocumentVotePollDriveQueryResultType::DocumentsAndVoteTally, - }; let contenders = ContenderWithSerializedDocument::fetch_many(&sdk, query) .await @@ -177,19 +173,42 @@ async fn contested_resource_vote_states_ok() { assert!(seen.insert(doc.id()), "duplicate contender"); let properties = doc.properties(); assert_eq!(properties["parentDomainName"], Value::Text("dash".into())); - assert_eq!(properties["label"], Value::Text("dada".into())); + assert_eq!(properties["label"], 
Value::Text(TEST_DPNS_NAME.into())); tracing::debug!(?properties, "document properties"); } } +fn base_query(cfg: &Config) -> ContestedDocumentVotePollDriveQuery { + let index_value_2 = Value::Text(convert_to_homograph_safe_chars(TEST_DPNS_NAME)); + + ContestedDocumentVotePollDriveQuery { + limit: None, + offset: None, + start_at: None, + vote_poll: ContestedDocumentResourceVotePoll { + index_name: "parentNameAndLabel".to_string(), + index_values: vec![Value::Text("dash".into()), index_value_2], + document_type_name: cfg.existing_document_type_name.clone(), + contract_id: cfg.existing_data_contract_id, + }, + allow_include_locked_and_abstaining_vote_tally: true, + result_type: ContestedDocumentVotePollDriveQueryResultType::DocumentsAndVoteTally, + } +} + /// Ensure we can limit the number of returned contenders. /// /// ## Preconditions /// -/// 1. There must be at least 3 condenders for name "dash" and value "dada". +/// 1. There must be at least 3 contenders for name "dash" and value [TEST_DPNS_NAME]. /// #[tokio::test(flavor = "multi_thread", worker_threads = 1)] -async fn contested_resource_vote_states_with_limit() { +#[cfg_attr( + feature = "network-testing", + ignore = "requires manual DPNS names setup for masternode voting tests; see fn check_mn_voting_prerequisities()" +)] +#[allow(non_snake_case)] +async fn contested_resource_vote_states_with_limit_PLAN_674() { setup_logs(); let cfg = Config::new(); @@ -203,7 +222,7 @@ // Given more contenders for some `label` than the limit let data_contract_id = cfg.existing_data_contract_id; let limit: u16 = 2; - let label = Value::Text("dada".into()); + let label = Value::Text(TEST_DPNS_NAME.into()); // ensure we have enough contenders let query_all = ContestedDocumentVotePollDriveQuery { @@ -223,10 +242,12 @@ let all_contenders = ContenderWithSerializedDocument::fetch_many(&sdk, query_all.clone()) .await .expect("fetch many contenders") - .contenders - .len(); + .contenders; + + tracing::debug!(?all_contenders, "All contenders"); + assert!( - all_contenders > limit as usize, + all_contenders.len() > limit as usize, "we need more than {} contenders for this test", limit ); @@ -237,11 +258,11 @@ ..query_all }; - let contenders = ContenderWithSerializedDocument::fetch_many(&sdk, query) + let contenders = ContenderWithSerializedDocument::fetch_many(&sdk, query.clone()) .await .expect("fetch many contenders"); // Then I get no more than the limit of contenders - tracing::debug!(contenders=?contenders, "Contenders"); + tracing::debug!(contenders=?contenders, ?query, "Contenders"); assert_eq!( contenders.contenders.len(), @@ -252,235 +273,84 @@ ); } -/// Check various queries for [ContenderWithSerializedDocument] that contain invalid field values -/// -/// ## Preconditions -/// -/// None +type MutFn = fn(&mut ContestedDocumentVotePollDriveQuery); + +#[test_case(|q| q.limit = Some(0), Err("limit 0 out of bounds of [1, 100]"); "limit 0")] +#[test_case(|q| q.limit = Some(std::u16::MAX), Err("limit 65535 out of bounds of [1, 100]"); "limit std::u16::MAX")] +#[test_case(|q| q.start_at = Some(([0x11; 32], true)), Ok("Contenders { contenders: {Identifier("); "start_at does not exist should return next contenders")] +#[test_case(|q| q.start_at = Some(([0xff; 32], true)), Ok("Contenders { contenders: {}, abstain_vote_tally: None, 
lock_vote_tally: None }"); "start_at 0xff;32 should return zero contenders")] +#[test_case(|q| q.vote_poll.document_type_name = "nx doctype".to_string(), Err(r#"code: InvalidArgument, message: "document type nx doctype not found"#); "non existing document type returns InvalidArgument")] +#[test_case(|q| q.vote_poll.index_name = "nx index".to_string(), Err(r#"code: InvalidArgument, message: "index with name nx index is not the contested index"#); "non existing index returns InvalidArgument")] +#[test_case(|q| q.vote_poll.index_name = "dashIdentityId".to_string(), Err(r#"code: InvalidArgument, message: "index with name dashIdentityId is not the contested index"#); "existing non-contested index returns InvalidArgument")] +#[test_case(|q| q.vote_poll.index_values = vec![], Err("query uses index parentNameAndLabel, this index has 2 properties, but the query provided 0 index values instead"); "index_values empty vec returns error")] +#[test_case(|q| q.vote_poll.index_values = vec![Value::Text("".to_string())], Err("query uses index parentNameAndLabel, this index has 2 properties, but the query provided 1 index values instead"); "index_values empty string returns error")] +#[test_case(|q| q.vote_poll.index_values = vec![Value::Text("dash".to_string())], Err("query uses index parentNameAndLabel, this index has 2 properties, but the query provided 1 index values instead"); "index_values with one value returns error")] +#[test_case(|q| { + q.vote_poll.index_values = vec![ + Value::Text("dash".to_string()), + Value::Text(TEST_DPNS_NAME.to_string()), + ] +}, Ok("contenders: {Identifier("); "index_values with two values returns contenders")] +#[test_case(|q| { + q.vote_poll.index_values = vec![ + Value::Text("dash".to_string()), + Value::Text(TEST_DPNS_NAME.to_string()), + Value::Text("eee".to_string()), + ] +}, Err("query uses index parentNameAndLabel, this index has 2 properties, but the query provided 3 index values instead"); "index_values too many items should return error")] +#[test_case(|q| q.vote_poll.contract_id = Identifier::from([0xff; 32]), Err(r#"InvalidArgument, message: "contract not found error"#); "invalid contract id should cause InvalidArgument error")] +#[test_case(|q| q.allow_include_locked_and_abstaining_vote_tally = false, Ok(r#"contenders: {Identifier(IdentifierBytes32"#); "allow_include_locked_and_abstaining_vote_tally false should return some contenders")] +#[test_case(|q| { + q.result_type = ContestedDocumentVotePollDriveQueryResultType::Documents +}, Ok(r#"]), vote_tally: None })"#); "result_type Documents")] +#[test_case(|q| { + q.result_type = ContestedDocumentVotePollDriveQueryResultType::DocumentsAndVoteTally +}, Ok(r#"]), vote_tally: Some("#); "result_type DocumentsAndVoteTally")] +#[test_case(|q| { + q.result_type = ContestedDocumentVotePollDriveQueryResultType::VoteTally +}, Ok(r#"serialized_document: None, vote_tally: Some"#); "result_type VoteTally")] #[tokio::test(flavor = "multi_thread", worker_threads = 1)] -async fn contested_resource_vote_states_fields() { +#[cfg_attr( + feature = "network-testing", + ignore = "equires manual DPNS names setup for masternode voting tests; see fn check_mn_voting_prerequisities()" +)] +async fn contested_rss_vote_state_fields( + query_mut_fn: MutFn, + expect: Result<&'static str, &'static str>, +) -> Result<(), String> { setup_logs(); - type MutFn = fn(&mut ContestedDocumentVotePollDriveQuery); - struct TestCase { - name: &'static str, - query_mut_fn: MutFn, - expect: Result<&'static str, &'static str>, - } - - let test_cases: Vec 
= vec![ - TestCase { - name: "limit 0 PLAN-664", - query_mut_fn: |q| q.limit = Some(0), - expect: Ok("..."), - }, - TestCase { - name: "limit std::u16::MAX PLAN-664", - query_mut_fn: |q| q.limit = Some(std::u16::MAX), - expect: Ok("..."), - }, - TestCase { - name: "offset not None", - query_mut_fn: |q| q.offset = Some(1), - expect: Err( - r#"Generic("ContestedDocumentVotePollDriveQuery.offset field is internal and must be set to None")"#, - ), - }, - TestCase { - // TODO: pagination test - name: "start_at does not exist", - query_mut_fn: |q| q.start_at = Some(([0x11; 32], true)), - expect: Ok("Contenders { contenders: {Identifier("), - }, - TestCase { - name: "start_at 0xff;32", - query_mut_fn: |q| q.start_at = Some(([0xff; 32], true)), - expect: Ok("Contenders { contenders: {Identifier("), - }, - TestCase { - name: "non existing document type returns InvalidArgument", - query_mut_fn: |q| q.vote_poll.document_type_name = "nx doctype".to_string(), - expect: Err(r#"code: InvalidArgument, message: "document type nx doctype not found"#), - }, - TestCase { - name: "non existing index returns InvalidArgument", - query_mut_fn: |q| q.vote_poll.index_name = "nx index".to_string(), - expect: Err( - r#"code: InvalidArgument, message: "index with name nx index is not the contested index"#, - ), - }, - TestCase { - name: "existing non-contested index returns InvalidArgument", - query_mut_fn: |q| q.vote_poll.index_name = "dashIdentityId".to_string(), - expect: Err( - r#"code: InvalidArgument, message: "index with name dashIdentityId is not the contested index"#, - ), - }, - TestCase { - // todo maybe this should fail? or return everything? - name: "index_values empty vec returns zero results PLAN-665", - query_mut_fn: |q| q.vote_poll.index_values = vec![], - expect: Ok(r#"Contenders { contenders: {},"#), - }, - TestCase { - name: "index_values empty string returns zero results", - query_mut_fn: |q| q.vote_poll.index_values = vec![Value::Text("".to_string())], - expect: Ok("contenders: {}"), - }, - TestCase { - name: "index_values with one value returns results PLAN-665", - query_mut_fn: |q| q.vote_poll.index_values = vec![Value::Text("dash".to_string())], - expect: Ok("contenders: {...}"), - }, - TestCase { - name: "index_values with two values returns contenders ", - query_mut_fn: |q| { - q.vote_poll.index_values = vec![ - Value::Text("dash".to_string()), - Value::Text("dada".to_string()), - ] - }, - expect: Ok("contenders: {Identifier("), - }, - TestCase { - name: "index_values too many items should return error PLAN-665", - query_mut_fn: |q| { - q.vote_poll.index_values = vec![ - Value::Text("dash".to_string()), - Value::Text("dada".to_string()), - Value::Text("eee".to_string()), - ] - }, - expect: Ok( - r#"code: InvalidArgument, message: "incorrect index values error: the start index values and the end index"#, - ), - }, - TestCase { - name: "invalid contract id should cause InvalidArgument error", - query_mut_fn: |q| q.vote_poll.contract_id = Identifier::from([0xff; 32]), - expect: Err(r#"InvalidArgument, message: "contract not found error"#), - }, - TestCase { - name: - "allow_include_locked_and_abstaining_vote_tally false should return some contenders", - query_mut_fn: |q| q.allow_include_locked_and_abstaining_vote_tally = false, - expect: Ok(r#"contenders: {Identifier(IdentifierBytes32"#), - }, - TestCase { - name: "result_type Documents", - query_mut_fn: |q| { - q.result_type = ContestedDocumentVotePollDriveQueryResultType::Documents - }, - expect: Ok(r#"]), vote_tally: None })"#), - }, - 
TestCase { - name: "result_type DocumentsAndVoteTally", - query_mut_fn: |q| { - q.result_type = ContestedDocumentVotePollDriveQueryResultType::DocumentsAndVoteTally - }, - expect: Ok(r#"]), vote_tally: Some("#), - }, - TestCase { - name: "result_type VoteTally", - query_mut_fn: |q| { - q.result_type = ContestedDocumentVotePollDriveQueryResultType::VoteTally - }, - expect: Ok(r#"serialized_document: None, vote_tally: Some"#), - }, - ]; - let cfg = Config::new(); check_mn_voting_prerequisities(&cfg) .await .expect("prerequisities"); - let base_query = ContestedDocumentVotePollDriveQuery { - limit: None, - offset: None, - start_at: None, - vote_poll: ContestedDocumentResourceVotePoll { - index_name: "parentNameAndLabel".to_string(), - index_values: vec![Value::Text("dash".into()), Value::Text("dada".into())], - document_type_name: cfg.existing_document_type_name.clone(), - contract_id: cfg.existing_data_contract_id, - }, - allow_include_locked_and_abstaining_vote_tally: true, - result_type: ContestedDocumentVotePollDriveQueryResultType::DocumentsAndVoteTally, - }; - - // check if the base query works - let base_query_sdk = cfg - .setup_api("contested_resource_vote_states_fields_base_query") - .await; - let result = - ContenderWithSerializedDocument::fetch_many(&base_query_sdk, base_query.clone()).await; - assert!( - result.is_ok_and(|v| !v.contenders.is_empty()), - "base query should return some results" - ); - - let mut failures: Vec<(&'static str, String)> = Default::default(); - - for test_case in test_cases { - tracing::debug!("Running test case: {}", test_case.name); - // create new sdk to ensure that test cases don't interfere with each other - let sdk = cfg - .setup_api(&format!( - "contested_resources_vote_states_fields_{}", - test_case.name - )) - .await; - - let mut query = base_query.clone(); - (test_case.query_mut_fn)(&mut query); - - let result = ContenderWithSerializedDocument::fetch_many(&sdk, query).await; - match test_case.expect { - Ok(expected) if result.is_ok() => { - let result_string = format!("{:?}", result.as_ref().expect("result")); - if !result_string.contains(expected) { - failures.push(( - test_case.name, - format!("expected: {:#?}\ngot: {:?}\n", expected, result), - )); - } - } - Err(expected) if result.is_err() => { - let result = result.expect_err("error"); - if !result.to_string().contains(expected) { - failures.push(( - test_case.name, - format!("expected: {:#?}\ngot {:?}\n", expected, result), - )); - } - } - expected => { - failures.push(( - test_case.name, - format!("expected: {:#?}\ngot: {:?}\n", expected, result), - )); + let mut query = base_query(&cfg); + query_mut_fn(&mut query); + let (test_case_id, sdk) = + setup_sdk_for_test_case(cfg, query.clone(), "contested_rss_vote_state_fields_").await; + + tracing::debug!(test_case_id, ?query, "Executing test case query"); + + let result = ContenderWithSerializedDocument::fetch_many(&sdk, query).await; + tracing::debug!(?result, "Result of test case"); + match expect { + Ok(expected) if result.is_ok() => { + let result_string = format!("{:?}", result.as_ref().expect("result")); + if !result_string.contains(expected) { + Err(format!("expected: {:#?}\ngot: {:?}\n", expected, result)) + } else { + Ok(()) } } - } - if !failures.is_empty() { - for failure in &failures { - tracing::error!(?failure, "Failed: {}", failure.0); + Err(expected) if result.is_err() => { + let result = result.expect_err("error"); + if !result.to_string().contains(expected) { + Err(format!("expected: {:#?}\ngot {:?}\n", expected, result)) + 
diff --git a/packages/rs-sdk/tests/fetch/contested_resource_voters.rs b/packages/rs-sdk/tests/fetch/contested_resource_voters.rs
index 20ef80e0aa9..8bf8f1badd4 100644
--- a/packages/rs-sdk/tests/fetch/contested_resource_voters.rs
+++ b/packages/rs-sdk/tests/fetch/contested_resource_voters.rs
@@ -1,13 +1,29 @@
 //! Test GetContestedResourceVotersForIdentityRequest
 
-use dash_sdk::platform::{Fetch, FetchMany};
-use dpp::{identifier::Identifier, identity::Identity, platform_value::Value};
-use drive::query::vote_poll_contestant_votes_query::ContestedDocumentVotePollVotesDriveQuery;
+use dash_sdk::platform::FetchMany;
+use dpp::{
+    identifier::Identifier,
+    platform_value::{string_encoding::Encoding, Value},
+    voting::contender_structs::ContenderWithSerializedDocument,
+};
+use drive::query::{
+    vote_poll_contestant_votes_query::ContestedDocumentVotePollVotesDriveQuery,
+    vote_poll_vote_state_query::{
+        ContestedDocumentVotePollDriveQuery, ContestedDocumentVotePollDriveQueryResultType,
+    },
+};
 use drive_proof_verifier::types::Voter;
 
-use crate::fetch::{common::setup_logs, config::Config};
+use crate::fetch::{
+    common::{setup_logs, TEST_DPNS_NAME},
+    config::Config,
+};
 
 /// When we request votes for a non-existing identity, we should get no votes.
+#[cfg_attr(
+    feature = "network-testing",
+    ignore = "requires manual DPNS names setup for masternode voting tests; see docs of contested_resource_identity_votes_ok()"
+)]
 #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
 async fn test_contested_resource_voters_for_identity_not_found() {
     setup_logs();
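Editor's note: the hunk above adds the conditional-ignore attribute used throughout these test files. A short sketch of the pattern follows; the function name is hypothetical and only illustrates how the attribute combines with the async test macro.

// Hedged sketch: the test always compiles, but when the crate's
// `network-testing` feature is enabled it is marked #[ignore] until the
// DPNS voting fixtures have been prepared manually.
#[cfg_attr(
    feature = "network-testing",
    ignore = "requires manual DPNS names setup for masternode voting tests"
)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn needs_manual_voting_fixtures() {
    // body that depends on externally prepared masternode voting state
}

Such tests can still be run explicitly through libtest's `--ignored` (or `--include-ignored`) flag, for example `cargo test --features network-testing -- --ignored`, assuming the feature is exposed by the test crate.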
@@ -46,8 +62,12 @@
 ///
 /// ## Preconditions
 ///
-/// 1. Votes exist for the given contestant.
+/// 1. Votes exist for DPNS name [TEST_DPNS_NAME].
 #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+#[cfg_attr(
+    feature = "network-testing",
+    ignore = "requires manual DPNS names setup for masternode voting tests; see fn check_mn_voting_prerequisities()"
+)]
 async fn contested_resource_voters_for_existing_contestant() {
     setup_logs();
 
@@ -56,44 +76,74 @@
         .setup_api("contested_resource_voters_for_existing_contestant")
         .await;
 
-    // Given a known contestant ID that has votes
-    // TODO: lookup contestant ID
-    let contestant_id = Identifier::from_string(
-        "D63rWKSagCgEE53XPkouP3swN9n87jHvjesFEZEh1cLr",
-        dpp::platform_value::string_encoding::Encoding::Base58,
-    )
-    .expect("valid contestant ID");
+    super::contested_resource::check_mn_voting_prerequisities(&cfg)
+        .await
+        .expect("prerequisites");
 
-    let index_name = "parentNameAndLabel";
-    let index_value = Value::Text("dada".to_string());
-    // double-check that the contestant identity exist
-    let _contestant_identity = Identity::fetch(&sdk, contestant_id)
+    let index_name = "parentNameAndLabel".to_string();
+    let index_value = Value::Text(TEST_DPNS_NAME.to_string());
+
+    // fetch contestant
+    let contestants_query = ContestedDocumentVotePollDriveQuery {
+        vote_poll: dpp::voting::vote_polls::contested_document_resource_vote_poll::ContestedDocumentResourceVotePoll {
+            contract_id: cfg.existing_data_contract_id,
+            document_type_name: cfg.existing_document_type_name.clone(),
+            index_name: index_name.clone(),
+            index_values: vec![Value::Text("dash".into()), index_value.clone()],
+        },
+        limit: None, // TODO: Change to Some(1) when PLAN-656 is fixed
+        offset: None,
+        allow_include_locked_and_abstaining_vote_tally: true,
+        start_at: None,
+        result_type: ContestedDocumentVotePollDriveQueryResultType::DocumentsAndVoteTally,
+    };
+
+    let contenders = ContenderWithSerializedDocument::fetch_many(&sdk, contestants_query)
         .await
-        .expect("fetch identity")
-        .expect("contestant identity must exist");
+        .expect("fetch contenders");
+    let contender_ids = contenders
+        .contenders
+        .keys()
+        .map(|id| id.to_string(Encoding::Base58))
+        .collect::<Vec<String>>();
+    tracing::debug!(
+        contenders = ?contender_ids,
+        "contenders for {}",
+        &index_value
+    );
 
-    // When I query for votes given to this contestant
-    let query = ContestedDocumentVotePollVotesDriveQuery {
+    let mut votes = 0;
+
+    for contestant in contenders.contenders.keys() {
+        let query = ContestedDocumentVotePollVotesDriveQuery {
             limit: None,
             offset: None,
-            order_ascending: true,
             start_at: None,
+            order_ascending: true,
             vote_poll: dpp::voting::vote_polls::contested_document_resource_vote_poll::ContestedDocumentResourceVotePoll {
                 contract_id: cfg.existing_data_contract_id,
-                document_type_name: cfg.existing_document_type_name,
+                document_type_name: cfg.existing_document_type_name.clone(),
                 index_name: index_name.to_string(),
-                index_values: vec!["dash".into(), index_value],
+                index_values: vec!["dash".into(), index_value.clone()],
            },
-            contestant_id,
+            contestant_id: *contestant,
        };
-    let rss = Voter::fetch_many(&sdk, query)
-        .await
-        .expect("fetch contested resources");
+        let rss = Voter::fetch_many(&sdk, query)
+            .await
+            .expect("fetch contested resources");
+
+        tracing::debug!(
+            ?rss,
+            contender = contestant.to_string(Encoding::Base58),
+            "votes retrieved"
+        );
+        votes += rss.0.len();
+    }
 
     // We expect to find votes for the known contestant
-    assert!(
-        !rss.0.is_empty(),
-        "Expected to find votes for the existing contestant"
+    assert_ne!(
+        votes, 0,
+        "Expected to find at least one vote for any of the contestants"
     );
 }
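Editor's note: the test above works in two steps, first fetching the contenders for the contested DPNS name and then querying the voters of each contender and summing them. The sketch below extracts that flow into a helper built only from calls visible in the hunk; `count_votes_for_contest` itself is hypothetical, error handling is simplified, and it assumes the query structs implement Clone.

// Hedged sketch of the contenders-then-voters flow used by the test above.
use dash_sdk::{platform::FetchMany, Sdk};
use dpp::voting::{
    contender_structs::ContenderWithSerializedDocument,
    vote_polls::contested_document_resource_vote_poll::ContestedDocumentResourceVotePoll,
};
use drive::query::{
    vote_poll_contestant_votes_query::ContestedDocumentVotePollVotesDriveQuery,
    vote_poll_vote_state_query::{
        ContestedDocumentVotePollDriveQuery, ContestedDocumentVotePollDriveQueryResultType,
    },
};
use drive_proof_verifier::types::Voter;

async fn count_votes_for_contest(
    sdk: &Sdk,
    vote_poll: ContestedDocumentResourceVotePoll,
) -> Result<usize, dash_sdk::Error> {
    // Step 1: who are the contenders for this vote poll?
    let contenders_query = ContestedDocumentVotePollDriveQuery {
        vote_poll: vote_poll.clone(),
        limit: None,
        offset: None,
        start_at: None,
        allow_include_locked_and_abstaining_vote_tally: true,
        result_type: ContestedDocumentVotePollDriveQueryResultType::DocumentsAndVoteTally,
    };
    let contenders = ContenderWithSerializedDocument::fetch_many(sdk, contenders_query).await?;

    // Step 2: sum the voters reported for every contender.
    let mut votes = 0;
    for contestant_id in contenders.contenders.keys() {
        let voters_query = ContestedDocumentVotePollVotesDriveQuery {
            vote_poll: vote_poll.clone(),
            contestant_id: *contestant_id,
            limit: None,
            offset: None,
            start_at: None,
            order_ascending: true,
        };
        votes += Voter::fetch_many(sdk, voters_query).await?.0.len();
    }

    Ok(votes)
}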
diff --git a/packages/rs-sdk/tests/fetch/document.rs b/packages/rs-sdk/tests/fetch/document.rs
index 942687eaa61..cc44b325671 100644
--- a/packages/rs-sdk/tests/fetch/document.rs
+++ b/packages/rs-sdk/tests/fetch/document.rs
@@ -188,7 +188,8 @@ async fn document_list_document_query() {
 /// then I don't get error:
 /// `query: storage: protocol: value error: structure error: value was a string, but could not be decoded from base 58`.
 #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
-async fn document_list_bug_value_text_decode_base58() {
+#[allow(non_snake_case)]
+async fn document_list_bug_value_text_decode_base58_PLAN_653() {
     setup_logs();
 
     let cfg = Config::new();
diff --git a/packages/rs-sdk/tests/fetch/prefunded_specialized_balance.rs b/packages/rs-sdk/tests/fetch/prefunded_specialized_balance.rs
index 88d016783db..2355a932aba 100644
--- a/packages/rs-sdk/tests/fetch/prefunded_specialized_balance.rs
+++ b/packages/rs-sdk/tests/fetch/prefunded_specialized_balance.rs
@@ -26,6 +26,10 @@ async fn test_prefunded_specialized_balance_not_found() {
 }
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+#[cfg_attr(
+    feature = "network-testing",
+    ignore = "requires manual DPNS names setup for masternode voting tests; see fn check_mn_voting_prerequisities()"
+)]
 async fn test_prefunded_specialized_balance_ok() {
     setup_logs();
 
@@ -49,7 +53,7 @@
     let poll = polls
         .0
-        .first_key_value()
+        .first()
         .expect("need at least one vote poll timestamp")
         .1
         .first()