From 89b0d3366f5170e5fdf7eb1ee41fb2e64bcaf54b Mon Sep 17 00:00:00 2001 From: chloefeal <188809157+chloefeal@users.noreply.github.com> Date: Sat, 28 Dec 2024 12:37:51 +0800 Subject: [PATCH] chore: fix some typos in comments --- core/store/src/metadata.rs | 2 +- docs/practices/workflows/benchmarking_synthetic_workloads.md | 2 +- integration-tests/src/test_loop/utils/trie_sanity.rs | 2 +- integration-tests/src/tests/runtime/test_yield_resume.rs | 2 +- pytest/tests/replay/README.md | 2 +- pytest/tests/sanity/split_storage.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/core/store/src/metadata.rs b/core/store/src/metadata.rs index ebff29084c9..0f685ab850f 100644 --- a/core/store/src/metadata.rs +++ b/core/store/src/metadata.rs @@ -56,7 +56,7 @@ pub(super) struct DbMetadata { impl DbMetadata { /// Reads metadata from the database. This method enforces the invariant - /// that version and kind must alwasy be set. + /// that version and kind must always be set. /// /// If the database version is not present, returns an error. Similarly, if /// database version is ≥ [`DB_VERSION_WITH_KIND`] but the kind is not diff --git a/docs/practices/workflows/benchmarking_synthetic_workloads.md b/docs/practices/workflows/benchmarking_synthetic_workloads.md index 0f76f18f58e..55dc4048974 100644 --- a/docs/practices/workflows/benchmarking_synthetic_workloads.md +++ b/docs/practices/workflows/benchmarking_synthetic_workloads.md @@ -11,7 +11,7 @@ This approach has the following benefits: The main drawbacks of synthetic benchmarks are: -- Drawing conclusions is limited as real world traffic is not homogenous. +- Drawing conclusions is limited as real world traffic is not homogeneous. - Calibrating traffic generation parameters can be cumbersome. The tooling for synthetic benchmarks is available in [`benchmarks/bm-synth`](../../../benchmarks/bm-synth). 
diff --git a/integration-tests/src/test_loop/utils/trie_sanity.rs b/integration-tests/src/test_loop/utils/trie_sanity.rs index c3d5147742d..56e102339bf 100644 --- a/integration-tests/src/test_loop/utils/trie_sanity.rs +++ b/integration-tests/src/test_loop/utils/trie_sanity.rs @@ -118,7 +118,7 @@ impl TrieSanityCheck { check_shard_uids } - // Check trie sanity and keep track of which shards were succesfully fully checked + // Check trie sanity and keep track of which shards were successfully fully checked pub fn assert_state_sanity(&mut self, clients: &[&Client], new_num_shards: NumShards) { for client in clients { let signer = client.validator_signer.get(); diff --git a/integration-tests/src/tests/runtime/test_yield_resume.rs b/integration-tests/src/tests/runtime/test_yield_resume.rs index c108796a0ce..8df6bff83aa 100644 --- a/integration-tests/src/tests/runtime/test_yield_resume.rs +++ b/integration-tests/src/tests/runtime/test_yield_resume.rs @@ -162,7 +162,7 @@ fn resume_without_yield() { ) .unwrap(); - // expect the execution to suceed, but return 'false' + // expect the execution to succeed, but return 'false' assert_eq!( res.status, FinalExecutionStatus::SuccessValue(vec![0u8]), diff --git a/pytest/tests/replay/README.md b/pytest/tests/replay/README.md index 7368c2c6cb6..3c13422a3d9 100644 --- a/pytest/tests/replay/README.md +++ b/pytest/tests/replay/README.md @@ -15,7 +15,7 @@ Prerequisites: In order to set up and launch replay, we take the following steps: -Make sure you have the right enviroment variables: +Make sure you have the right environment variables: ```shell export PYTHONPATH=./pytest/lib ``` diff --git a/pytest/tests/sanity/split_storage.py b/pytest/tests/sanity/split_storage.py index 6b60193b6cc..31a808e0251 100644 --- a/pytest/tests/sanity/split_storage.py +++ b/pytest/tests/sanity/split_storage.py @@ -125,7 +125,7 @@ def step2_archival_node_sync_test(self): logger.info(f"Starting the archival <- split storage sync test") # Archival nodes do 
not run state sync. This means that if peers - # ran away further than epoch_lenght * gc_epoch_num, archival nodes + # ran away further than epoch_length * gc_epoch_num, archival nodes # will not be able to further sync. In practice it means we need a long # enough epoch_length or more gc_epoch_num to keep. epoch_length = 10