diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..f3b696156 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,12 @@ +version: 2 +updates: + - package-ecosystem: "cargo" + directory: "/" + labels: ["A2-insubstantial", "M5-dependencies"] + schedule: + interval: "daily" + - package-ecosystem: github-actions + directory: '/' + labels: ["A2-insubstantial", "M5-dependencies"] + schedule: + interval: daily diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000..d499b2515 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,152 @@ +on: + pull_request: + push: + branches: + - master + +name: Continuous integration + +jobs: + check: + name: Check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + + - uses: actions-rs/cargo@v1 + with: + command: check + args: --workspace --all-targets + + test: + name: Test + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: + - ubuntu-latest + - macOS-latest + steps: + - uses: actions/checkout@v4 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: 1.75.0 + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + + - run: rustup target add wasm32-unknown-unknown + + - name: Test no-default-features + uses: actions-rs/cargo@v1 + with: + command: test + args: --workspace --no-default-features + + - name: Test default features + uses: actions-rs/cargo@v1 + with: + command: test + args: --workspace + + - name: Test uint + uses: actions-rs/cargo@v1 + with: + command: test + args: -p uint --all-features + + - name: Test fixed-hash no_std + run: cargo test -p fixed-hash --no-default-features --features='rustc-hex' + + - name: Test fixed-hash all-features + uses: actions-rs/cargo@v1 + with: + command: test + args: -p fixed-hash --all-features + + - name: Test primitive-types no_std + run: cargo test -p primitive-types --no-default-features --features='scale-info,num-traits,serde_no_std' + + - name: Test primitive-types all-features + uses: actions-rs/cargo@v1 + with: + command: test + args: -p primitive-types --all-features + + - name: Build ethereum-types no_std + run: cargo build -p ethereum-types --no-default-features --features='serialize,rlp' --target=wasm32-unknown-unknown + + - name: Test ethereum-types all-features + uses: actions-rs/cargo@v1 + with: + command: test + args: -p ethereum-types --all-features + + - name: Test ethbloom all-features + uses: actions-rs/cargo@v1 + with: + command: test + args: -p ethbloom --all-features + + - name: Test bounded-collections no_std + uses: actions-rs/cargo@v1 + with: + command: test + args: -p bounded-collections --no-default-features + + - name: Test bounded-collections no_std,serde + uses: actions-rs/cargo@v1 + with: + command: test + args: -p bounded-collections --no-default-features --features=serde + + - name: Test bounded-collections all-features + uses: actions-rs/cargo@v1 + with: + command: test + args: -p bounded-collections --all-features + + test_windows: + name: Test Windows + runs-on: windows-latest + steps: + - uses: actions/checkout@v4 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Rust Cache + uses: 
Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + + - uses: actions-rs/cargo@v1 + with: + command: test + args: --workspace --exclude kvdb-rocksdb + + fmt: + name: Rustfmt + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: nightly + override: true + - run: rustup component add rustfmt + - uses: actions-rs/cargo@v1 + with: + command: fmt + args: --all -- --check diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index b38c5ff5c..000000000 --- a/.travis.yml +++ /dev/null @@ -1,42 +0,0 @@ -language: rust -branches: - only: - - master -matrix: - include: - - os: linux -# without this line -# travis downgrades the image to trusty somehow -# which doesn't have C11 support -# see https://travis-ci.org/paritytech/parity-common/jobs/557850274 - dist: xenial - rust: stable - - os: linux - dist: xenial - rust: beta - - os: linux - dist: xenial - rust: nightly - - os: osx - osx_image: xcode11 - rust: stable - allow_failures: - - rust: nightly -script: - - cargo check --all --tests - - cargo build --all - - cargo test --all --exclude uint --exclude fixed-hash - - if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then - cd contract-address/ && cargo test --features=external_doc && cd ..; - fi - - cd ethbloom/ && cargo test --no-default-features --features="rustc-hex" && cargo check --benches && cd .. - - cd fixed-hash/ && cargo test --all-features && cargo test --no-default-features --features="byteorder,rustc-hex" && cd .. - - cd uint/ && cargo test --all-features && cargo test --no-default-features && cd .. - - cd keccak-hash/ && cargo test --no-default-features && cd .. - - cd plain_hasher/ && cargo test --no-default-features && cargo check --benches && cd .. - - cd parity-bytes/ && cargo test --no-default-features && cd .. - - cd parity-util-mem/ && cargo test --features=estimate-heapsize && cd .. - - cd parity-util-mem/ && cargo test --features=jemalloc-global && cd .. - - cd parity-util-mem/ && cargo test --features=mimalloc-global && cd .. - - cd rlp/ && cargo test --no-default-features && cargo check --benches && cd .. - - cd triehash/ && cargo check --benches && cd .. diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 000000000..7eb3137e4 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,22 @@ +# Lists some code owners. +# +# A codeowner just oversees some part of the codebase. If an owned file is changed then the +# corresponding codeowner receives a review request. An approval of the codeowner might be +# required for merging a PR (depends on repository settings). +# +# For details about syntax, see: +# https://help.github.com/en/articles/about-code-owners +# But here are some important notes: +# +# - Glob syntax is git-like, e.g. `/core` means the core directory in the root, unlike `core` +# which can be everywhere. +# - Multiple owners are supported. +# - Either handle (e.g., @github_user or @github_org/team) or email can be used. Keep in mind +# that handles might work better because they are more recognizable on GitHub; +# you can use them for mentioning, unlike an email. +# - The latest matching rule, if multiple, takes precedence.
+ +# main codeowner + +# CI +/.github/ @paritytech/ci diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..961bb363b --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,53 @@ +# Contributing to parity-common + +parity-common welcomes contributions from everyone in the form of suggestions, bug +reports, pull requests, and feedback. This document gives some guidance if you +are thinking of helping us. + +Please reach out here in a GitHub issue if we can do anything to help you contribute. + +## Submitting bug reports and feature requests + +When reporting a bug or asking for help, please include enough details so that +the people helping you can reproduce the behavior you are seeing. For some tips +on how to approach this, read about how to produce a [Minimal, Complete, and +Verifiable example]. + +[Minimal, Complete, and Verifiable example]: https://stackoverflow.com/help/mcve + +When making a feature request, please make it clear what problem you intend to +solve with the feature, any ideas for how parity-common could support solving that problem, any possible alternatives, and any disadvantages. + +## Versioning + +Like many crates in the Rust ecosystem, all crates in parity-common follow [semantic versioning]. This means bumping the PATCH version on bug fixes that don't break backwards compatibility, the MINOR version on new features, and the MAJOR version otherwise (MAJOR.MINOR.PATCH). Versions < 1.0 are considered to have the format 0.MAJOR.MINOR, which means bumping the MINOR version for all non-breaking changes. + +For checking whether a change is SemVer-breaking, please refer to https://doc.rust-lang.org/cargo/reference/semver.html. + +Bumping versions should be done in a PR separate from regular code changes. + +[semantic versioning]: https://semver.org/ + +## Releasing a new version + +This part of the guidelines is for parity-common maintainers. + +When making a new release, make sure to follow these steps: +* Submit a PR with a version bump and list all major and breaking changes in the crate's changelog + +After the PR is merged into master: +* `cargo publish` on the latest master (try with `--dry-run` first) +* Add a git tag in the format `<crate-name>-v<version>`, +e.g. `git tag impl-serde-v0.2.2` and push it with `git push origin impl-serde-v0.2.2` + +## Conduct + +We follow [Substrate Code of Conduct]. + +[Substrate Code of Conduct]: https://github.com/paritytech/substrate/blob/master/CODE_OF_CONDUCT.adoc + +## Attribution + +This guideline is adapted from [Serde's CONTRIBUTING guide]. + +[Serde's CONTRIBUTING guide]: https://github.com/serde-rs/serde/blob/master/CONTRIBUTING.md diff --git a/Cargo.toml b/Cargo.toml index c4dc4ea9e..2608dd2a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,22 +1,18 @@ [workspace] +resolver = "2" members = [ - "contract-address", "fixed-hash", "keccak-hash", "kvdb", "kvdb-memorydb", "kvdb-rocksdb", + "kvdb-shared-tests", "parity-bytes", - "parity-crypto", - "parity-path", - "plain_hasher", "rlp", - "transaction-pool", - "trace-time", - "triehash", + "rlp-derive", "uint", - "parity-util-mem", "primitive-types", + "bounded-collections", "ethereum-types", "ethbloom", ] diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 94a9ed024..000000000 --- a/LICENSE +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed.
- - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. 
- - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. 
For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. 
- - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. 
You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. 
- - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. 
- - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. 
- - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. 
- - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". 
- - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. diff --git a/rlp/LICENSE-APACHE2 b/LICENSE-APACHE2 similarity index 100% rename from rlp/LICENSE-APACHE2 rename to LICENSE-APACHE2 diff --git a/rlp/LICENSE-MIT b/LICENSE-MIT similarity index 95% rename from rlp/LICENSE-MIT rename to LICENSE-MIT index cd8fdd2b9..b2d52b66d 100644 --- a/rlp/LICENSE-MIT +++ b/LICENSE-MIT @@ -1,4 +1,4 @@ -Copyright (c) 2015-2017 Parity Technologies +Copyright (c) 2015-2020 Parity Technologies Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index b9a2936ac..2a97d14e0 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,4 @@ -[![Build Status travis][travis-image]][travis-url] -[![Build Status appveyor][appveyor-image]][appveyor-url] - -[travis-image]: https://travis-ci.org/paritytech/parity-common.svg?branch=master -[travis-url]: https://travis-ci.org/paritytech/parity-common -[appveyor-image]: https://ci.appveyor.com/api/projects/status/github/paritytech/parity-common/branch/master?svg=true -[appveyor-url]: https://ci.appveyor.com/project/paritytech/parity-common/branch/master +[![Continuous integration](https://github.com/paritytech/parity-common/actions/workflows/ci.yml/badge.svg)](https://github.com/paritytech/parity-common/actions/workflows/ci.yml) # parity-common Collection of crates used in [Parity Technologies](https://www.paritytech.io/) projects diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index d930e318c..000000000 --- a/appveyor.yml +++ /dev/null @@ -1,29 +0,0 @@ -environment: - matrix: - - FEATURES: "" - -platform: - - x86_64-pc-windows-msvc - -# avoid running tests twice -branches: - only: - - master - -install: - - curl -sSf -o rustup-init.exe https://win.rustup.rs/ - - rustup-init.exe -y --default-host %PLATFORM% - - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin - - rustc -vV - - cargo -vV - -build_script: - - cargo check --tests --features "%FEATURES%" - - cargo build --all --features "%FEATURES%" - -test_script: - - cargo test --all --features "%FEATURES%" --exclude uint --exclude fixed-hash - - cd fixed-hash/ && cargo test --all-features && cd .. - - cd uint/ && cargo test --features=std,quickcheck --release && cd .. - - cd plain_hasher/ && cargo test --no-default-features && cd .. - - cd parity-util-mem/ && cargo test --features=estimate-heapsize && cd .. diff --git a/bounded-collections/CHANGELOG.md b/bounded-collections/CHANGELOG.md new file mode 100644 index 000000000..101024fe0 --- /dev/null +++ b/bounded-collections/CHANGELOG.md @@ -0,0 +1,49 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [0.2.2] - 2024-11-08 +- Added `ConstInt` and `ConstUint` types. [#878](https://github.com/paritytech/parity-common/pull/878) + +## [0.2.1] - 2024-10-08 +- Added `serde` support for `BoundedBTreeMap`. 
[#870](https://github.com/paritytech/parity-common/pull/870) + +## [0.2.0] - 2024-01-29 +- Added `try_rotate_left` and `try_rotate_right` to `BoundedVec`. [#800](https://github.com/paritytech/parity-common/pull/800) + +## [0.1.9] - 2023-10-10 +- Added `serde` support for `BoundedBTreeSet`. [#781](https://github.com/paritytech/parity-common/pull/781) + +## [0.1.8] - 2023-06-11 +- Altered return types of `BoundedVec::force_insert_keep_` functions to return the element in case of error. +- Added `new` and `clear` to `BoundedVec`. + +## [0.1.7] - 2023-05-05 +- Added `serde` feature, which can be enabled for no `std` deployments. + +## [0.1.6] - 2023-04-27 +- Added `Clone` and `Default` derive to the `impl_const_get!` macro and thereby all `Const*` types. +- Fixed `Debug` impl for `impl_const_get!` and all `Const*` types to also print the value and not just the type name. + +## [0.1.5] - 2023-02-13 +- Fixed `Hash` impl (previously it could not be used in practice, because the size bound was required to also implement `Hash`). + +## [0.1.4] - 2023-01-28 +- Fixed unnecessary decoding and allocations for bounded types, when the decoded length is greater than the allowed bound. +- Add `Hash` derivation (when `feature = "std"`) for bounded types. + +## [0.1.3] - 2023-01-27 +- Removed non-existent `bounded` mod reference. [#715](https://github.com/paritytech/parity-common/pull/715) + +## [0.1.2] - 2023-01-27 +- Ensured `bounded-collections` crate compiles under `no_std`. [#712](https://github.com/paritytech/parity-common/pull/712) + +## [0.1.1] - 2023-01-26 +- Made `alloc` public. [#711](https://github.com/paritytech/parity-common/pull/711) +- Removed a reference to `sp_core` in the comments. [#710](https://github.com/paritytech/parity-common/pull/710) + +## [0.1.0] - 2023-01-26 +- Wrote better description for `bounded-collections`. [#709](https://github.com/paritytech/parity-common/pull/709) +- Added `bounded-collections` crate. [#708](https://github.com/paritytech/parity-common/pull/708) diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml new file mode 100644 index 000000000..3f72b5e99 --- /dev/null +++ b/bounded-collections/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "bounded-collections" +version = "0.2.2" +authors = ["Parity Technologies "] +license = "MIT OR Apache-2.0" +homepage = "https://github.com/paritytech/parity-common" +description = "Bounded types and their supporting traits" +edition = "2021" +rust-version = "1.75.0" + +[dependencies] +serde = { version = "1.0.101", default-features = false, optional = true, features=["alloc", "derive"] } +codec = { version = "3.3.0", default-features = false, features = ["max-encoded-len"], package = "parity-scale-codec" } +scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false } +log = { version = "0.4.17", default-features = false } +schemars = { version = ">=0.8.12", default-features = true, optional = true } + +[dev-dependencies] +serde_json = "1.0.41" + +[features] +default = ["std"] +json-schema = ["dep:schemars"] +std = [ + "log/std", + "codec/std", + "scale-info/std", + "serde/std", +] diff --git a/bounded-collections/README.md b/bounded-collections/README.md new file mode 100644 index 000000000..b7cda37f9 --- /dev/null +++ b/bounded-collections/README.md @@ -0,0 +1,3 @@ +# Bounded Collections + +Bounded types and their supporting traits. 
\ No newline at end of file diff --git a/bounded-collections/src/bounded_btree_map.rs b/bounded-collections/src/bounded_btree_map.rs new file mode 100644 index 000000000..574f074fa --- /dev/null +++ b/bounded-collections/src/bounded_btree_map.rs @@ -0,0 +1,804 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2023 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits, types and structs to support a bounded BTreeMap. + +use crate::{Get, TryCollect}; +use alloc::collections::BTreeMap; +use codec::{Compact, Decode, Encode, MaxEncodedLen}; +use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; +#[cfg(feature = "serde")] +use serde::{ + de::{Error, MapAccess, Visitor}, + Deserialize, Deserializer, Serialize, +}; + +/// A bounded map based on a B-Tree. +/// +/// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing +/// the amount of work performed in a search. See [`BTreeMap`] for more details. +/// +/// Unlike a standard `BTreeMap`, there is an enforced upper limit to the number of items in the +/// map. All internal operations ensure this bound is respected. +#[cfg_attr(feature = "serde", derive(Serialize), serde(transparent))] +#[derive(Encode, scale_info::TypeInfo)] +#[scale_info(skip_type_params(S))] +pub struct BoundedBTreeMap( + BTreeMap, + #[cfg_attr(feature = "serde", serde(skip_serializing))] PhantomData, +); + +#[cfg(feature = "serde")] +impl<'de, K, V, S: Get> Deserialize<'de> for BoundedBTreeMap +where + K: Deserialize<'de> + Ord, + V: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + // Create a visitor to visit each element in the map + struct BTreeMapVisitor(PhantomData<(K, V, S)>); + + impl<'de, K, V, S> Visitor<'de> for BTreeMapVisitor + where + K: Deserialize<'de> + Ord, + V: Deserialize<'de>, + S: Get, + { + type Value = BTreeMap; + + fn expecting(&self, formatter: &mut core::fmt::Formatter) -> core::fmt::Result { + formatter.write_str("a map") + } + + fn visit_map(self, mut map: A) -> Result + where + A: MapAccess<'de>, + { + let size = map.size_hint().unwrap_or(0); + let max = S::get() as usize; + if size > max { + Err(A::Error::custom("map exceeds the size of the bounds")) + } else { + let mut values = BTreeMap::new(); + + while let Some(key) = map.next_key()? { + if values.len() >= max { + return Err(A::Error::custom("map exceeds the size of the bounds")); + } + let value = map.next_value()?; + values.insert(key, value); + } + + Ok(values) + } + } + } + + let visitor: BTreeMapVisitor = BTreeMapVisitor(PhantomData); + deserializer.deserialize_map(visitor).map(|v| { + BoundedBTreeMap::::try_from(v) + .map_err(|_| Error::custom("failed to create a BoundedBTreeMap from the provided map")) + })? 
+ } +} + +impl Decode for BoundedBTreeMap +where + K: Decode + Ord, + V: Decode, + S: Get, +{ + fn decode(input: &mut I) -> Result { + // Same as the underlying implementation for `Decode` on `BTreeMap`, except we fail early if + // the len is too big. + let len: u32 = >::decode(input)?.into(); + if len > S::get() { + return Err("BoundedBTreeMap exceeds its limit".into()); + } + input.descend_ref()?; + let inner = Result::from_iter((0..len).map(|_| Decode::decode(input)))?; + input.ascend_ref(); + Ok(Self(inner, PhantomData)) + } + + fn skip(input: &mut I) -> Result<(), codec::Error> { + BTreeMap::::skip(input) + } +} + +impl BoundedBTreeMap +where + S: Get, +{ + /// Get the bound of the type in `usize`. + pub fn bound() -> usize { + S::get() as usize + } +} + +impl BoundedBTreeMap +where + K: Ord, + S: Get, +{ + /// Create `Self` from `t` without any checks. + fn unchecked_from(t: BTreeMap) -> Self { + Self(t, Default::default()) + } + + /// Exactly the same semantics as `BTreeMap::retain`. + /// + /// The is a safe `&mut self` borrow because `retain` can only ever decrease the length of the + /// inner map. + pub fn retain bool>(&mut self, f: F) { + self.0.retain(f) + } + + /// Create a new `BoundedBTreeMap`. + /// + /// Does not allocate. + pub fn new() -> Self { + BoundedBTreeMap(BTreeMap::new(), PhantomData) + } + + /// Consume self, and return the inner `BTreeMap`. + /// + /// This is useful when a mutating API of the inner type is desired, and closure-based mutation + /// such as provided by [`try_mutate`][Self::try_mutate] is inconvenient. + pub fn into_inner(self) -> BTreeMap { + debug_assert!(self.0.len() <= Self::bound()); + self.0 + } + + /// Consumes self and mutates self via the given `mutate` function. + /// + /// If the outcome of mutation is within bounds, `Some(Self)` is returned. Else, `None` is + /// returned. + /// + /// This is essentially a *consuming* shorthand [`Self::into_inner`] -> `...` -> + /// [`Self::try_from`]. + pub fn try_mutate(mut self, mut mutate: impl FnMut(&mut BTreeMap)) -> Option { + mutate(&mut self.0); + (self.0.len() <= Self::bound()).then(move || self) + } + + /// Clears the map, removing all elements. + pub fn clear(&mut self) { + self.0.clear() + } + + /// Return a mutable reference to the value corresponding to the key. + /// + /// The key may be any borrowed form of the map's key type, but the ordering on the borrowed + /// form _must_ match the ordering on the key type. + pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> + where + K: Borrow, + Q: Ord + ?Sized, + { + self.0.get_mut(key) + } + + /// Exactly the same semantics as [`BTreeMap::insert`], but returns an `Err` (and is a noop) if + /// the new length of the map exceeds `S`. + /// + /// In the `Err` case, returns the inserted pair so it can be further used without cloning. + pub fn try_insert(&mut self, key: K, value: V) -> Result, (K, V)> { + if self.len() < Self::bound() || self.0.contains_key(&key) { + Ok(self.0.insert(key, value)) + } else { + Err((key, value)) + } + } + + /// Remove a key from the map, returning the value at the key if the key was previously in the + /// map. + /// + /// The key may be any borrowed form of the map's key type, but the ordering on the borrowed + /// form _must_ match the ordering on the key type. + pub fn remove(&mut self, key: &Q) -> Option + where + K: Borrow, + Q: Ord + ?Sized, + { + self.0.remove(key) + } + + /// Remove a key from the map, returning the value at the key if the key was previously in the + /// map. 
+ /// + /// The key may be any borrowed form of the map's key type, but the ordering on the borrowed + /// form _must_ match the ordering on the key type. + pub fn remove_entry(&mut self, key: &Q) -> Option<(K, V)> + where + K: Borrow, + Q: Ord + ?Sized, + { + self.0.remove_entry(key) + } + + /// Gets a mutable iterator over the entries of the map, sorted by key. + /// + /// See [`BTreeMap::iter_mut`] for more information. + pub fn iter_mut(&mut self) -> alloc::collections::btree_map::IterMut { + self.0.iter_mut() + } + + /// Consume the map, applying `f` to each of it's values and returning a new map. + pub fn map(self, mut f: F) -> BoundedBTreeMap + where + F: FnMut((&K, V)) -> T, + { + BoundedBTreeMap::::unchecked_from( + self.0 + .into_iter() + .map(|(k, v)| { + let t = f((&k, v)); + (k, t) + }) + .collect(), + ) + } + + /// Consume the map, applying `f` to each of it's values as long as it returns successfully. If + /// an `Err(E)` is ever encountered, the mapping is short circuited and the error is returned; + /// otherwise, a new map is returned in the contained `Ok` value. + pub fn try_map(self, mut f: F) -> Result, E> + where + F: FnMut((&K, V)) -> Result, + { + Ok(BoundedBTreeMap::::unchecked_from( + self.0 + .into_iter() + .map(|(k, v)| (f((&k, v)).map(|t| (k, t)))) + .collect::, _>>()?, + )) + } + + /// Returns true if this map is full. + pub fn is_full(&self) -> bool { + self.len() >= Self::bound() + } +} + +impl Default for BoundedBTreeMap +where + K: Ord, + S: Get, +{ + fn default() -> Self { + Self::new() + } +} + +impl Clone for BoundedBTreeMap +where + BTreeMap: Clone, +{ + fn clone(&self) -> Self { + BoundedBTreeMap(self.0.clone(), PhantomData) + } +} + +impl core::fmt::Debug for BoundedBTreeMap +where + BTreeMap: core::fmt::Debug, + S: Get, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("BoundedBTreeMap").field(&self.0).field(&Self::bound()).finish() + } +} + +// Custom implementation of `Hash` since deriving it would require all generic bounds to also +// implement it. 
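// In particular, the bound `S` is only carried in `PhantomData`, so a derived impl would demand
// `S: Hash`, which marker bound types generally do not (and need not) provide.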
+#[cfg(feature = "std")] +impl std::hash::Hash for BoundedBTreeMap { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } +} + +impl PartialEq> for BoundedBTreeMap +where + BTreeMap: PartialEq, + S1: Get, + S2: Get, +{ + fn eq(&self, other: &BoundedBTreeMap) -> bool { + S1::get() == S2::get() && self.0 == other.0 + } +} + +impl Eq for BoundedBTreeMap +where + BTreeMap: Eq, + S: Get, +{ +} + +impl PartialEq> for BoundedBTreeMap +where + BTreeMap: PartialEq, +{ + fn eq(&self, other: &BTreeMap) -> bool { + self.0 == *other + } +} + +impl PartialOrd for BoundedBTreeMap +where + BTreeMap: PartialOrd, + S: Get, +{ + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl Ord for BoundedBTreeMap +where + BTreeMap: Ord, + S: Get, +{ + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl IntoIterator for BoundedBTreeMap { + type Item = (K, V); + type IntoIter = alloc::collections::btree_map::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl<'a, K, V, S> IntoIterator for &'a BoundedBTreeMap { + type Item = (&'a K, &'a V); + type IntoIter = alloc::collections::btree_map::Iter<'a, K, V>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl<'a, K, V, S> IntoIterator for &'a mut BoundedBTreeMap { + type Item = (&'a K, &'a mut V); + type IntoIter = alloc::collections::btree_map::IterMut<'a, K, V>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter_mut() + } +} + +impl MaxEncodedLen for BoundedBTreeMap +where + K: MaxEncodedLen, + V: MaxEncodedLen, + S: Get, +{ + fn max_encoded_len() -> usize { + Self::bound() + .saturating_mul(K::max_encoded_len().saturating_add(V::max_encoded_len())) + .saturating_add(codec::Compact(S::get()).encoded_size()) + } +} + +impl Deref for BoundedBTreeMap +where + K: Ord, +{ + type Target = BTreeMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl AsRef> for BoundedBTreeMap +where + K: Ord, +{ + fn as_ref(&self) -> &BTreeMap { + &self.0 + } +} + +impl From> for BTreeMap +where + K: Ord, +{ + fn from(map: BoundedBTreeMap) -> Self { + map.0 + } +} + +impl TryFrom> for BoundedBTreeMap +where + K: Ord, + S: Get, +{ + type Error = (); + + fn try_from(value: BTreeMap) -> Result { + (value.len() <= Self::bound()) + .then(move || BoundedBTreeMap(value, PhantomData)) + .ok_or(()) + } +} + +impl codec::DecodeLength for BoundedBTreeMap { + fn len(self_encoded: &[u8]) -> Result { + // `BoundedBTreeMap` is stored just a `BTreeMap`, which is stored as a + // `Compact` with its length followed by an iteration of its items. We can just use + // the underlying implementation. 
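+	// Note that only the `Compact<u32>` length prefix is read here; the entries themselves are
+	// never decoded.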
+ as codec::DecodeLength>::len(self_encoded) + } +} + +impl codec::EncodeLike> for BoundedBTreeMap where BTreeMap: Encode {} + +impl TryCollect> for I +where + K: Ord, + I: ExactSizeIterator + Iterator, + Bound: Get, +{ + type Error = &'static str; + + fn try_collect(self) -> Result, Self::Error> { + if self.len() > Bound::get() as usize { + Err("iterator length too big") + } else { + Ok(BoundedBTreeMap::::unchecked_from(self.collect::>())) + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::ConstU32; + use alloc::{vec, vec::Vec}; + use codec::CompactLen; + + fn map_from_keys(keys: &[K]) -> BTreeMap + where + K: Ord + Copy, + { + keys.iter().copied().zip(core::iter::repeat(())).collect() + } + + fn boundedmap_from_keys(keys: &[K]) -> BoundedBTreeMap + where + K: Ord + Copy, + S: Get, + { + map_from_keys(keys).try_into().unwrap() + } + + #[test] + fn encoding_same_as_unbounded_map() { + let b = boundedmap_from_keys::>(&[1, 2, 3, 4, 5, 6]); + let m = map_from_keys(&[1, 2, 3, 4, 5, 6]); + + assert_eq!(b.encode(), m.encode()); + } + + #[test] + fn try_insert_works() { + let mut bounded = boundedmap_from_keys::>(&[1, 2, 3]); + bounded.try_insert(0, ()).unwrap(); + assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3])); + + assert!(bounded.try_insert(9, ()).is_err()); + assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3])); + } + + #[test] + fn deref_coercion_works() { + let bounded = boundedmap_from_keys::>(&[1, 2, 3]); + // these methods come from deref-ed vec. + assert_eq!(bounded.len(), 3); + assert!(bounded.iter().next().is_some()); + assert!(!bounded.is_empty()); + } + + #[test] + fn try_mutate_works() { + let bounded = boundedmap_from_keys::>(&[1, 2, 3, 4, 5, 6]); + let bounded = bounded + .try_mutate(|v| { + v.insert(7, ()); + }) + .unwrap(); + assert_eq!(bounded.len(), 7); + assert!(bounded + .try_mutate(|v| { + v.insert(8, ()); + }) + .is_none()); + } + + #[test] + fn btree_map_eq_works() { + let bounded = boundedmap_from_keys::>(&[1, 2, 3, 4, 5, 6]); + assert_eq!(bounded, map_from_keys(&[1, 2, 3, 4, 5, 6])); + } + + #[test] + fn too_big_fail_to_decode() { + let v: Vec<(u32, u32)> = vec![(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)]; + assert_eq!( + BoundedBTreeMap::>::decode(&mut &v.encode()[..]), + Err("BoundedBTreeMap exceeds its limit".into()), + ); + } + + #[test] + fn dont_consume_more_data_than_bounded_len() { + let m = map_from_keys(&[1, 2, 3, 4, 5, 6]); + let data = m.encode(); + let data_input = &mut &data[..]; + + BoundedBTreeMap::>::decode(data_input).unwrap_err(); + assert_eq!(data_input.len(), data.len() - Compact::::compact_len(&(data.len() as u32))); + } + + #[test] + fn unequal_eq_impl_insert_works() { + // given a struct with a strange notion of equality + #[derive(Debug)] + struct Unequal(u32, bool); + + impl PartialEq for Unequal { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + impl Eq for Unequal {} + + impl Ord for Unequal { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.0.cmp(&other.0) + } + } + + impl PartialOrd for Unequal { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + let mut map = BoundedBTreeMap::>::new(); + + // when the set is full + + for i in 0..4 { + map.try_insert(Unequal(i, false), i).unwrap(); + } + + // can't insert a new distinct member + map.try_insert(Unequal(5, false), 5).unwrap_err(); + + // but _can_ insert a distinct member which compares equal, though per the documentation, + // neither the set length nor the actual member are changed, but the value is + 
map.try_insert(Unequal(0, true), 6).unwrap(); + assert_eq!(map.len(), 4); + let (zero_key, zero_value) = map.get_key_value(&Unequal(0, true)).unwrap(); + assert_eq!(zero_key.0, 0); + assert_eq!(zero_key.1, false); + assert_eq!(*zero_value, 6); + } + + #[test] + fn eq_works() { + // of same type + let b1 = boundedmap_from_keys::>(&[1, 2]); + let b2 = boundedmap_from_keys::>(&[1, 2]); + assert_eq!(b1, b2); + + // of different type, but same value and bound. + crate::parameter_types! { + B1: u32 = 7; + B2: u32 = 7; + } + let b1 = boundedmap_from_keys::(&[1, 2]); + let b2 = boundedmap_from_keys::(&[1, 2]); + assert_eq!(b1, b2); + } + + #[test] + fn can_be_collected() { + let b1 = boundedmap_from_keys::>(&[1, 2, 3, 4]); + let b2: BoundedBTreeMap> = b1.iter().map(|(k, v)| (k + 1, *v)).try_collect().unwrap(); + assert_eq!(b2.into_iter().map(|(k, _)| k).collect::>(), vec![2, 3, 4, 5]); + + // can also be collected into a collection of length 4. + let b2: BoundedBTreeMap> = b1.iter().map(|(k, v)| (k + 1, *v)).try_collect().unwrap(); + assert_eq!(b2.into_iter().map(|(k, _)| k).collect::>(), vec![2, 3, 4, 5]); + + // can be mutated further into iterators that are `ExactSizedIterator`. + let b2: BoundedBTreeMap> = + b1.iter().map(|(k, v)| (k + 1, *v)).rev().skip(2).try_collect().unwrap(); + // note that the binary tree will re-sort this, so rev() is not really seen + assert_eq!(b2.into_iter().map(|(k, _)| k).collect::>(), vec![2, 3]); + + let b2: BoundedBTreeMap> = + b1.iter().map(|(k, v)| (k + 1, *v)).take(2).try_collect().unwrap(); + assert_eq!(b2.into_iter().map(|(k, _)| k).collect::>(), vec![2, 3]); + + // but these won't work + let b2: Result>, _> = b1.iter().map(|(k, v)| (k + 1, *v)).try_collect(); + assert!(b2.is_err()); + + let b2: Result>, _> = + b1.iter().map(|(k, v)| (k + 1, *v)).skip(2).try_collect(); + assert!(b2.is_err()); + } + + #[test] + fn test_iter_mut() { + let mut b1: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + + let b2: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap(); + + b1.iter_mut().for_each(|(_, v)| *v *= 2); + + assert_eq!(b1, b2); + } + + #[test] + fn map_retains_size() { + let b1 = boundedmap_from_keys::>(&[1, 2]); + let b2 = b1.clone(); + + assert_eq!(b1.len(), b2.map(|(_, _)| 5_u32).len()); + } + + #[test] + fn map_maps_properly() { + let b1: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap(); + let b2: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + + assert_eq!(b1, b2.map(|(_, v)| v * 2)); + } + + #[test] + fn try_map_retains_size() { + let b1 = boundedmap_from_keys::>(&[1, 2]); + let b2 = b1.clone(); + + assert_eq!(b1.len(), b2.try_map::<_, (), _>(|(_, _)| Ok(5_u32)).unwrap().len()); + } + + #[test] + fn try_map_maps_properly() { + let b1: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap(); + let b2: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + + assert_eq!(b1, b2.try_map::<_, (), _>(|(_, v)| Ok(v * 2)).unwrap()); + } + + #[test] + fn try_map_short_circuit() { + let b1: BoundedBTreeMap> = [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + + assert_eq!(Err("overflow"), b1.try_map(|(_, v)| v.checked_mul(100).ok_or("overflow"))); + } + + #[test] + fn try_map_ok() { + let b1: BoundedBTreeMap> = [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + let b2: BoundedBTreeMap> = + [1, 2, 3, 
4].into_iter().map(|k| (k, (k as u16) * 100)).try_collect().unwrap(); + + assert_eq!(Ok(b2), b1.try_map(|(_, v)| (v as u16).checked_mul(100_u16).ok_or("overflow"))); + } + + // Just a test that structs containing `BoundedBTreeMap` can derive `Hash`. (This was broken + // when it was deriving `Hash`). + #[test] + #[cfg(feature = "std")] + fn container_can_derive_hash() { + #[derive(Hash, Default)] + struct Foo { + bar: u8, + map: BoundedBTreeMap>, + } + let _foo = Foo::default(); + } + + #[cfg(feature = "serde")] + mod serde { + use super::*; + use crate::alloc::string::ToString; + + #[test] + fn test_bounded_btreemap_serializer() { + let mut map = BoundedBTreeMap::>::new(); + map.try_insert(0, 100).unwrap(); + map.try_insert(1, 101).unwrap(); + map.try_insert(2, 102).unwrap(); + + let serialized = serde_json::to_string(&map).unwrap(); + assert_eq!(serialized, r#"{"0":100,"1":101,"2":102}"#); + } + + #[test] + fn test_bounded_btreemap_deserializer() { + let json_str = r#"{"0":100,"1":101,"2":102}"#; + let map: Result>, serde_json::Error> = serde_json::from_str(json_str); + assert!(map.is_ok()); + let map = map.unwrap(); + + assert_eq!(map.len(), 3); + assert_eq!(map.get(&0), Some(&100)); + assert_eq!(map.get(&1), Some(&101)); + assert_eq!(map.get(&2), Some(&102)); + } + + #[test] + fn test_bounded_btreemap_deserializer_bound() { + let json_str = r#"{"0":100,"1":101,"2":102}"#; + let map: Result>, serde_json::Error> = serde_json::from_str(json_str); + assert!(map.is_ok()); + let map = map.unwrap(); + + assert_eq!(map.len(), 3); + assert_eq!(map.get(&0), Some(&100)); + assert_eq!(map.get(&1), Some(&101)); + assert_eq!(map.get(&2), Some(&102)); + } + + #[test] + fn test_bounded_btreemap_deserializer_failed() { + let json_str = r#"{"0":100,"1":101,"2":102,"3":103,"4":104}"#; + let map: Result>, serde_json::Error> = serde_json::from_str(json_str); + + match map { + Err(e) => { + assert!(e.to_string().contains("map exceeds the size of the bounds")); + }, + _ => unreachable!("deserializer must raise error"), + } + } + } + + #[test] + fn is_full_works() { + let mut bounded = boundedmap_from_keys::>(&[1, 2, 3]); + assert!(!bounded.is_full()); + bounded.try_insert(0, ()).unwrap(); + assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3])); + + assert!(bounded.is_full()); + assert!(bounded.try_insert(9, ()).is_err()); + assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3])); + } +} diff --git a/bounded-collections/src/bounded_btree_set.rs b/bounded-collections/src/bounded_btree_set.rs new file mode 100644 index 000000000..0942f34e8 --- /dev/null +++ b/bounded-collections/src/bounded_btree_set.rs @@ -0,0 +1,659 @@ +// This file is part of Substrate. + +// Copyright (C) 2023 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits, types and structs to support a bounded `BTreeSet`. 
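//!
//! A minimal usage sketch (illustrative only; it assumes `BoundedBTreeSet` and `ConstU32` are
//! re-exported from the crate root):
//!
//! ```
//! use bounded_collections::{BoundedBTreeSet, ConstU32};
//!
//! // A set that can hold at most four elements.
//! let mut set = BoundedBTreeSet::<u32, ConstU32<4>>::new();
//! for i in 0..4u32 {
//!     set.try_insert(i).unwrap();
//! }
//! // A fifth distinct element is rejected and handed back to the caller.
//! assert_eq!(set.try_insert(4), Err(4));
//! ```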
+ +use crate::{Get, TryCollect}; +use alloc::collections::BTreeSet; +use codec::{Compact, Decode, Encode, MaxEncodedLen}; +use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; +#[cfg(feature = "serde")] +use serde::{ + de::{Error, SeqAccess, Visitor}, + Deserialize, Deserializer, Serialize, +}; + +/// A bounded set based on a B-Tree. +/// +/// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing +/// the amount of work performed in a search. See [`BTreeSet`] for more details. +/// +/// Unlike a standard `BTreeSet`, there is an enforced upper limit to the number of items in the +/// set. All internal operations ensure this bound is respected. +#[cfg_attr(feature = "serde", derive(Serialize), serde(transparent))] +#[derive(Encode, scale_info::TypeInfo)] +#[scale_info(skip_type_params(S))] +pub struct BoundedBTreeSet(BTreeSet, #[cfg_attr(feature = "serde", serde(skip_serializing))] PhantomData); + +#[cfg(feature = "serde")] +impl<'de, T, S: Get> Deserialize<'de> for BoundedBTreeSet +where + T: Ord + Deserialize<'de>, + S: Clone, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + // Create a visitor to visit each element in the sequence + struct BTreeSetVisitor(PhantomData<(T, S)>); + + impl<'de, T, S> Visitor<'de> for BTreeSetVisitor + where + T: Ord + Deserialize<'de>, + S: Get + Clone, + { + type Value = BTreeSet; + + fn expecting(&self, formatter: &mut core::fmt::Formatter) -> core::fmt::Result { + formatter.write_str("a sequence") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let size = seq.size_hint().unwrap_or(0); + let max = match usize::try_from(S::get()) { + Ok(n) => n, + Err(_) => return Err(A::Error::custom("can't convert to usize")), + }; + if size > max { + Err(A::Error::custom("out of bounds")) + } else { + let mut values = BTreeSet::new(); + + while let Some(value) = seq.next_element()? { + if values.len() >= max { + return Err(A::Error::custom("out of bounds")) + } + values.insert(value); + } + + Ok(values) + } + } + } + + let visitor: BTreeSetVisitor = BTreeSetVisitor(PhantomData); + deserializer + .deserialize_seq(visitor) + .map(|v| BoundedBTreeSet::::try_from(v).map_err(|_| Error::custom("out of bounds")))? + } +} + +impl Decode for BoundedBTreeSet +where + T: Decode + Ord, + S: Get, +{ + fn decode(input: &mut I) -> Result { + // Same as the underlying implementation for `Decode` on `BTreeSet`, except we fail early if + // the len is too big. + let len: u32 = >::decode(input)?.into(); + if len > S::get() { + return Err("BoundedBTreeSet exceeds its limit".into()) + } + input.descend_ref()?; + let inner = Result::from_iter((0..len).map(|_| Decode::decode(input)))?; + input.ascend_ref(); + Ok(Self(inner, PhantomData)) + } + + fn skip(input: &mut I) -> Result<(), codec::Error> { + BTreeSet::::skip(input) + } +} + +impl BoundedBTreeSet +where + S: Get, +{ + /// Get the bound of the type in `usize`. + pub fn bound() -> usize { + S::get() as usize + } +} + +impl BoundedBTreeSet +where + T: Ord, + S: Get, +{ + /// Create `Self` from `t` without any checks. + fn unchecked_from(t: BTreeSet) -> Self { + Self(t, Default::default()) + } + + /// Create a new `BoundedBTreeSet`. + /// + /// Does not allocate. + pub fn new() -> Self { + BoundedBTreeSet(BTreeSet::new(), PhantomData) + } + + /// Consume self, and return the inner `BTreeSet`. 
+ /// + /// This is useful when a mutating API of the inner type is desired, and closure-based mutation + /// such as provided by [`try_mutate`][Self::try_mutate] is inconvenient. + pub fn into_inner(self) -> BTreeSet { + debug_assert!(self.0.len() <= Self::bound()); + self.0 + } + + /// Consumes self and mutates self via the given `mutate` function. + /// + /// If the outcome of mutation is within bounds, `Some(Self)` is returned. Else, `None` is + /// returned. + /// + /// This is essentially a *consuming* shorthand [`Self::into_inner`] -> `...` -> + /// [`Self::try_from`]. + pub fn try_mutate(mut self, mut mutate: impl FnMut(&mut BTreeSet)) -> Option { + mutate(&mut self.0); + (self.0.len() <= Self::bound()).then(move || self) + } + + /// Clears the set, removing all elements. + pub fn clear(&mut self) { + self.0.clear() + } + + /// Exactly the same semantics as [`BTreeSet::insert`], but returns an `Err` (and is a noop) if + /// the new length of the set exceeds `S`. + /// + /// In the `Err` case, returns the inserted item so it can be further used without cloning. + pub fn try_insert(&mut self, item: T) -> Result { + if self.len() < Self::bound() || self.0.contains(&item) { + Ok(self.0.insert(item)) + } else { + Err(item) + } + } + + /// Remove an item from the set, returning whether it was previously in the set. + /// + /// The item may be any borrowed form of the set's item type, but the ordering on the borrowed + /// form _must_ match the ordering on the item type. + pub fn remove(&mut self, item: &Q) -> bool + where + T: Borrow, + Q: Ord + ?Sized, + { + self.0.remove(item) + } + + /// Removes and returns the value in the set, if any, that is equal to the given one. + /// + /// The value may be any borrowed form of the set's value type, but the ordering on the borrowed + /// form _must_ match the ordering on the value type. + pub fn take(&mut self, value: &Q) -> Option + where + T: Borrow + Ord, + Q: Ord + ?Sized, + { + self.0.take(value) + } + + /// Returns true if this set is full. + pub fn is_full(&self) -> bool { + self.len() >= Self::bound() + } +} + +impl Default for BoundedBTreeSet +where + T: Ord, + S: Get, +{ + fn default() -> Self { + Self::new() + } +} + +impl Clone for BoundedBTreeSet +where + BTreeSet: Clone, +{ + fn clone(&self) -> Self { + BoundedBTreeSet(self.0.clone(), PhantomData) + } +} + +impl core::fmt::Debug for BoundedBTreeSet +where + BTreeSet: core::fmt::Debug, + S: Get, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("BoundedBTreeSet").field(&self.0).field(&Self::bound()).finish() + } +} + +// Custom implementation of `Hash` since deriving it would require all generic bounds to also +// implement it. 
+#[cfg(feature = "std")] +impl std::hash::Hash for BoundedBTreeSet { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } +} + +impl PartialEq> for BoundedBTreeSet +where + BTreeSet: PartialEq, + S1: Get, + S2: Get, +{ + fn eq(&self, other: &BoundedBTreeSet) -> bool { + S1::get() == S2::get() && self.0 == other.0 + } +} + +impl Eq for BoundedBTreeSet +where + BTreeSet: Eq, + S: Get, +{ +} + +impl PartialEq> for BoundedBTreeSet +where + BTreeSet: PartialEq, + S: Get, +{ + fn eq(&self, other: &BTreeSet) -> bool { + self.0 == *other + } +} + +impl PartialOrd for BoundedBTreeSet +where + BTreeSet: PartialOrd, + S: Get, +{ + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl Ord for BoundedBTreeSet +where + BTreeSet: Ord, + S: Get, +{ + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl IntoIterator for BoundedBTreeSet { + type Item = T; + type IntoIter = alloc::collections::btree_set::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl<'a, T, S> IntoIterator for &'a BoundedBTreeSet { + type Item = &'a T; + type IntoIter = alloc::collections::btree_set::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl MaxEncodedLen for BoundedBTreeSet +where + T: MaxEncodedLen, + S: Get, +{ + fn max_encoded_len() -> usize { + Self::bound() + .saturating_mul(T::max_encoded_len()) + .saturating_add(codec::Compact(S::get()).encoded_size()) + } +} + +impl Deref for BoundedBTreeSet +where + T: Ord, +{ + type Target = BTreeSet; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl AsRef> for BoundedBTreeSet +where + T: Ord, +{ + fn as_ref(&self) -> &BTreeSet { + &self.0 + } +} + +impl From> for BTreeSet +where + T: Ord, +{ + fn from(set: BoundedBTreeSet) -> Self { + set.0 + } +} + +impl TryFrom> for BoundedBTreeSet +where + T: Ord, + S: Get, +{ + type Error = (); + + fn try_from(value: BTreeSet) -> Result { + (value.len() <= Self::bound()) + .then(move || BoundedBTreeSet(value, PhantomData)) + .ok_or(()) + } +} + +impl codec::DecodeLength for BoundedBTreeSet { + fn len(self_encoded: &[u8]) -> Result { + // `BoundedBTreeSet` is stored just a `BTreeSet`, which is stored as a + // `Compact` with its length followed by an iteration of its items. We can just use + // the underlying implementation. 
+ as codec::DecodeLength>::len(self_encoded) + } +} + +impl codec::EncodeLike> for BoundedBTreeSet where BTreeSet: Encode {} + +impl TryCollect> for I +where + T: Ord, + I: ExactSizeIterator + Iterator, + Bound: Get, +{ + type Error = &'static str; + + fn try_collect(self) -> Result, Self::Error> { + if self.len() > Bound::get() as usize { + Err("iterator length too big") + } else { + Ok(BoundedBTreeSet::::unchecked_from(self.collect::>())) + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::ConstU32; + use alloc::{vec, vec::Vec}; + use codec::CompactLen; + + fn set_from_keys(keys: &[T]) -> BTreeSet + where + T: Ord + Copy, + { + keys.iter().copied().collect() + } + + fn boundedset_from_keys(keys: &[T]) -> BoundedBTreeSet + where + T: Ord + Copy, + S: Get, + { + set_from_keys(keys).try_into().unwrap() + } + + #[test] + fn encoding_same_as_unbounded_set() { + let b = boundedset_from_keys::>(&[1, 2, 3, 4, 5, 6]); + let m = set_from_keys(&[1, 2, 3, 4, 5, 6]); + + assert_eq!(b.encode(), m.encode()); + } + + #[test] + fn try_insert_works() { + let mut bounded = boundedset_from_keys::>(&[1, 2, 3]); + bounded.try_insert(0).unwrap(); + assert_eq!(*bounded, set_from_keys(&[1, 0, 2, 3])); + + assert!(bounded.try_insert(9).is_err()); + assert_eq!(*bounded, set_from_keys(&[1, 0, 2, 3])); + } + + #[test] + fn deref_coercion_works() { + let bounded = boundedset_from_keys::>(&[1, 2, 3]); + // these methods come from deref-ed vec. + assert_eq!(bounded.len(), 3); + assert!(bounded.iter().next().is_some()); + assert!(!bounded.is_empty()); + } + + #[test] + fn try_mutate_works() { + let bounded = boundedset_from_keys::>(&[1, 2, 3, 4, 5, 6]); + let bounded = bounded + .try_mutate(|v| { + v.insert(7); + }) + .unwrap(); + assert_eq!(bounded.len(), 7); + assert!(bounded + .try_mutate(|v| { + v.insert(8); + }) + .is_none()); + } + + #[test] + fn btree_map_eq_works() { + let bounded = boundedset_from_keys::>(&[1, 2, 3, 4, 5, 6]); + assert_eq!(bounded, set_from_keys(&[1, 2, 3, 4, 5, 6])); + } + + #[test] + fn too_big_fail_to_decode() { + let v: Vec = vec![1, 2, 3, 4, 5]; + assert_eq!( + BoundedBTreeSet::>::decode(&mut &v.encode()[..]), + Err("BoundedBTreeSet exceeds its limit".into()), + ); + } + + #[test] + fn dont_consume_more_data_than_bounded_len() { + let s = set_from_keys(&[1, 2, 3, 4, 5, 6]); + let data = s.encode(); + let data_input = &mut &data[..]; + + BoundedBTreeSet::>::decode(data_input).unwrap_err(); + assert_eq!(data_input.len(), data.len() - Compact::::compact_len(&(data.len() as u32))); + } + + #[test] + fn unequal_eq_impl_insert_works() { + // given a struct with a strange notion of equality + #[derive(Debug)] + struct Unequal(u32, bool); + + impl PartialEq for Unequal { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + impl Eq for Unequal {} + + impl Ord for Unequal { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.0.cmp(&other.0) + } + } + + impl PartialOrd for Unequal { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + let mut set = BoundedBTreeSet::>::new(); + + // when the set is full + + for i in 0..4 { + set.try_insert(Unequal(i, false)).unwrap(); + } + + // can't insert a new distinct member + set.try_insert(Unequal(5, false)).unwrap_err(); + + // but _can_ insert a distinct member which compares equal, though per the documentation, + // neither the set length nor the actual member are changed + set.try_insert(Unequal(0, true)).unwrap(); + assert_eq!(set.len(), 4); + let zero_item = set.get(&Unequal(0, 
true)).unwrap(); + assert_eq!(zero_item.0, 0); + assert_eq!(zero_item.1, false); + } + + #[test] + fn eq_works() { + // of same type + let b1 = boundedset_from_keys::>(&[1, 2]); + let b2 = boundedset_from_keys::>(&[1, 2]); + assert_eq!(b1, b2); + + // of different type, but same value and bound. + crate::parameter_types! { + B1: u32 = 7; + B2: u32 = 7; + } + let b1 = boundedset_from_keys::(&[1, 2]); + let b2 = boundedset_from_keys::(&[1, 2]); + assert_eq!(b1, b2); + } + + #[test] + fn can_be_collected() { + let b1 = boundedset_from_keys::>(&[1, 2, 3, 4]); + let b2: BoundedBTreeSet> = b1.iter().map(|k| k + 1).try_collect().unwrap(); + assert_eq!(b2.into_iter().collect::>(), vec![2, 3, 4, 5]); + + // can also be collected into a collection of length 4. + let b2: BoundedBTreeSet> = b1.iter().map(|k| k + 1).try_collect().unwrap(); + assert_eq!(b2.into_iter().collect::>(), vec![2, 3, 4, 5]); + + // can be mutated further into iterators that are `ExactSizedIterator`. + let b2: BoundedBTreeSet> = b1.iter().map(|k| k + 1).rev().skip(2).try_collect().unwrap(); + // note that the binary tree will re-sort this, so rev() is not really seen + assert_eq!(b2.into_iter().collect::>(), vec![2, 3]); + + let b2: BoundedBTreeSet> = b1.iter().map(|k| k + 1).take(2).try_collect().unwrap(); + assert_eq!(b2.into_iter().collect::>(), vec![2, 3]); + + // but these worn't work + let b2: Result>, _> = b1.iter().map(|k| k + 1).try_collect(); + assert!(b2.is_err()); + + let b2: Result>, _> = b1.iter().map(|k| k + 1).skip(2).try_collect(); + assert!(b2.is_err()); + } + + // Just a test that structs containing `BoundedBTreeSet` can derive `Hash`. (This was broken + // when it was deriving `Hash`). + #[test] + #[cfg(feature = "std")] + fn container_can_derive_hash() { + #[derive(Hash, Default)] + struct Foo { + bar: u8, + set: BoundedBTreeSet>, + } + let _foo = Foo::default(); + } + + #[test] + fn is_full_works() { + let mut bounded = boundedset_from_keys::>(&[1, 2, 3]); + assert!(!bounded.is_full()); + bounded.try_insert(0).unwrap(); + assert_eq!(*bounded, set_from_keys(&[1, 0, 2, 3])); + + assert!(bounded.is_full()); + assert!(bounded.try_insert(9).is_err()); + assert_eq!(*bounded, set_from_keys(&[1, 0, 2, 3])); + } + + #[cfg(feature = "serde")] + mod serde { + use super::*; + use crate::alloc::string::ToString as _; + + #[test] + fn test_serializer() { + let mut c = BoundedBTreeSet::>::new(); + c.try_insert(0).unwrap(); + c.try_insert(1).unwrap(); + c.try_insert(2).unwrap(); + + assert_eq!(serde_json::json!(&c).to_string(), r#"[0,1,2]"#); + } + + #[test] + fn test_deserializer() { + let c: Result>, serde_json::error::Error> = + serde_json::from_str(r#"[0,1,2]"#); + assert!(c.is_ok()); + let c = c.unwrap(); + + assert_eq!(c.len(), 3); + assert!(c.contains(&0)); + assert!(c.contains(&1)); + assert!(c.contains(&2)); + } + + #[test] + fn test_deserializer_bound() { + let c: Result>, serde_json::error::Error> = + serde_json::from_str(r#"[0,1,2]"#); + assert!(c.is_ok()); + let c = c.unwrap(); + + assert_eq!(c.len(), 3); + assert!(c.contains(&0)); + assert!(c.contains(&1)); + assert!(c.contains(&2)); + } + + #[test] + fn test_deserializer_failed() { + let c: Result>, serde_json::error::Error> = + serde_json::from_str(r#"[0,1,2,3,4]"#); + + match c { + Err(msg) => assert_eq!(msg.to_string(), "out of bounds at line 1 column 11"), + _ => unreachable!("deserializer must raise error"), + } + } + } +} diff --git a/bounded-collections/src/bounded_vec.rs b/bounded-collections/src/bounded_vec.rs new file mode 100644 index 
000000000..1c3a5b34f --- /dev/null +++ b/bounded-collections/src/bounded_vec.rs @@ -0,0 +1,1383 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2023 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits, types and structs to support putting a bounded vector into storage, as a raw value, map +//! or a double map. + +use super::WeakBoundedVec; +use crate::{Get, TryCollect}; +use alloc::vec::Vec; +use codec::{decode_vec_with_len, Compact, Decode, Encode, EncodeLike, MaxEncodedLen}; +use core::{ + marker::PhantomData, + ops::{Deref, Index, IndexMut, RangeBounds}, + slice::SliceIndex, +}; +#[cfg(feature = "serde")] +use serde::{ + de::{Error, SeqAccess, Visitor}, + Deserialize, Deserializer, Serialize, +}; + +/// A bounded vector. +/// +/// It has implementations for efficient append and length decoding, as with a normal `Vec<_>`, once +/// put into storage as a raw value, map or double-map. +/// +/// As the name suggests, the length of the queue is always bounded. All internal operations ensure +/// this bound is respected. +#[cfg_attr(feature = "serde", derive(Serialize), serde(transparent))] +#[derive(Encode, scale_info::TypeInfo)] +#[scale_info(skip_type_params(S))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] +pub struct BoundedVec(pub(super) Vec, #[cfg_attr(feature = "serde", serde(skip_serializing))] PhantomData); + +/// Create an object through truncation. +pub trait TruncateFrom { + /// Create an object through truncation. + fn truncate_from(unbound: T) -> Self; +} + +#[cfg(feature = "serde")] +impl<'de, T, S: Get> Deserialize<'de> for BoundedVec +where + T: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct VecVisitor>(PhantomData<(T, S)>); + + impl<'de, T, S: Get> Visitor<'de> for VecVisitor + where + T: Deserialize<'de>, + { + type Value = Vec; + + fn expecting(&self, formatter: &mut alloc::fmt::Formatter) -> alloc::fmt::Result { + formatter.write_str("a sequence") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let size = seq.size_hint().unwrap_or(0); + let max = match usize::try_from(S::get()) { + Ok(n) => n, + Err(_) => return Err(A::Error::custom("can't convert to usize")), + }; + if size > max { + Err(A::Error::custom("out of bounds")) + } else { + let mut values = Vec::with_capacity(size); + + while let Some(value) = seq.next_element()? { + if values.len() >= max { + return Err(A::Error::custom("out of bounds")) + } + values.push(value); + } + + Ok(values) + } + } + } + + let visitor: VecVisitor = VecVisitor(PhantomData); + deserializer + .deserialize_seq(visitor) + .map(|v| BoundedVec::::try_from(v).map_err(|_| Error::custom("out of bounds")))? + } +} + +/// A bounded slice. +/// +/// Similar to a `BoundedVec`, but not owned and cannot be decoded. 
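///
/// A minimal construction sketch (illustrative only; it assumes `BoundedSlice` and `ConstU32` are
/// re-exported from the crate root):
///
/// ```
/// use bounded_collections::{BoundedSlice, ConstU32};
///
/// let items = [1u32, 2, 3, 4, 5];
/// // `try_from` rejects a slice longer than the bound...
/// assert!(BoundedSlice::<u32, ConstU32<4>>::try_from(&items[..]).is_err());
/// // ...while `truncate_from` keeps only the first `S::get()` elements.
/// let bounded = BoundedSlice::<u32, ConstU32<4>>::truncate_from(&items[..]);
/// assert_eq!(bounded.len(), 4);
/// ```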
+#[derive(Encode, scale_info::TypeInfo)] +pub struct BoundedSlice<'a, T, S>(pub(super) &'a [T], PhantomData); + +// `BoundedSlice`s encode to something which will always decode into a `BoundedVec`, +// `WeakBoundedVec`, or a `Vec`. +impl<'a, T: Encode + Decode, S: Get> EncodeLike> for BoundedSlice<'a, T, S> {} +impl<'a, T: Encode + Decode, S: Get> EncodeLike> for BoundedSlice<'a, T, S> {} +impl<'a, T: Encode + Decode, S: Get> EncodeLike> for BoundedSlice<'a, T, S> {} + +impl<'a, T, BoundSelf, BoundRhs> PartialEq> for BoundedSlice<'a, T, BoundSelf> +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, other: &BoundedSlice<'a, T, BoundRhs>) -> bool { + self.0 == other.0 + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialEq> for BoundedSlice<'a, T, BoundSelf> +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, other: &BoundedVec) -> bool { + self.0 == other.0 + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialEq> for BoundedSlice<'a, T, BoundSelf> +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, other: &WeakBoundedVec) -> bool { + self.0 == other.0 + } +} + +impl<'a, T, S: Get> Eq for BoundedSlice<'a, T, S> where T: Eq {} + +impl<'a, T, BoundSelf, BoundRhs> PartialOrd> for BoundedSlice<'a, T, BoundSelf> +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &BoundedSlice<'a, T, BoundRhs>) -> Option { + self.0.partial_cmp(other.0) + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialOrd> for BoundedSlice<'a, T, BoundSelf> +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &BoundedVec) -> Option { + self.0.partial_cmp(&*other.0) + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialOrd> for BoundedSlice<'a, T, BoundSelf> +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &WeakBoundedVec) -> Option { + self.0.partial_cmp(&*other.0) + } +} + +impl<'a, T: Ord, Bound: Get> Ord for BoundedSlice<'a, T, Bound> { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl<'a, T, S: Get> TryFrom<&'a [T]> for BoundedSlice<'a, T, S> { + type Error = &'a [T]; + fn try_from(t: &'a [T]) -> Result { + if t.len() <= S::get() as usize { + Ok(BoundedSlice(t, PhantomData)) + } else { + Err(t) + } + } +} + +impl<'a, T, S> From> for &'a [T] { + fn from(t: BoundedSlice<'a, T, S>) -> Self { + t.0 + } +} + +impl<'a, T, S: Get> TruncateFrom<&'a [T]> for BoundedSlice<'a, T, S> { + fn truncate_from(unbound: &'a [T]) -> Self { + BoundedSlice::::truncate_from(unbound) + } +} + +impl<'a, T, S> Clone for BoundedSlice<'a, T, S> { + fn clone(&self) -> Self { + BoundedSlice(self.0, PhantomData) + } +} + +impl<'a, T, S> core::fmt::Debug for BoundedSlice<'a, T, S> +where + &'a [T]: core::fmt::Debug, + S: Get, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("BoundedSlice").field(&self.0).field(&S::get()).finish() + } +} + +// Since a reference `&T` is always `Copy`, so is `BoundedSlice<'a, T, S>`. +impl<'a, T, S> Copy for BoundedSlice<'a, T, S> {} + +// will allow for all immutable operations of `[T]` on `BoundedSlice`. +impl<'a, T, S> Deref for BoundedSlice<'a, T, S> { + type Target = [T]; + + fn deref(&self) -> &Self::Target { + self.0 + } +} + +// Custom implementation of `Hash` since deriving it would require all generic bounds to also +// implement it. 
+#[cfg(feature = "std")] +impl<'a, T: std::hash::Hash, S> std::hash::Hash for BoundedSlice<'a, T, S> { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } +} + +impl<'a, T, S> core::iter::IntoIterator for BoundedSlice<'a, T, S> { + type Item = &'a T; + type IntoIter = core::slice::Iter<'a, T>; + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl<'a, T, S: Get> BoundedSlice<'a, T, S> { + /// Create an instance from the first elements of the given slice (or all of it if it is smaller + /// than the length bound). + pub fn truncate_from(s: &'a [T]) -> Self { + Self(&s[0..(s.len().min(S::get() as usize))], PhantomData) + } +} + +impl> Decode for BoundedVec { + fn decode(input: &mut I) -> Result { + // Same as the underlying implementation for `Decode` on `Vec`, except we fail early if the + // len is too big. + let len: u32 = >::decode(input)?.into(); + if len > S::get() { + return Err("BoundedVec exceeds its limit".into()) + } + let inner = decode_vec_with_len(input, len as usize)?; + Ok(Self(inner, PhantomData)) + } + + fn skip(input: &mut I) -> Result<(), codec::Error> { + Vec::::skip(input) + } +} + +// `BoundedVec`s encode to something which will always decode as a `Vec`. +impl> EncodeLike> for BoundedVec {} + +impl BoundedVec { + /// Create `Self` with no items. + pub fn new() -> Self { + Self(Vec::new(), Default::default()) + } + + /// Create `Self` from `t` without any checks. + fn unchecked_from(t: Vec) -> Self { + Self(t, Default::default()) + } + + /// Exactly the same semantics as `Vec::clear`. + pub fn clear(&mut self) { + self.0.clear() + } + + /// Consume self, and return the inner `Vec`. Henceforth, the `Vec<_>` can be altered in an + /// arbitrary way. At some point, if the reverse conversion is required, `TryFrom>` can + /// be used. + /// + /// This is useful for cases if you need access to an internal API of the inner `Vec<_>` which + /// is not provided by the wrapper `BoundedVec`. + pub fn into_inner(self) -> Vec { + self.0 + } + + /// Exactly the same semantics as [`slice::sort_by`]. + /// + /// This is safe since sorting cannot change the number of elements in the vector. + pub fn sort_by(&mut self, compare: F) + where + F: FnMut(&T, &T) -> core::cmp::Ordering, + { + self.0.sort_by(compare) + } + + /// Exactly the same semantics as [`slice::sort_by_key`]. + /// + /// This is safe since sorting cannot change the number of elements in the vector. + pub fn sort_by_key(&mut self, f: F) + where + F: FnMut(&T) -> K, + K: core::cmp::Ord, + { + self.0.sort_by_key(f) + } + + /// Exactly the same semantics as [`slice::sort`]. + /// + /// This is safe since sorting cannot change the number of elements in the vector. + pub fn sort(&mut self) + where + T: core::cmp::Ord, + { + self.0.sort() + } + + /// Exactly the same semantics as `Vec::remove`. + /// + /// # Panics + /// + /// Panics if `index` is out of bounds. + pub fn remove(&mut self, index: usize) -> T { + self.0.remove(index) + } + + /// Exactly the same semantics as `slice::swap_remove`. + /// + /// # Panics + /// + /// Panics if `index` is out of bounds. + pub fn swap_remove(&mut self, index: usize) -> T { + self.0.swap_remove(index) + } + + /// Exactly the same semantics as `Vec::retain`. + pub fn retain bool>(&mut self, f: F) { + self.0.retain(f) + } + + /// Exactly the same semantics as `slice::get_mut`. + pub fn get_mut>(&mut self, index: I) -> Option<&mut >::Output> { + self.0.get_mut(index) + } + + /// Exactly the same semantics as `Vec::truncate`. 
+ /// + /// This is safe because `truncate` can never increase the length of the internal vector. + pub fn truncate(&mut self, s: usize) { + self.0.truncate(s); + } + + /// Exactly the same semantics as `Vec::pop`. + /// + /// This is safe since popping can only shrink the inner vector. + pub fn pop(&mut self) -> Option { + self.0.pop() + } + + /// Exactly the same semantics as [`slice::iter_mut`]. + pub fn iter_mut(&mut self) -> core::slice::IterMut<'_, T> { + self.0.iter_mut() + } + + /// Exactly the same semantics as [`slice::last_mut`]. + pub fn last_mut(&mut self) -> Option<&mut T> { + self.0.last_mut() + } + + /// Exact same semantics as [`Vec::drain`]. + pub fn drain(&mut self, range: R) -> alloc::vec::Drain<'_, T> + where + R: RangeBounds, + { + self.0.drain(range) + } +} + +impl> From> for Vec { + fn from(x: BoundedVec) -> Vec { + x.0 + } +} + +impl> BoundedVec { + /// Pre-allocate `capacity` items in self. + /// + /// If `capacity` is greater than [`Self::bound`], then the minimum of the two is used. + pub fn with_bounded_capacity(capacity: usize) -> Self { + let capacity = capacity.min(Self::bound()); + Self(Vec::with_capacity(capacity), Default::default()) + } + + /// Allocate self with the maximum possible capacity. + pub fn with_max_capacity() -> Self { + Self::with_bounded_capacity(Self::bound()) + } + + /// Consume and truncate the vector `v` in order to create a new instance of `Self` from it. + pub fn truncate_from(mut v: Vec) -> Self { + v.truncate(Self::bound()); + Self::unchecked_from(v) + } + + /// Get the bound of the type in `usize`. + pub fn bound() -> usize { + S::get() as usize + } + + /// Returns true if this collection is full. + pub fn is_full(&self) -> bool { + self.len() >= Self::bound() + } + + /// Forces the insertion of `element` into `self` retaining all items with index at least + /// `index`. + /// + /// If `index == 0` and `self.len() == Self::bound()`, then this is a no-op. + /// + /// If `Self::bound() < index` or `self.len() < index`, then this is also a no-op. + /// + /// Returns `Ok(maybe_removed)` if the item was inserted, where `maybe_removed` is + /// `Some(removed)` if an item was removed to make room for the new one. Returns `Err(element)` + /// if `element` cannot be inserted. + pub fn force_insert_keep_right(&mut self, index: usize, mut element: T) -> Result, T> { + // Check against panics. + if Self::bound() < index || self.len() < index { + Err(element) + } else if self.len() < Self::bound() { + // Cannot panic since self.len() >= index; + self.0.insert(index, element); + Ok(None) + } else { + if index == 0 { + return Err(element) + } + core::mem::swap(&mut self[0], &mut element); + // `[0..index] cannot panic since self.len() >= index. + // `rotate_left(1)` cannot panic because there is at least 1 element. + self[0..index].rotate_left(1); + Ok(Some(element)) + } + } + + /// Forces the insertion of `element` into `self` retaining all items with index at most + /// `index`. + /// + /// If `index == Self::bound()` and `self.len() == Self::bound()`, then this is a no-op. + /// + /// If `Self::bound() < index` or `self.len() < index`, then this is also a no-op. + /// + /// Returns `Ok(maybe_removed)` if the item was inserted, where `maybe_removed` is + /// `Some(removed)` if an item was removed to make room for the new one. Returns `Err(element)` + /// if `element` cannot be inserted. + pub fn force_insert_keep_left(&mut self, index: usize, element: T) -> Result, T> { + // Check against panics. 
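		// (`Vec::insert` would panic if `index > len`, and a zero bound can never hold the new
		// element, so those cases are rejected up front.)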
+ if Self::bound() < index || self.len() < index || Self::bound() == 0 { + return Err(element) + } + // Noop condition. + if Self::bound() == index && self.len() <= Self::bound() { + return Err(element) + } + let maybe_removed = if self.is_full() { + // defensive-only: since we are at capacity, this is a noop. + self.0.truncate(Self::bound()); + // if we truncate anything, it will be the last one. + self.0.pop() + } else { + None + }; + + // Cannot panic since `self.len() >= index`; + self.0.insert(index, element); + Ok(maybe_removed) + } + + /// Move the position of an item from one location to another in the slice. + /// + /// Except for the item being moved, the order of the slice remains the same. + /// + /// - `index` is the location of the item to be moved. + /// - `insert_position` is the index of the item in the slice which should *immediately follow* + /// the item which is being moved. + /// + /// Returns `true` of the operation was successful, otherwise `false` if a noop. + pub fn slide(&mut self, index: usize, insert_position: usize) -> bool { + // Check against panics. + if self.len() <= index || self.len() < insert_position || index == usize::MAX { + return false + } + // Noop conditions. + if index == insert_position || index + 1 == insert_position { + return false + } + if insert_position < index && index < self.len() { + // --- --- --- === === === === @@@ --- --- --- + // ^-- N ^O^ + // ... + // /-----<<<-----\ + // --- --- --- === === === === @@@ --- --- --- + // >>> >>> >>> >>> + // ... + // --- --- --- @@@ === === === === --- --- --- + // ^N^ + self[insert_position..index + 1].rotate_right(1); + return true + } else if insert_position > 0 && index + 1 < insert_position { + // Note that the apparent asymmetry of these two branches is due to the + // fact that the "new" position is the position to be inserted *before*. + // --- --- --- @@@ === === === === --- --- --- + // ^O^ ^-- N + // ... + // /----->>>-----\ + // --- --- --- @@@ === === === === --- --- --- + // <<< <<< <<< <<< + // ... + // --- --- --- === === === === @@@ --- --- --- + // ^N^ + self[index..insert_position].rotate_left(1); + return true + } + + debug_assert!(false, "all noop conditions should have been covered above"); + false + } + + /// Forces the insertion of `s` into `self` truncating first if necessary. + /// + /// Infallible, but if the bound is zero, then it's a no-op. + pub fn force_push(&mut self, element: T) { + if Self::bound() > 0 { + self.0.truncate(Self::bound() as usize - 1); + self.0.push(element); + } + } + + /// Same as `Vec::resize`, but if `size` is more than [`Self::bound`], then [`Self::bound`] is + /// used. + pub fn bounded_resize(&mut self, size: usize, value: T) + where + T: Clone, + { + let size = size.min(Self::bound()); + self.0.resize(size, value); + } + + /// Exactly the same semantics as [`Vec::extend`], but returns an error and does nothing if the + /// length of the outcome is larger than the bound. + pub fn try_extend(&mut self, with: impl IntoIterator + ExactSizeIterator) -> Result<(), ()> { + if with.len().saturating_add(self.len()) <= Self::bound() { + self.0.extend(with); + Ok(()) + } else { + Err(()) + } + } + + /// Exactly the same semantics as [`Vec::append`], but returns an error and does nothing if the + /// length of the outcome is larger than the bound. 
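	///
	/// A short sketch of the behaviour (illustrative only; it assumes the `ConstU32` bound type
	/// from this crate):
	///
	/// ```
	/// use bounded_collections::{BoundedVec, ConstU32};
	///
	/// let mut v: BoundedVec<u32, ConstU32<4>> = vec![1, 2, 3].try_into().unwrap();
	/// // One more element still fits within the bound of four.
	/// assert!(v.try_append(&mut vec![4]).is_ok());
	/// // Two more would exceed it, so the call fails and `v` is left untouched.
	/// assert!(v.try_append(&mut vec![5, 6]).is_err());
	/// assert_eq!(*v, vec![1, 2, 3, 4]);
	/// ```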
+ pub fn try_append(&mut self, other: &mut Vec) -> Result<(), ()> { + if other.len().saturating_add(self.len()) <= Self::bound() { + self.0.append(other); + Ok(()) + } else { + Err(()) + } + } + + /// Consumes self and mutates self via the given `mutate` function. + /// + /// If the outcome of mutation is within bounds, `Some(Self)` is returned. Else, `None` is + /// returned. + /// + /// This is essentially a *consuming* shorthand [`Self::into_inner`] -> `...` -> + /// [`Self::try_from`]. + pub fn try_mutate(mut self, mut mutate: impl FnMut(&mut Vec)) -> Option { + mutate(&mut self.0); + (self.0.len() <= Self::bound()).then(move || self) + } + + /// Exactly the same semantics as [`Vec::insert`], but returns an `Err` (and is a noop) if the + /// new length of the vector exceeds `S`. + /// + /// # Panics + /// + /// Panics if `index > len`. + pub fn try_insert(&mut self, index: usize, element: T) -> Result<(), T> { + if self.len() < Self::bound() { + self.0.insert(index, element); + Ok(()) + } else { + Err(element) + } + } + + /// Exactly the same semantics as [`Vec::push`], but returns an `Err` (and is a noop) if the + /// new length of the vector exceeds `S`. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds isize::MAX bytes. + pub fn try_push(&mut self, element: T) -> Result<(), T> { + if self.len() < Self::bound() { + self.0.push(element); + Ok(()) + } else { + Err(element) + } + } + + /// Exactly the same semantics as [`Vec::rotate_left`], but returns an `Err` (and is a noop) if `mid` is larger then the current length. + pub fn try_rotate_left(&mut self, mid: usize) -> Result<(), ()> { + if mid > self.len() { + return Err(()) + } + + self.0.rotate_left(mid); + Ok(()) + } + + /// Exactly the same semantics as [`Vec::rotate_right`], but returns an `Err` (and is a noop) if `mid` is larger then the current length. + pub fn try_rotate_right(&mut self, mid: usize) -> Result<(), ()> { + if mid > self.len() { + return Err(()) + } + + self.0.rotate_right(mid); + Ok(()) + } +} + +impl BoundedVec { + /// Return a [`BoundedSlice`] with the content and bound of [`Self`]. + pub fn as_bounded_slice(&self) -> BoundedSlice { + BoundedSlice(&self.0[..], PhantomData::default()) + } +} + +impl Default for BoundedVec { + fn default() -> Self { + // the bound cannot be below 0, which is satisfied by an empty vector + Self::unchecked_from(Vec::default()) + } +} + +impl core::fmt::Debug for BoundedVec +where + Vec: core::fmt::Debug, + S: Get, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("BoundedVec").field(&self.0).field(&Self::bound()).finish() + } +} + +impl Clone for BoundedVec +where + T: Clone, +{ + fn clone(&self) -> Self { + // bound is retained + Self::unchecked_from(self.0.clone()) + } +} + +impl> TryFrom> for BoundedVec { + type Error = Vec; + fn try_from(t: Vec) -> Result { + if t.len() <= Self::bound() { + // explicit check just above + Ok(Self::unchecked_from(t)) + } else { + Err(t) + } + } +} + +impl> TruncateFrom> for BoundedVec { + fn truncate_from(unbound: Vec) -> Self { + BoundedVec::::truncate_from(unbound) + } +} + +// Custom implementation of `Hash` since deriving it would require all generic bounds to also +// implement it. +#[cfg(feature = "std")] +impl std::hash::Hash for BoundedVec { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } +} + +// It is okay to give a non-mutable reference of the inner vec to anyone. 
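// Mutable access, by contrast, is only handed out as `&mut [T]` (see `AsMut<[T]>`, `IndexMut` and
// `get_mut`/`iter_mut`), which lets callers change elements but never the length, so the bound
// cannot be violated through it.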
+impl AsRef> for BoundedVec { + fn as_ref(&self) -> &Vec { + &self.0 + } +} + +impl AsRef<[T]> for BoundedVec { + fn as_ref(&self) -> &[T] { + &self.0 + } +} + +impl AsMut<[T]> for BoundedVec { + fn as_mut(&mut self) -> &mut [T] { + &mut self.0 + } +} + +// will allow for all immutable operations of `Vec` on `BoundedVec`. +impl Deref for BoundedVec { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +// Allows for indexing similar to a normal `Vec`. Can panic if out of bound. +impl Index for BoundedVec +where + I: SliceIndex<[T]>, +{ + type Output = I::Output; + + #[inline] + fn index(&self, index: I) -> &Self::Output { + self.0.index(index) + } +} + +impl IndexMut for BoundedVec +where + I: SliceIndex<[T]>, +{ + #[inline] + fn index_mut(&mut self, index: I) -> &mut Self::Output { + self.0.index_mut(index) + } +} + +impl core::iter::IntoIterator for BoundedVec { + type Item = T; + type IntoIter = alloc::vec::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl<'a, T, S> core::iter::IntoIterator for &'a BoundedVec { + type Item = &'a T; + type IntoIter = core::slice::Iter<'a, T>; + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl<'a, T, S> core::iter::IntoIterator for &'a mut BoundedVec { + type Item = &'a mut T; + type IntoIter = core::slice::IterMut<'a, T>; + fn into_iter(self) -> Self::IntoIter { + self.0.iter_mut() + } +} + +impl codec::DecodeLength for BoundedVec { + fn len(self_encoded: &[u8]) -> Result { + // `BoundedVec` stored just a `Vec`, thus the length is at the beginning in + // `Compact` form, and same implementation as `Vec` can be used. + as codec::DecodeLength>::len(self_encoded) + } +} + +impl PartialEq> for BoundedVec +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, rhs: &BoundedVec) -> bool { + self.0 == rhs.0 + } +} + +impl PartialEq> for BoundedVec +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, rhs: &WeakBoundedVec) -> bool { + self.0 == rhs.0 + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialEq> for BoundedVec +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, rhs: &BoundedSlice<'a, T, BoundRhs>) -> bool { + self.0 == rhs.0 + } +} + +impl<'a, T: PartialEq, S: Get> PartialEq<&'a [T]> for BoundedSlice<'a, T, S> { + fn eq(&self, other: &&'a [T]) -> bool { + &self.0 == other + } +} + +impl> PartialEq> for BoundedVec { + fn eq(&self, other: &Vec) -> bool { + &self.0 == other + } +} + +impl> Eq for BoundedVec where T: Eq {} + +impl PartialOrd> for BoundedVec +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &BoundedVec) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl PartialOrd> for BoundedVec +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &WeakBoundedVec) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialOrd> for BoundedVec +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &BoundedSlice<'a, T, BoundRhs>) -> Option { + (&*self.0).partial_cmp(other.0) + } +} + +impl> Ord for BoundedVec { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl MaxEncodedLen for BoundedVec +where + T: MaxEncodedLen, + S: Get, + BoundedVec: Encode, +{ + fn max_encoded_len() -> usize { + // BoundedVec encodes like Vec which encodes like [T], which is a compact u32 + // plus each item in the slice: + // See: 
https://docs.substrate.io/reference/scale-codec/ + codec::Compact(S::get()) + .encoded_size() + .saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) + } +} + +impl TryCollect> for I +where + I: ExactSizeIterator + Iterator, + Bound: Get, +{ + type Error = &'static str; + + fn try_collect(self) -> Result, Self::Error> { + if self.len() > Bound::get() as usize { + Err("iterator length too big") + } else { + Ok(BoundedVec::::unchecked_from(self.collect::>())) + } + } +} + +#[cfg(all(test, feature = "std"))] +mod test { + use super::*; + use crate::{bounded_vec, ConstU32}; + use codec::CompactLen; + + #[test] + fn encoding_same_as_unbounded_vec() { + let b: BoundedVec> = bounded_vec![0, 1, 2, 3, 4, 5]; + let v: Vec = vec![0, 1, 2, 3, 4, 5]; + + assert_eq!(b.encode(), v.encode()); + } + + #[test] + fn slice_truncate_from_works() { + let bounded = BoundedSlice::>::truncate_from(&[1, 2, 3, 4, 5]); + assert_eq!(bounded.deref(), &[1, 2, 3, 4]); + let bounded = BoundedSlice::>::truncate_from(&[1, 2, 3, 4]); + assert_eq!(bounded.deref(), &[1, 2, 3, 4]); + let bounded = BoundedSlice::>::truncate_from(&[1, 2, 3]); + assert_eq!(bounded.deref(), &[1, 2, 3]); + } + + #[test] + fn slide_works() { + let mut b: BoundedVec> = bounded_vec![0, 1, 2, 3, 4, 5]; + assert!(b.slide(1, 5)); + assert_eq!(*b, vec![0, 2, 3, 4, 1, 5]); + assert!(b.slide(4, 0)); + assert_eq!(*b, vec![1, 0, 2, 3, 4, 5]); + assert!(b.slide(0, 2)); + assert_eq!(*b, vec![0, 1, 2, 3, 4, 5]); + assert!(b.slide(1, 6)); + assert_eq!(*b, vec![0, 2, 3, 4, 5, 1]); + assert!(b.slide(0, 6)); + assert_eq!(*b, vec![2, 3, 4, 5, 1, 0]); + assert!(b.slide(5, 0)); + assert_eq!(*b, vec![0, 2, 3, 4, 5, 1]); + assert!(!b.slide(6, 0)); + assert!(!b.slide(7, 0)); + assert_eq!(*b, vec![0, 2, 3, 4, 5, 1]); + + let mut c: BoundedVec> = bounded_vec![0, 1, 2]; + assert!(!c.slide(1, 5)); + assert_eq!(*c, vec![0, 1, 2]); + assert!(!c.slide(4, 0)); + assert_eq!(*c, vec![0, 1, 2]); + assert!(!c.slide(3, 0)); + assert_eq!(*c, vec![0, 1, 2]); + assert!(c.slide(2, 0)); + assert_eq!(*c, vec![2, 0, 1]); + } + + #[test] + fn slide_noops_work() { + let mut b: BoundedVec> = bounded_vec![0, 1, 2, 3, 4, 5]; + assert!(!b.slide(3, 3)); + assert_eq!(*b, vec![0, 1, 2, 3, 4, 5]); + assert!(!b.slide(3, 4)); + assert_eq!(*b, vec![0, 1, 2, 3, 4, 5]); + } + + #[test] + fn force_insert_keep_left_works() { + let mut b: BoundedVec> = bounded_vec![]; + assert_eq!(b.force_insert_keep_left(1, 10), Err(10)); + assert!(b.is_empty()); + + assert_eq!(b.force_insert_keep_left(0, 30), Ok(None)); + assert_eq!(b.force_insert_keep_left(0, 10), Ok(None)); + assert_eq!(b.force_insert_keep_left(1, 20), Ok(None)); + assert_eq!(b.force_insert_keep_left(3, 40), Ok(None)); + assert_eq!(*b, vec![10, 20, 30, 40]); + // at capacity. 
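		// Inserting at the very end cannot keep everything to its left and still make room, so it
		// fails; inserting further to the left succeeds and drops the last element.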
+ assert_eq!(b.force_insert_keep_left(4, 41), Err(41)); + assert_eq!(*b, vec![10, 20, 30, 40]); + assert_eq!(b.force_insert_keep_left(3, 31), Ok(Some(40))); + assert_eq!(*b, vec![10, 20, 30, 31]); + assert_eq!(b.force_insert_keep_left(1, 11), Ok(Some(31))); + assert_eq!(*b, vec![10, 11, 20, 30]); + assert_eq!(b.force_insert_keep_left(0, 1), Ok(Some(30))); + assert_eq!(*b, vec![1, 10, 11, 20]); + + let mut z: BoundedVec> = bounded_vec![]; + assert!(z.is_empty()); + assert_eq!(z.force_insert_keep_left(0, 10), Err(10)); + assert!(z.is_empty()); + } + + #[test] + fn force_insert_keep_right_works() { + let mut b: BoundedVec> = bounded_vec![]; + assert_eq!(b.force_insert_keep_right(1, 10), Err(10)); + assert!(b.is_empty()); + + assert_eq!(b.force_insert_keep_right(0, 30), Ok(None)); + assert_eq!(b.force_insert_keep_right(0, 10), Ok(None)); + assert_eq!(b.force_insert_keep_right(1, 20), Ok(None)); + assert_eq!(b.force_insert_keep_right(3, 40), Ok(None)); + assert_eq!(*b, vec![10, 20, 30, 40]); + + // at capacity. + assert_eq!(b.force_insert_keep_right(0, 0), Err(0)); + assert_eq!(*b, vec![10, 20, 30, 40]); + assert_eq!(b.force_insert_keep_right(1, 11), Ok(Some(10))); + assert_eq!(*b, vec![11, 20, 30, 40]); + assert_eq!(b.force_insert_keep_right(3, 31), Ok(Some(11))); + assert_eq!(*b, vec![20, 30, 31, 40]); + assert_eq!(b.force_insert_keep_right(4, 41), Ok(Some(20))); + assert_eq!(*b, vec![30, 31, 40, 41]); + + assert_eq!(b.force_insert_keep_right(5, 69), Err(69)); + assert_eq!(*b, vec![30, 31, 40, 41]); + + let mut z: BoundedVec> = bounded_vec![]; + assert!(z.is_empty()); + assert_eq!(z.force_insert_keep_right(0, 10), Err(10)); + assert!(z.is_empty()); + } + + #[test] + fn bound_returns_correct_value() { + assert_eq!(BoundedVec::>::bound(), 7); + } + + #[test] + fn try_insert_works() { + let mut bounded: BoundedVec> = bounded_vec![1, 2, 3]; + bounded.try_insert(1, 0).unwrap(); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + + assert!(bounded.try_insert(0, 9).is_err()); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + } + + #[test] + fn constructor_macro_works() { + // With values. Use some brackets to make sure the macro doesn't expand. + let bv: BoundedVec<(u32, u32), ConstU32<3>> = bounded_vec![(1, 2), (1, 2), (1, 2)]; + assert_eq!(bv, vec![(1, 2), (1, 2), (1, 2)]); + + // With repetition. + let bv: BoundedVec<(u32, u32), ConstU32<3>> = bounded_vec![(1, 2); 3]; + assert_eq!(bv, vec![(1, 2), (1, 2), (1, 2)]); + } + + #[test] + #[should_panic(expected = "insertion index (is 9) should be <= len (is 3)")] + fn try_inert_panics_if_oob() { + let mut bounded: BoundedVec> = bounded_vec![1, 2, 3]; + bounded.try_insert(9, 0).unwrap(); + } + + #[test] + fn try_push_works() { + let mut bounded: BoundedVec> = bounded_vec![1, 2, 3]; + bounded.try_push(0).unwrap(); + assert_eq!(*bounded, vec![1, 2, 3, 0]); + + assert!(bounded.try_push(9).is_err()); + } + + #[test] + fn deref_vec_coercion_works() { + let bounded: BoundedVec> = bounded_vec![1, 2, 3]; + // these methods come from deref-ed vec. + assert_eq!(bounded.len(), 3); + assert!(bounded.iter().next().is_some()); + assert!(!bounded.is_empty()); + } + + #[test] + fn deref_slice_coercion_works() { + let bounded = BoundedSlice::>::try_from(&[1, 2, 3][..]).unwrap(); + // these methods come from deref-ed slice. 
+ assert_eq!(bounded.len(), 3); + assert!(bounded.iter().next().is_some()); + assert!(!bounded.is_empty()); + } + + #[test] + fn try_mutate_works() { + let bounded: BoundedVec> = bounded_vec![1, 2, 3, 4, 5, 6]; + let bounded = bounded.try_mutate(|v| v.push(7)).unwrap(); + assert_eq!(bounded.len(), 7); + assert!(bounded.try_mutate(|v| v.push(8)).is_none()); + } + + #[test] + fn slice_indexing_works() { + let bounded: BoundedVec> = bounded_vec![1, 2, 3, 4, 5, 6]; + assert_eq!(&bounded[0..=2], &[1, 2, 3]); + } + + #[test] + fn vec_eq_works() { + let bounded: BoundedVec> = bounded_vec![1, 2, 3, 4, 5, 6]; + assert_eq!(bounded, vec![1, 2, 3, 4, 5, 6]); + } + + #[test] + fn too_big_vec_fail_to_decode() { + let v: Vec = vec![1, 2, 3, 4, 5]; + assert_eq!( + BoundedVec::>::decode(&mut &v.encode()[..]), + Err("BoundedVec exceeds its limit".into()), + ); + } + + #[test] + fn dont_consume_more_data_than_bounded_len() { + let v: Vec = vec![1, 2, 3, 4, 5]; + let data = v.encode(); + let data_input = &mut &data[..]; + + BoundedVec::>::decode(data_input).unwrap_err(); + assert_eq!(data_input.len(), data.len() - Compact::::compact_len(&(data.len() as u32))); + } + + #[test] + fn eq_works() { + // of same type + let b1: BoundedVec> = bounded_vec![1, 2, 3]; + let b2: BoundedVec> = bounded_vec![1, 2, 3]; + assert_eq!(b1, b2); + + // of different type, but same value and bound. + crate::parameter_types! { + B1: u32 = 7; + B2: u32 = 7; + } + let b1: BoundedVec = bounded_vec![1, 2, 3]; + let b2: BoundedVec = bounded_vec![1, 2, 3]; + assert_eq!(b1, b2); + } + + #[test] + fn ord_works() { + use std::cmp::Ordering; + let b1: BoundedVec> = bounded_vec![1, 2, 3]; + let b2: BoundedVec> = bounded_vec![1, 3, 2]; + + // ordering for vec is lexicographic. + assert_eq!(b1.cmp(&b2), Ordering::Less); + assert_eq!(b1.cmp(&b2), b1.into_inner().cmp(&b2.into_inner())); + } + + #[test] + fn try_extend_works() { + let mut b: BoundedVec> = bounded_vec![1, 2, 3]; + + assert!(b.try_extend(vec![4].into_iter()).is_ok()); + assert_eq!(*b, vec![1, 2, 3, 4]); + + assert!(b.try_extend(vec![5].into_iter()).is_ok()); + assert_eq!(*b, vec![1, 2, 3, 4, 5]); + + assert!(b.try_extend(vec![6].into_iter()).is_err()); + assert_eq!(*b, vec![1, 2, 3, 4, 5]); + + let mut b: BoundedVec> = bounded_vec![1, 2, 3]; + assert!(b.try_extend(vec![4, 5].into_iter()).is_ok()); + assert_eq!(*b, vec![1, 2, 3, 4, 5]); + + let mut b: BoundedVec> = bounded_vec![1, 2, 3]; + assert!(b.try_extend(vec![4, 5, 6].into_iter()).is_err()); + assert_eq!(*b, vec![1, 2, 3]); + } + + #[test] + fn test_serializer() { + let c: BoundedVec> = bounded_vec![0, 1, 2]; + assert_eq!(serde_json::json!(&c).to_string(), r#"[0,1,2]"#); + } + + #[test] + fn test_deserializer() { + let c: BoundedVec> = serde_json::from_str(r#"[0,1,2]"#).unwrap(); + + assert_eq!(c.len(), 3); + assert_eq!(c[0], 0); + assert_eq!(c[1], 1); + assert_eq!(c[2], 2); + } + + #[test] + fn test_deserializer_bound() { + let c: BoundedVec> = serde_json::from_str(r#"[0,1,2]"#).unwrap(); + + assert_eq!(c.len(), 3); + assert_eq!(c[0], 0); + assert_eq!(c[1], 1); + assert_eq!(c[2], 2); + } + + #[test] + fn test_deserializer_failed() { + let c: Result>, serde_json::error::Error> = serde_json::from_str(r#"[0,1,2,3,4]"#); + + match c { + Err(msg) => assert_eq!(msg.to_string(), "out of bounds at line 1 column 11"), + _ => unreachable!("deserializer must raise error"), + } + } + + #[test] + fn bounded_vec_try_from_works() { + assert!(BoundedVec::>::try_from(vec![0]).is_ok()); + assert!(BoundedVec::>::try_from(vec![0, 1]).is_ok()); + 
assert!(BoundedVec::>::try_from(vec![0, 1, 2]).is_err()); + } + + #[test] + fn bounded_slice_try_from_works() { + assert!(BoundedSlice::>::try_from(&[0][..]).is_ok()); + assert!(BoundedSlice::>::try_from(&[0, 1][..]).is_ok()); + assert!(BoundedSlice::>::try_from(&[0, 1, 2][..]).is_err()); + } + + #[test] + fn can_be_collected() { + let b1: BoundedVec> = bounded_vec![1, 2, 3, 4]; + let b2: BoundedVec> = b1.iter().map(|x| x + 1).try_collect().unwrap(); + assert_eq!(b2, vec![2, 3, 4, 5]); + + // can also be collected into a collection of length 4. + let b2: BoundedVec> = b1.iter().map(|x| x + 1).try_collect().unwrap(); + assert_eq!(b2, vec![2, 3, 4, 5]); + + // can be mutated further into iterators that are `ExactSizedIterator`. + let b2: BoundedVec> = b1.iter().map(|x| x + 1).rev().try_collect().unwrap(); + assert_eq!(b2, vec![5, 4, 3, 2]); + + let b2: BoundedVec> = b1.iter().map(|x| x + 1).rev().skip(2).try_collect().unwrap(); + assert_eq!(b2, vec![3, 2]); + let b2: BoundedVec> = b1.iter().map(|x| x + 1).rev().skip(2).try_collect().unwrap(); + assert_eq!(b2, vec![3, 2]); + + let b2: BoundedVec> = b1.iter().map(|x| x + 1).rev().take(2).try_collect().unwrap(); + assert_eq!(b2, vec![5, 4]); + let b2: BoundedVec> = b1.iter().map(|x| x + 1).rev().take(2).try_collect().unwrap(); + assert_eq!(b2, vec![5, 4]); + + // but these worn't work + let b2: Result>, _> = b1.iter().map(|x| x + 1).try_collect(); + assert!(b2.is_err()); + + let b2: Result>, _> = b1.iter().map(|x| x + 1).rev().take(2).try_collect(); + assert!(b2.is_err()); + } + + #[test] + fn bounded_vec_debug_works() { + let bound = BoundedVec::>::truncate_from(vec![1, 2, 3]); + assert_eq!(format!("{:?}", bound), "BoundedVec([1, 2, 3], 5)"); + } + + #[test] + fn bounded_slice_debug_works() { + let bound = BoundedSlice::>::truncate_from(&[1, 2, 3]); + assert_eq!(format!("{:?}", bound), "BoundedSlice([1, 2, 3], 5)"); + } + + #[test] + fn bounded_vec_sort_by_key_works() { + let mut v: BoundedVec> = bounded_vec![-5, 4, 1, -3, 2]; + // Sort by absolute value. 
+ v.sort_by_key(|k| k.abs()); + assert_eq!(v, vec![1, 2, -3, 4, -5]); + } + + #[test] + fn bounded_vec_truncate_from_works() { + let unbound = vec![1, 2, 3, 4, 5]; + let bound = BoundedVec::>::truncate_from(unbound.clone()); + assert_eq!(bound, vec![1, 2, 3]); + } + + #[test] + fn bounded_slice_truncate_from_works() { + let unbound = [1, 2, 3, 4, 5]; + let bound = BoundedSlice::>::truncate_from(&unbound); + assert_eq!(bound, &[1, 2, 3][..]); + } + + #[test] + fn bounded_slice_partialeq_slice_works() { + let unbound = [1, 2, 3]; + let bound = BoundedSlice::>::truncate_from(&unbound); + + assert_eq!(bound, &unbound[..]); + assert!(bound == &unbound[..]); + } + + #[test] + fn bounded_vec_try_rotate_left_works() { + let o = BoundedVec::>::truncate_from(vec![1, 2, 3]); + let mut bound = o.clone(); + + bound.try_rotate_left(0).unwrap(); + assert_eq!(bound, o); + bound.try_rotate_left(3).unwrap(); + assert_eq!(bound, o); + + bound.try_rotate_left(4).unwrap_err(); + assert_eq!(bound, o); + + bound.try_rotate_left(1).unwrap(); + assert_eq!(bound, vec![2, 3, 1]); + bound.try_rotate_left(2).unwrap(); + assert_eq!(bound, o); + } + + #[test] + fn bounded_vec_try_rotate_right_works() { + let o = BoundedVec::>::truncate_from(vec![1, 2, 3]); + let mut bound = o.clone(); + + bound.try_rotate_right(0).unwrap(); + assert_eq!(bound, o); + bound.try_rotate_right(3).unwrap(); + assert_eq!(bound, o); + + bound.try_rotate_right(4).unwrap_err(); + assert_eq!(bound, o); + + bound.try_rotate_right(1).unwrap(); + assert_eq!(bound, vec![3, 1, 2]); + bound.try_rotate_right(2).unwrap(); + assert_eq!(bound, o); + } + + // Just a test that structs containing `BoundedVec` and `BoundedSlice` can derive `Hash`. (This was broken when + // they were deriving `Hash`). + #[test] + #[cfg(feature = "std")] + fn container_can_derive_hash() { + #[derive(Hash)] + struct Foo<'a> { + bar: u8, + slice: BoundedSlice<'a, usize, ConstU32<4>>, + map: BoundedVec>, + } + let _foo = Foo { bar: 42, slice: BoundedSlice::truncate_from(&[0, 1][..]), map: BoundedVec::default() }; + } + + #[test] + fn is_full_works() { + let mut bounded: BoundedVec> = bounded_vec![1, 2, 3]; + assert!(!bounded.is_full()); + bounded.try_insert(1, 0).unwrap(); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + + assert!(bounded.is_full()); + assert!(bounded.try_insert(0, 9).is_err()); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + } +} diff --git a/bounded-collections/src/const_int.rs b/bounded-collections/src/const_int.rs new file mode 100644 index 000000000..df1f4a16f --- /dev/null +++ b/bounded-collections/src/const_int.rs @@ -0,0 +1,153 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use crate::{Get, TypedGet}; +use core::marker::PhantomData; + +// Numbers which have constant upper and lower bounds. +trait ConstBounded { + const MIN: T; + const MAX: T; +} + +macro_rules! 
impl_const_bounded { + ($bound:ty, $t:ty) => { + impl ConstBounded<$bound> for $t { + const MIN: $bound = <$t>::MIN as $bound; + const MAX: $bound = <$t>::MAX as $bound; + } + }; +} + +impl_const_bounded!(u128, u8); +impl_const_bounded!(u128, u16); +impl_const_bounded!(u128, u32); +impl_const_bounded!(u128, u64); +impl_const_bounded!(u128, u128); +impl_const_bounded!(u128, usize); + +impl_const_bounded!(i128, i8); +impl_const_bounded!(i128, i16); +impl_const_bounded!(i128, i32); +impl_const_bounded!(i128, i64); +impl_const_bounded!(i128, i128); + +// Check whether a unsigned integer is within the bounds of a type. +struct CheckOverflowU128, const N: u128>(PhantomData); + +impl, const N: u128> CheckOverflowU128 { + const ASSERTION: () = assert!(N >= T::MIN && N <= T::MAX); +} + +// Check whether an integer is within the bounds of a type. +struct CheckOverflowI128, const N: i128>(PhantomData); + +impl, const N: i128> CheckOverflowI128 { + const ASSERTION: () = assert!(N >= T::MIN && N <= T::MAX); +} + +/// Const getter for unsigned integers. +/// +/// # Compile-time checks +/// +/// ```compile_fail +/// # use bounded_collections::{ConstUint, Get}; +/// let _ = as Get>::get(); +/// ``` +#[derive(Default, Clone)] +pub struct ConstUint; + +impl core::fmt::Debug for ConstUint { + fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { + fmt.write_str(&alloc::format!("ConstUint<{}>", N)) + } +} + +impl TypedGet for ConstUint { + type Type = u128; + fn get() -> u128 { + N + } +} + +/// Const getter for signed integers. +#[derive(Default, Clone)] +pub struct ConstInt; + +impl core::fmt::Debug for ConstInt { + fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { + fmt.write_str(&alloc::format!("ConstInt<{}>", N)) + } +} + +impl TypedGet for ConstInt { + type Type = i128; + fn get() -> i128 { + N + } +} + +macro_rules! 
impl_const_int { + ($t:ident, $check:ident, $bound:ty, $target:ty) => { + impl Get<$target> for $t { + fn get() -> $target { + let _ = <$check<$target, N>>::ASSERTION; + N as $target + } + } + impl Get> for $t { + fn get() -> Option<$target> { + let _ = <$check<$target, N>>::ASSERTION; + Some(N as $target) + } + } + }; +} + +impl_const_int!(ConstUint, CheckOverflowU128, u128, u8); +impl_const_int!(ConstUint, CheckOverflowU128, u128, u16); +impl_const_int!(ConstUint, CheckOverflowU128, u128, u32); +impl_const_int!(ConstUint, CheckOverflowU128, u128, u64); +impl_const_int!(ConstUint, CheckOverflowU128, u128, u128); +impl_const_int!(ConstUint, CheckOverflowU128, u128, usize); + +impl_const_int!(ConstInt, CheckOverflowI128, i128, i8); +impl_const_int!(ConstInt, CheckOverflowI128, i128, i16); +impl_const_int!(ConstInt, CheckOverflowI128, i128, i32); +impl_const_int!(ConstInt, CheckOverflowI128, i128, i64); +impl_const_int!(ConstInt, CheckOverflowI128, i128, i128); + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn const_uint_works() { + assert_eq!( as Get>::get(), 42); + assert_eq!( as Get>>::get(), Some(42)); + assert_eq!( as Get>::get(), 42); + assert_eq!( as Get>::get(), 42); + assert_eq!( as Get>::get(), 42); + assert_eq!( as Get>::get(), 42); + assert_eq!( as Get>::get(), 42); + assert_eq!( as TypedGet>::get(), 42); + // compile-time error + // assert_eq!( as Get>::get() as u128, 256); + } + + #[test] + fn const_int_works() { + assert_eq!( as Get>::get(), -42); + assert_eq!( as Get>>::get(), Some(-42)); + assert_eq!( as Get>::get(), -42); + assert_eq!( as Get>::get(), -42); + assert_eq!( as Get>::get(), -42); + assert_eq!( as Get>::get(), -42); + assert_eq!( as TypedGet>::get(), -42); + } +} diff --git a/bounded-collections/src/lib.rs b/bounded-collections/src/lib.rs new file mode 100644 index 000000000..c7d5d1f7d --- /dev/null +++ b/bounded-collections/src/lib.rs @@ -0,0 +1,277 @@ +// Copyright 2023 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Collection types that have an upper limit on how many elements that they can contain, and +//! supporting traits that aid in defining the limit. + +#![cfg_attr(not(feature = "std"), no_std)] + +pub extern crate alloc; + +pub mod bounded_btree_map; +pub mod bounded_btree_set; +pub mod bounded_vec; +pub mod const_int; +pub mod weak_bounded_vec; + +mod test; + +pub use bounded_btree_map::BoundedBTreeMap; +pub use bounded_btree_set::BoundedBTreeSet; +pub use bounded_vec::{BoundedSlice, BoundedVec}; +pub use const_int::{ConstInt, ConstUint}; +pub use weak_bounded_vec::WeakBoundedVec; + +/// A trait for querying a single value from a type defined in the trait. +/// +/// It is not required that the value is constant. +pub trait TypedGet { + /// The type which is returned. + type Type; + /// Return the current value. + fn get() -> Self::Type; +} + +/// A trait for querying a single value from a type. +/// +/// It is not required that the value is constant. +pub trait Get { + /// Return the current value. + fn get() -> T; +} + +impl Get for () { + fn get() -> T { + T::default() + } +} + +/// Implement Get by returning Default for any type that implements Default. +pub struct GetDefault; +impl Get for GetDefault { + fn get() -> T { + T::default() + } +} + +macro_rules! impl_const_get { + ($name:ident, $t:ty) => { + /// Const getter for a basic type. 
+ #[derive(Default, Clone)] + pub struct $name; + + #[cfg(feature = "std")] + impl core::fmt::Debug for $name { + fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { + fmt.write_str(&format!("{}<{}>", stringify!($name), T)) + } + } + #[cfg(not(feature = "std"))] + impl core::fmt::Debug for $name { + fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { + fmt.write_str("") + } + } + impl Get<$t> for $name { + fn get() -> $t { + T + } + } + impl Get> for $name { + fn get() -> Option<$t> { + Some(T) + } + } + impl TypedGet for $name { + type Type = $t; + fn get() -> $t { + T + } + } + }; +} + +impl_const_get!(ConstBool, bool); +impl_const_get!(ConstU8, u8); +impl_const_get!(ConstU16, u16); +impl_const_get!(ConstU32, u32); +impl_const_get!(ConstU64, u64); +impl_const_get!(ConstU128, u128); +impl_const_get!(ConstI8, i8); +impl_const_get!(ConstI16, i16); +impl_const_get!(ConstI32, i32); +impl_const_get!(ConstI64, i64); +impl_const_get!(ConstI128, i128); + +/// Try and collect into a collection `C`. +pub trait TryCollect { + /// The error type that gets returned when a collection can't be made from `self`. + type Error; + /// Consume self and try to collect the results into `C`. + /// + /// This is useful in preventing the undesirable `.collect().try_into()` call chain on + /// collections that need to be converted into a bounded type (e.g. `BoundedVec`). + fn try_collect(self) -> Result; +} + +/// Create new implementations of the [`Get`](crate::Get) trait. +/// +/// The so-called parameter type can be created in four different ways: +/// +/// - Using `const` to create a parameter type that provides a `const` getter. It is required that +/// the `value` is const. +/// +/// - Declare the parameter type without `const` to have more freedom when creating the value. +/// +/// NOTE: A more substantial version of this macro is available in `frame_support` crate which +/// allows mutable and persistant variants. +/// +/// # Examples +/// +/// ``` +/// # use bounded_collections::Get; +/// # use bounded_collections::parameter_types; +/// // This function cannot be used in a const context. +/// fn non_const_expression() -> u64 { 99 } +/// +/// const FIXED_VALUE: u64 = 10; +/// parameter_types! { +/// pub const Argument: u64 = 42 + FIXED_VALUE; +/// /// Visibility of the type is optional +/// OtherArgument: u64 = non_const_expression(); +/// } +/// +/// trait Config { +/// type Parameter: Get; +/// type OtherParameter: Get; +/// } +/// +/// struct Runtime; +/// impl Config for Runtime { +/// type Parameter = Argument; +/// type OtherParameter = OtherArgument; +/// } +/// ``` +/// +/// # Invalid example: +/// +/// ```compile_fail +/// # use bounded_collections::Get; +/// # use bounded_collections::parameter_types; +/// // This function cannot be used in a const context. +/// fn non_const_expression() -> u64 { 99 } +/// +/// parameter_types! { +/// pub const Argument: u64 = non_const_expression(); +/// } +/// ``` +#[macro_export] +macro_rules! 
parameter_types { + ( + $( #[ $attr:meta ] )* + $vis:vis const $name:ident: $type:ty = $value:expr; + $( $rest:tt )* + ) => ( + $( #[ $attr ] )* + $vis struct $name; + $crate::parameter_types!(@IMPL_CONST $name , $type , $value); + $crate::parameter_types!( $( $rest )* ); + ); + ( + $( #[ $attr:meta ] )* + $vis:vis $name:ident: $type:ty = $value:expr; + $( $rest:tt )* + ) => ( + $( #[ $attr ] )* + $vis struct $name; + $crate::parameter_types!(@IMPL $name, $type, $value); + $crate::parameter_types!( $( $rest )* ); + ); + () => (); + (@IMPL_CONST $name:ident, $type:ty, $value:expr) => { + impl $name { + /// Returns the value of this parameter type. + pub const fn get() -> $type { + $value + } + } + + impl> $crate::Get for $name { + fn get() -> I { + I::from(Self::get()) + } + } + + impl $crate::TypedGet for $name { + type Type = $type; + fn get() -> $type { + Self::get() + } + } + }; + (@IMPL $name:ident, $type:ty, $value:expr) => { + impl $name { + /// Returns the value of this parameter type. + pub fn get() -> $type { + $value + } + } + + impl> $crate::Get for $name { + fn get() -> I { + I::from(Self::get()) + } + } + + impl $crate::TypedGet for $name { + type Type = $type; + fn get() -> $type { + Self::get() + } + } + }; +} + +/// Build a bounded vec from the given literals. +/// +/// The type of the outcome must be known. +/// +/// Will not handle any errors and just panic if the given literals cannot fit in the corresponding +/// bounded vec type. Thus, this is only suitable for testing and non-consensus code. +#[macro_export] +#[cfg(feature = "std")] +macro_rules! bounded_vec { + ($ ($values:expr),* $(,)?) => { + { + $crate::alloc::vec![$($values),*].try_into().unwrap() + } + }; + ( $value:expr ; $repetition:expr ) => { + { + $crate::alloc::vec![$value ; $repetition].try_into().unwrap() + } + } +} + +/// Build a bounded btree-map from the given literals. +/// +/// The type of the outcome must be known. +/// +/// Will not handle any errors and just panic if the given literals cannot fit in the corresponding +/// bounded vec type. Thus, this is only suitable for testing and non-consensus code. +#[macro_export] +#[cfg(feature = "std")] +macro_rules! bounded_btree_map { + ($ ( $key:expr => $value:expr ),* $(,)?) => { + { + $crate::TryCollect::<$crate::BoundedBTreeMap<_, _, _>>::try_collect( + $crate::alloc::vec![$(($key, $value)),*].into_iter() + ).unwrap() + } + }; +} diff --git a/bounded-collections/src/test.rs b/bounded-collections/src/test.rs new file mode 100644 index 000000000..285ad37e8 --- /dev/null +++ b/bounded-collections/src/test.rs @@ -0,0 +1,50 @@ +// Copyright 2023 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Tests for the `bounded-collections` crate. 
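For a quick feel of how the crate's `parameter_types!`, `bounded_vec!`, and `Get` pieces defined above fit together outside of these unit tests, here is a minimal sketch (the parameter name `MaxItems`, the values, and the `main` wrapper are illustrative only; `bounded_vec!` needs the default `std` feature):

```rust
use bounded_collections::{bounded_vec, parameter_types, BoundedVec, ConstU32, Get};

parameter_types! {
	// Generates `pub struct MaxItems;` implementing `Get<u32>` and `TypedGet`.
	pub const MaxItems: u32 = 4;
}

fn main() {
	assert_eq!(<MaxItems as Get<u32>>::get(), 4);

	// `bounded_vec!` panics if the literals do not fit the bound, so it is meant
	// for tests and other non-consensus code.
	let items: BoundedVec<u8, MaxItems> = bounded_vec![1, 2, 3];
	assert_eq!(items.len(), 3);

	// `ConstU32` is the macro-free way to express a constant bound.
	let pair: BoundedVec<u8, ConstU32<2>> = bounded_vec![7; 2];
	assert_eq!(*pair, vec![7, 7]);
}
```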
+
+#![cfg(test)]
+
+use crate::*;
+use core::fmt::Debug;
+
+#[test]
+#[allow(path_statements)]
+fn const_impl_default_clone_debug() {
+	struct ImplsDefault<T: Debug + Default + Clone>(T);
+
+	ImplsDefault::<ConstBool<true>>;
+	ImplsDefault::<ConstBool<false>>;
+	ImplsDefault::<ConstU8<255>>;
+	ImplsDefault::<ConstU16<50>>;
+	ImplsDefault::<ConstU32<10>>;
+	ImplsDefault::<ConstU64<99>>;
+	ImplsDefault::<ConstU128<100>>;
+	ImplsDefault::<ConstI8<-127>>;
+	ImplsDefault::<ConstI16<-50>>;
+	ImplsDefault::<ConstI32<-10>>;
+	ImplsDefault::<ConstI64<-99>>;
+	ImplsDefault::<ConstI128<-100>>;
+}
+
+#[test]
+#[cfg(feature = "std")]
+fn const_debug_fmt() {
+	assert_eq!(format!("{:?}", ConstBool::<true> {}), "ConstBool<true>");
+	assert_eq!(format!("{:?}", ConstBool::<false> {}), "ConstBool<false>");
+	assert_eq!(format!("{:?}", ConstU8::<255> {}), "ConstU8<255>");
+	assert_eq!(format!("{:?}", ConstU16::<50> {}), "ConstU16<50>");
+	assert_eq!(format!("{:?}", ConstU32::<10> {}), "ConstU32<10>");
+	assert_eq!(format!("{:?}", ConstU64::<99> {}), "ConstU64<99>");
+	assert_eq!(format!("{:?}", ConstU128::<100> {}), "ConstU128<100>");
+	assert_eq!(format!("{:?}", ConstI8::<-127> {}), "ConstI8<-127>");
+	assert_eq!(format!("{:?}", ConstI16::<-50> {}), "ConstI16<-50>");
+	assert_eq!(format!("{:?}", ConstI32::<-10> {}), "ConstI32<-10>");
+	assert_eq!(format!("{:?}", ConstI64::<-99> {}), "ConstI64<-99>");
+	assert_eq!(format!("{:?}", ConstI128::<-100> {}), "ConstI128<-100>");
+}
diff --git a/bounded-collections/src/weak_bounded_vec.rs b/bounded-collections/src/weak_bounded_vec.rs
new file mode 100644
index 000000000..b6f0846e9
--- /dev/null
+++ b/bounded-collections/src/weak_bounded_vec.rs
@@ -0,0 +1,537 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2023 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits, types and structs to support putting a bounded vector into storage, as a raw value, map
+//! or a double map.
+
+use super::{BoundedSlice, BoundedVec};
+use crate::Get;
+use alloc::vec::Vec;
+use codec::{Decode, Encode, MaxEncodedLen};
+use core::{
+	marker::PhantomData,
+	ops::{Deref, Index, IndexMut},
+	slice::SliceIndex,
+};
+#[cfg(feature = "serde")]
+use serde::{
+	de::{Error, SeqAccess, Visitor},
+	Deserialize, Deserializer, Serialize,
+};
+
+/// A weakly bounded vector.
+///
+/// It has implementations for efficient append and length decoding, as with a normal `Vec<_>`, once
+/// put into storage as a raw value, map or double-map.
+///
+/// The length of the vec is not strictly bounded. Decoding a vec with more elements than the bound
+/// is accepted, and some methods allow bypassing the restriction with warnings.
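The practical difference from the strictly bounded vector is easiest to see at decode time. A minimal sketch, assuming the crate's `codec` dependency (`parity-scale-codec`) is in scope under that name:

```rust
use bounded_collections::{BoundedVec, ConstU32, WeakBoundedVec};
use codec::{Decode, Encode};

fn main() {
	let oversized: Vec<u32> = vec![1, 2, 3, 4, 5];
	let encoded = oversized.encode();

	// The strict variant refuses to decode more elements than its bound...
	assert!(BoundedVec::<u32, ConstU32<4>>::decode(&mut &encoded[..]).is_err());

	// ...while the weak variant accepts them and only logs a warning.
	let weak = WeakBoundedVec::<u32, ConstU32<4>>::decode(&mut &encoded[..]).unwrap();
	assert_eq!(*weak, oversized);
}
```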
+#[cfg_attr(feature = "serde", derive(Serialize), serde(transparent))] +#[derive(Encode, scale_info::TypeInfo)] +#[scale_info(skip_type_params(S))] +pub struct WeakBoundedVec( + pub(super) Vec, + #[cfg_attr(feature = "serde", serde(skip_serializing))] PhantomData, +); + +#[cfg(feature = "serde")] +impl<'de, T, S: Get> Deserialize<'de> for WeakBoundedVec +where + T: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct VecVisitor>(PhantomData<(T, S)>); + + impl<'de, T, S: Get> Visitor<'de> for VecVisitor + where + T: Deserialize<'de>, + { + type Value = Vec; + + fn expecting(&self, formatter: &mut alloc::fmt::Formatter) -> alloc::fmt::Result { + formatter.write_str("a sequence") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let size = seq.size_hint().unwrap_or(0); + let max = match usize::try_from(S::get()) { + Ok(n) => n, + Err(_) => return Err(A::Error::custom("can't convert to usize")), + }; + if size > max { + log::warn!( + target: "runtime", + "length of a bounded vector while deserializing is not respected.", + ); + } + let mut values = Vec::with_capacity(size); + + while let Some(value) = seq.next_element()? { + values.push(value); + if values.len() > max { + log::warn!( + target: "runtime", + "length of a bounded vector while deserializing is not respected.", + ); + } + } + + Ok(values) + } + } + + let visitor: VecVisitor = VecVisitor(PhantomData); + deserializer + .deserialize_seq(visitor) + .map(|v| WeakBoundedVec::::try_from(v).map_err(|_| Error::custom("out of bounds")))? + } +} + +impl> Decode for WeakBoundedVec { + fn decode(input: &mut I) -> Result { + let inner = Vec::::decode(input)?; + Ok(Self::force_from(inner, Some("decode"))) + } + + fn skip(input: &mut I) -> Result<(), codec::Error> { + Vec::::skip(input) + } +} + +impl WeakBoundedVec { + /// Create `Self` from `t` without any checks. + fn unchecked_from(t: Vec) -> Self { + Self(t, Default::default()) + } + + /// Consume self, and return the inner `Vec`. Henceforth, the `Vec<_>` can be altered in an + /// arbitrary way. At some point, if the reverse conversion is required, `TryFrom>` can + /// be used. + /// + /// This is useful for cases if you need access to an internal API of the inner `Vec<_>` which + /// is not provided by the wrapper `WeakBoundedVec`. + pub fn into_inner(self) -> Vec { + self.0 + } + + /// Exactly the same semantics as [`Vec::remove`]. + /// + /// # Panics + /// + /// Panics if `index` is out of bounds. + pub fn remove(&mut self, index: usize) -> T { + self.0.remove(index) + } + + /// Exactly the same semantics as [`Vec::swap_remove`]. + /// + /// # Panics + /// + /// Panics if `index` is out of bounds. + pub fn swap_remove(&mut self, index: usize) -> T { + self.0.swap_remove(index) + } + + /// Exactly the same semantics as [`Vec::retain`]. + pub fn retain bool>(&mut self, f: F) { + self.0.retain(f) + } + + /// Exactly the same semantics as [`slice::get_mut`]. + pub fn get_mut>(&mut self, index: I) -> Option<&mut >::Output> { + self.0.get_mut(index) + } +} + +impl> WeakBoundedVec { + /// Get the bound of the type in `usize`. + pub fn bound() -> usize { + S::get() as usize + } + + /// Create `Self` from `t` without any checks. Logs warnings if the bound is not being + /// respected. The additional scope can be used to indicate where a potential overflow is + /// happening. 
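A small usage sketch of the escape hatch described above (the scope string and the bound are arbitrary):

```rust
use bounded_collections::{ConstU32, WeakBoundedVec};

fn main() {
	// Five elements against a bound of three: the value is kept as-is and a
	// warning naming the given scope is logged instead of returning an error.
	let weak: WeakBoundedVec<u8, ConstU32<3>> =
		WeakBoundedVec::force_from(vec![1, 2, 3, 4, 5], Some("example"));
	assert_eq!(weak.len(), 5);
	assert!(weak.is_full());
}
```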
+ pub fn force_from(t: Vec, scope: Option<&'static str>) -> Self { + if t.len() > Self::bound() { + log::warn!( + target: "runtime", + "length of a bounded vector in scope {} is not respected.", + scope.unwrap_or("UNKNOWN"), + ); + } + + Self::unchecked_from(t) + } + + /// Consumes self and mutates self via the given `mutate` function. + /// + /// If the outcome of mutation is within bounds, `Some(Self)` is returned. Else, `None` is + /// returned. + /// + /// This is essentially a *consuming* shorthand [`Self::into_inner`] -> `...` -> + /// [`Self::try_from`]. + pub fn try_mutate(mut self, mut mutate: impl FnMut(&mut Vec)) -> Option { + mutate(&mut self.0); + (self.0.len() <= Self::bound()).then(move || self) + } + + /// Exactly the same semantics as [`Vec::insert`], but returns an `Err` (and is a noop) if the + /// new length of the vector exceeds `S`. + /// + /// # Panics + /// + /// Panics if `index > len`. + pub fn try_insert(&mut self, index: usize, element: T) -> Result<(), ()> { + if self.len() < Self::bound() { + self.0.insert(index, element); + Ok(()) + } else { + Err(()) + } + } + + /// Exactly the same semantics as [`Vec::push`], but returns an `Err` (and is a noop) if the + /// new length of the vector exceeds `S`. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds isize::MAX bytes. + pub fn try_push(&mut self, element: T) -> Result<(), ()> { + if self.len() < Self::bound() { + self.0.push(element); + Ok(()) + } else { + Err(()) + } + } + + /// Returns true if this collection is full. + pub fn is_full(&self) -> bool { + self.len() >= Self::bound() + } +} + +impl Default for WeakBoundedVec { + fn default() -> Self { + // the bound cannot be below 0, which is satisfied by an empty vector + Self::unchecked_from(Vec::default()) + } +} + +impl core::fmt::Debug for WeakBoundedVec +where + Vec: core::fmt::Debug, + S: Get, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("WeakBoundedVec").field(&self.0).field(&Self::bound()).finish() + } +} + +impl Clone for WeakBoundedVec +where + T: Clone, +{ + fn clone(&self) -> Self { + // bound is retained + Self::unchecked_from(self.0.clone()) + } +} + +impl> TryFrom> for WeakBoundedVec { + type Error = (); + fn try_from(t: Vec) -> Result { + if t.len() <= Self::bound() { + // explicit check just above + Ok(Self::unchecked_from(t)) + } else { + Err(()) + } + } +} + +// It is okay to give a non-mutable reference of the inner vec to anyone. +impl AsRef> for WeakBoundedVec { + fn as_ref(&self) -> &Vec { + &self.0 + } +} + +impl AsRef<[T]> for WeakBoundedVec { + fn as_ref(&self) -> &[T] { + &self.0 + } +} + +impl AsMut<[T]> for WeakBoundedVec { + fn as_mut(&mut self) -> &mut [T] { + &mut self.0 + } +} + +// will allow for immutable all operations of `Vec` on `WeakBoundedVec`. +impl Deref for WeakBoundedVec { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +// Allows for indexing similar to a normal `Vec`. Can panic if out of bound. 
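Together with the `Deref` impl above, the indexing impls below give the wrapper the familiar read-only `Vec` surface; a minimal sketch:

```rust
use bounded_collections::{ConstU32, WeakBoundedVec};

fn main() {
	let w: WeakBoundedVec<u8, ConstU32<8>> = vec![10, 20, 30].try_into().unwrap();

	assert_eq!(w[0], 10); // `Index<usize>`; panics when out of bounds
	assert_eq!(&w[1..], &[20, 30][..]); // range indexing through `SliceIndex`
	assert_eq!(w.len(), 3); // `len`, `iter`, etc. come via `Deref<Target = Vec<T>>`
}
```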
+impl Index for WeakBoundedVec +where + I: SliceIndex<[T]>, +{ + type Output = I::Output; + + #[inline] + fn index(&self, index: I) -> &Self::Output { + self.0.index(index) + } +} + +impl IndexMut for WeakBoundedVec +where + I: SliceIndex<[T]>, +{ + #[inline] + fn index_mut(&mut self, index: I) -> &mut Self::Output { + self.0.index_mut(index) + } +} + +impl core::iter::IntoIterator for WeakBoundedVec { + type Item = T; + type IntoIter = alloc::vec::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl<'a, T, S> core::iter::IntoIterator for &'a WeakBoundedVec { + type Item = &'a T; + type IntoIter = core::slice::Iter<'a, T>; + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl<'a, T, S> core::iter::IntoIterator for &'a mut WeakBoundedVec { + type Item = &'a mut T; + type IntoIter = core::slice::IterMut<'a, T>; + fn into_iter(self) -> Self::IntoIter { + self.0.iter_mut() + } +} + +impl codec::DecodeLength for WeakBoundedVec { + fn len(self_encoded: &[u8]) -> Result { + // `WeakBoundedVec` stored just a `Vec`, thus the length is at the beginning in + // `Compact` form, and same implementation as `Vec` can be used. + as codec::DecodeLength>::len(self_encoded) + } +} + +impl PartialEq> for WeakBoundedVec +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, rhs: &WeakBoundedVec) -> bool { + self.0 == rhs.0 + } +} + +impl PartialEq> for WeakBoundedVec +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, rhs: &BoundedVec) -> bool { + self.0 == rhs.0 + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialEq> for WeakBoundedVec +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, rhs: &BoundedSlice<'a, T, BoundRhs>) -> bool { + self.0 == rhs.0 + } +} + +impl> PartialEq> for WeakBoundedVec { + fn eq(&self, other: &Vec) -> bool { + &self.0 == other + } +} + +impl> Eq for WeakBoundedVec where T: Eq {} + +impl PartialOrd> for WeakBoundedVec +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &WeakBoundedVec) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl PartialOrd> for WeakBoundedVec +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &BoundedVec) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialOrd> for WeakBoundedVec +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &BoundedSlice<'a, T, BoundRhs>) -> Option { + (&*self.0).partial_cmp(other.0) + } +} + +impl> Ord for WeakBoundedVec { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl MaxEncodedLen for WeakBoundedVec +where + T: MaxEncodedLen, + S: Get, + WeakBoundedVec: Encode, +{ + fn max_encoded_len() -> usize { + // WeakBoundedVec encodes like Vec which encodes like [T], which is a compact u32 + // plus each item in the slice: + // See: https://docs.substrate.io/reference/scale-codec/ + codec::Compact(S::get()) + .encoded_size() + .saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::ConstU32; + use alloc::vec; + + #[test] + fn bound_returns_correct_value() { + assert_eq!(WeakBoundedVec::>::bound(), 7); + } + + #[test] + fn try_insert_works() { + let mut bounded: WeakBoundedVec> = vec![1, 2, 3].try_into().unwrap(); + bounded.try_insert(1, 0).unwrap(); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + + assert!(bounded.try_insert(0, 9).is_err()); 
+ assert_eq!(*bounded, vec![1, 0, 2, 3]); + } + + #[test] + #[should_panic(expected = "insertion index (is 9) should be <= len (is 3)")] + fn try_inert_panics_if_oob() { + let mut bounded: WeakBoundedVec> = vec![1, 2, 3].try_into().unwrap(); + bounded.try_insert(9, 0).unwrap(); + } + + #[test] + fn try_push_works() { + let mut bounded: WeakBoundedVec> = vec![1, 2, 3].try_into().unwrap(); + bounded.try_push(0).unwrap(); + assert_eq!(*bounded, vec![1, 2, 3, 0]); + + assert!(bounded.try_push(9).is_err()); + } + + #[test] + fn deref_coercion_works() { + let bounded: WeakBoundedVec> = vec![1, 2, 3].try_into().unwrap(); + // these methods come from deref-ed vec. + assert_eq!(bounded.len(), 3); + assert!(bounded.iter().next().is_some()); + assert!(!bounded.is_empty()); + } + + #[test] + fn try_mutate_works() { + let bounded: WeakBoundedVec> = vec![1, 2, 3, 4, 5, 6].try_into().unwrap(); + let bounded = bounded.try_mutate(|v| v.push(7)).unwrap(); + assert_eq!(bounded.len(), 7); + assert!(bounded.try_mutate(|v| v.push(8)).is_none()); + } + + #[test] + fn slice_indexing_works() { + let bounded: WeakBoundedVec> = vec![1, 2, 3, 4, 5, 6].try_into().unwrap(); + assert_eq!(&bounded[0..=2], &[1, 2, 3]); + } + + #[test] + fn vec_eq_works() { + let bounded: WeakBoundedVec> = vec![1, 2, 3, 4, 5, 6].try_into().unwrap(); + assert_eq!(bounded, vec![1, 2, 3, 4, 5, 6]); + } + + #[test] + fn too_big_succeed_to_decode() { + let v: Vec = vec![1, 2, 3, 4, 5]; + let w = WeakBoundedVec::>::decode(&mut &v.encode()[..]).unwrap(); + assert_eq!(v, *w); + } + + #[test] + fn is_full_works() { + let mut bounded: WeakBoundedVec> = vec![1, 2, 3].try_into().unwrap(); + assert!(!bounded.is_full()); + bounded.try_insert(1, 0).unwrap(); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + + assert!(bounded.is_full()); + assert!(bounded.try_insert(0, 9).is_err()); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + } +} diff --git a/contract-address/Cargo.toml b/contract-address/Cargo.toml deleted file mode 100644 index 9ba26c51d..000000000 --- a/contract-address/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "contract-address" -version = "0.2.0" -authors = ["Parity Technologies "] -license = "MIT" -homepage = "https://github.com/paritytech/parity-common" -repository = "https://github.com/paritytech/parity-common" -description = "A utility crate to create an ethereum contract address" -documentation = "https://docs.rs/contract-address/" -edition = "2018" -readme = "README.md" - -[dependencies] -ethereum-types = { version = "0.7", path = "../ethereum-types" } -rlp = { version = "0.4", path = "../rlp" } -keccak-hash = { version = "0.3", path = "../keccak-hash", default-features = false } - -[features] -default = [] -# this uses a nightly-only feature -# to embed REAMDE.md into lib.rs module docs -external_doc = [] - -[package.metadata.docs.rs] -# docs.rs builds the docs with nightly rust -features = ["external_doc"] diff --git a/contract-address/README.md b/contract-address/README.md deleted file mode 100644 index 8bf029d0b..000000000 --- a/contract-address/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Contract address - -Provides a function to create an ethereum contract address. - -## Examples - -Create an ethereum address from sender and nonce. 
- -```rust -use contract_address::{ - Address, U256, ContractAddress -}; -use std::str::FromStr; - -let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); -let contract_address = ContractAddress::from_sender_and_nonce(&sender, &U256::zero()); -``` diff --git a/contract-address/src/lib.rs b/contract-address/src/lib.rs deleted file mode 100644 index ea4050316..000000000 --- a/contract-address/src/lib.rs +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. - -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . - -#![cfg_attr(feature = "external_doc", feature(external_doc))] -#![cfg_attr(feature = "external_doc", doc(include = "../README.md"))] - -pub use ethereum_types::{Address, H256, U256}; -use keccak_hash::keccak; -use rlp::RlpStream; -use std::ops::Deref; - -/// Represents an ethereum contract address -#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] -pub struct ContractAddress(Address); - -impl ContractAddress { - /// Computes the address of a contract from the sender's address and the transaction nonce - pub fn from_sender_and_nonce(sender: &Address, nonce: &U256) -> Self { - let mut stream = RlpStream::new_list(2); - stream.append(sender); - stream.append(nonce); - - ContractAddress(Address::from(keccak(stream.as_raw()))) - } - - /// Computes the address of a contract from the sender's address, the salt and code hash - /// - /// pWASM `create2` scheme and EIP-1014 CREATE2 scheme - pub fn from_sender_salt_and_code(sender: &Address, salt: H256, code_hash: H256) -> Self { - let mut buffer = [0u8; 1 + 20 + 32 + 32]; - buffer[0] = 0xff; - &mut buffer[1..(1 + 20)].copy_from_slice(&sender[..]); - &mut buffer[(1 + 20)..(1 + 20 + 32)].copy_from_slice(&salt[..]); - &mut buffer[(1 + 20 + 32)..].copy_from_slice(&code_hash[..]); - - ContractAddress(Address::from(keccak(&buffer[..]))) - } - - /// Computes the address of a contract from the sender's address and the code hash - /// - /// Used by pwasm create ext. 
- pub fn from_sender_and_code(sender: &Address, code_hash: H256) -> Self { - let mut buffer = [0u8; 20 + 32]; - &mut buffer[..20].copy_from_slice(&sender[..]); - &mut buffer[20..].copy_from_slice(&code_hash[..]); - - ContractAddress(Address::from(keccak(&buffer[..]))) - } -} - -impl Deref for ContractAddress { - type Target = Address; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl From for Address { - fn from(contract_address: ContractAddress) -> Self { - contract_address.0 - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::str::FromStr; - - #[test] - fn test_from_sender_and_nonce() { - let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let expected = Address::from_str("3f09c73a5ed19289fb9bdc72f1742566df146f56").unwrap(); - - let actual = ContractAddress::from_sender_and_nonce(&sender, &U256::from(88)); - - assert_eq!(Address::from(actual), expected); - } - - #[test] - fn test_from_sender_salt_and_code_hash() { - let sender = Address::zero(); - let code_hash = - H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") - .unwrap(); - let expected_address = - Address::from_str("e33c0c7f7df4809055c3eba6c09cfe4baf1bd9e0").unwrap(); - - let contract_address = - ContractAddress::from_sender_salt_and_code(&sender, H256::zero(), code_hash); - - assert_eq!(Address::from(contract_address), expected_address); - } - - #[test] - fn test_from_sender_and_code_hash() { - let sender = Address::from_str("0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d").unwrap(); - let code_hash = - H256::from_str("d98f2e8134922f73748703c8e7084d42f13d2fa1439936ef5a3abcf5646fe83f") - .unwrap(); - let expected_address = - Address::from_str("064417880f5680b141ed7fcac031aad40df080b0").unwrap(); - - let contract_address = ContractAddress::from_sender_and_code(&sender, code_hash); - - assert_eq!(Address::from(contract_address), expected_address); - } -} diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md new file mode 100644 index 000000000..a9a5f415e --- /dev/null +++ b/ethbloom/CHANGELOG.md @@ -0,0 +1,50 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.14.1] - 2024-09-12 +- Updated `impl-serde` to 0.5. [#859](https://github.com/paritytech/parity-common/pull/859) +- Updated `impl-codec` to 0.7. [#860](https://github.com/paritytech/parity-common/pull/860) + +## [0.13.0] - 2022-09-20 +- Updated `fixed-hash` to 0.8. [#680](https://github.com/paritytech/parity-common/pull/680) + +## [0.12.1] - 2022-02-07 +- Updated `scale-info` to ">=1.0, <3". [#627](https://github.com/paritytech/parity-common/pull/627) + +## [0.12.0] - 2022-02-04 +### Breaking +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `impl-codec` to 0.6. [#623](https://github.com/paritytech/parity-common/pull/623) + +## [0.11.1] - 2021-09-30 +- Combined `scale-info` feature into `codec`. [#593](https://github.com/paritytech/parity-common/pull/593) + +## [0.11.0] - 2021-01-27 +### Breaking +- Updated `impl-codec` to 0.5. [#510](https://github.com/paritytech/parity-common/pull/510) + +### Potentially-breaking +- `serialize` feature no longer pulls `std`. [#503](https://github.com/paritytech/parity-common/pull/503) + +## [0.10.0] - 2021-01-05 +### Breaking +- Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) + +## [0.9.2] - 2020-05-18 +- Added `codec` feature. 
[#393](https://github.com/paritytech/parity-common/pull/393) + +## [0.9.1] - 2020-04-27 +- Added `arbitrary` feature. [#378](https://github.com/paritytech/parity-common/pull/378) + +## [0.9.0] - 2020-03-16 +- Removed `libc` feature. [#317](https://github.com/paritytech/parity-common/pull/317) +- License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) + +## [0.8.1] - 2019-10-24 +### Dependencies +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 5e3347991..9f858c889 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -1,32 +1,37 @@ [package] name = "ethbloom" -version = "0.7.0" +version = "0.14.1" authors = ["Parity Technologies "] description = "Ethereum bloom filter" -license = "MIT" +license = "MIT OR Apache-2.0" documentation = "https://docs.rs/ethbloom" homepage = "https://github.com/paritytech/parity-common" repository = "https://github.com/paritytech/parity-common" -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [dependencies] -tiny-keccak = "1.5" -crunchy = { version = "0.2", default-features = false, features = ["limit_256"] } -fixed-hash = { path = "../fixed-hash", version = "0.4", default-features = false } -impl-serde = { path = "../primitive-types/impls/serde", version = "0.2", default-features = false, optional = true } -impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false } +tiny-keccak = { version = "2.0", features = ["keccak"] } +crunchy = { version = "0.2.2", default-features = false, features = ["limit_256"] } +fixed-hash = { path = "../fixed-hash", version = "0.8", default-features = false } +impl-serde = { path = "../primitive-types/impls/serde", version = "0.5", default-features = false, optional = true } +impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.4", default-features = false, optional = true } +impl-codec = { version = "0.7.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } +scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false, optional = true } [dev-dependencies] -criterion = "0.3" -rand = "0.7" -hex-literal = "0.2" +criterion = "0.5.1" +rand = "0.8.0" +hex-literal = "0.4.1" [features] -default = ["std", "serialize", "libc", "rustc-hex"] +default = ["std", "rlp", "serialize", "rustc-hex"] std = ["fixed-hash/std", "crunchy/std"] -serialize = ["std", "impl-serde"] -libc = ["fixed-hash/libc"] +serialize = ["impl-serde"] rustc-hex = ["fixed-hash/rustc-hex"] +arbitrary = ["fixed-hash/arbitrary"] +rlp = ["impl-rlp"] +codec = ["impl-codec", "scale-info"] [[bench]] name = "bloom" diff --git a/ethbloom/benches/bloom.rs b/ethbloom/benches/bloom.rs index 2231832ed..f3de3b7b0 100644 --- a/ethbloom/benches/bloom.rs +++ b/ethbloom/benches/bloom.rs @@ -1,7 +1,15 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ use criterion::{criterion_group, criterion_main, Criterion}; use ethbloom::{Bloom, Input}; use hex_literal::hex; -use tiny_keccak::keccak256; +use tiny_keccak::{Hasher, Keccak}; fn test_bloom() -> Bloom { use std::str::FromStr; @@ -21,8 +29,17 @@ fn test_bloom() -> Bloom { 00000000000000000000000000000000\ 00000000000000000000000000000000\ 00000000000000000000000000000000\ - 00000000000000000000000000000000" - ).unwrap() + 00000000000000000000000000000000", + ) + .unwrap() +} + +fn keccak256(input: &[u8]) -> [u8; 32] { + let mut out = [0u8; 32]; + let mut keccak256 = Keccak::v256(); + keccak256.update(input); + keccak256.finalize(&mut out); + out } fn test_topic() -> Vec { diff --git a/ethbloom/benches/unrolling.rs b/ethbloom/benches/unrolling.rs index e35a33270..647528eb9 100644 --- a/ethbloom/benches/unrolling.rs +++ b/ethbloom/benches/unrolling.rs @@ -1,3 +1,11 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + use criterion::{criterion_group, criterion_main, Criterion}; use crunchy::unroll; use rand::RngCore; @@ -48,7 +56,7 @@ fn bench_backwards(c: &mut Criterion) { b.iter(|| { let other_data = random_data(); for i in 0..255 { - data[255-i] |= other_data[255-i]; + data[255 - i] |= other_data[255 - i]; } }); }); diff --git a/ethbloom/src/lib.rs b/ethbloom/src/lib.rs index ec6514c18..4f22f3247 100644 --- a/ethbloom/src/lib.rs +++ b/ethbloom/src/lib.rs @@ -1,73 +1,85 @@ -//! -//! ```rust -//! extern crate ethbloom; -//! #[macro_use] extern crate hex_literal; +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! ``` +//! use hex_literal::hex; //! use ethbloom::{Bloom, Input}; //! -//! fn main() { -//! use std::str::FromStr; -//! let bloom = Bloom::from_str( -//! "00000000000000000000000000000000\ -//! 00000000100000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000002020000000000000000000000\ -//! 00000000000000000000000800000000\ -//! 10000000000000000000000000000000\ -//! 00000000000000000000001000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000" -//! ).unwrap(); -//! let address = hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106"); -//! let topic = hex!("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"); +//! use std::str::FromStr; +//! let bloom = Bloom::from_str( +//! "00000000000000000000000000000000\ +//! 00000000100000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000002020000000000000000000000\ +//! 00000000000000000000000800000000\ +//! 10000000000000000000000000000000\ +//! 00000000000000000000001000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000" +//! 
).unwrap(); +//! let address = hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106"); +//! let topic = hex!("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"); //! -//! let mut my_bloom = Bloom::default(); -//! assert!(!my_bloom.contains_input(Input::Raw(&address))); -//! assert!(!my_bloom.contains_input(Input::Raw(&topic))); +//! let mut my_bloom = Bloom::default(); +//! assert!(!my_bloom.contains_input(Input::Raw(&address))); +//! assert!(!my_bloom.contains_input(Input::Raw(&topic))); //! -//! my_bloom.accrue(Input::Raw(&address)); -//! assert!(my_bloom.contains_input(Input::Raw(&address))); -//! assert!(!my_bloom.contains_input(Input::Raw(&topic))); +//! my_bloom.accrue(Input::Raw(&address)); +//! assert!(my_bloom.contains_input(Input::Raw(&address))); +//! assert!(!my_bloom.contains_input(Input::Raw(&topic))); //! -//! my_bloom.accrue(Input::Raw(&topic)); -//! assert!(my_bloom.contains_input(Input::Raw(&address))); -//! assert!(my_bloom.contains_input(Input::Raw(&topic))); -//! assert_eq!(my_bloom, bloom); -//! } +//! my_bloom.accrue(Input::Raw(&topic)); +//! assert!(my_bloom.contains_input(Input::Raw(&address))); +//! assert!(my_bloom.contains_input(Input::Raw(&topic))); +//! assert_eq!(my_bloom, bloom); //! ``` -//! #![cfg_attr(not(feature = "std"), no_std)] -use core::{ops, mem}; +use core::{mem, ops}; use crunchy::unroll; use fixed_hash::*; +#[cfg(feature = "codec")] +use impl_codec::impl_fixed_hash_codec; +#[cfg(feature = "rlp")] +use impl_rlp::impl_fixed_hash_rlp; #[cfg(feature = "serialize")] use impl_serde::impl_fixed_hash_serde; -use impl_rlp::impl_fixed_hash_rlp; -use tiny_keccak::keccak256; +use tiny_keccak::{Hasher, Keccak}; // 3 according to yellowpaper const BLOOM_BITS: u32 = 3; const BLOOM_SIZE: usize = 256; -construct_fixed_hash!{ +construct_fixed_hash! { /// Bloom hash type with 256 bytes (2048 bits) size. + #[cfg_attr(feature = "codec", derive(scale_info::TypeInfo))] pub struct Bloom(BLOOM_SIZE); } +#[cfg(feature = "rlp")] impl_fixed_hash_rlp!(Bloom, BLOOM_SIZE); +#[cfg(feature = "serialize")] +impl_fixed_hash_serde!(Bloom, BLOOM_SIZE); +#[cfg(feature = "codec")] +impl_fixed_hash_codec!(Bloom, BLOOM_SIZE); /// Returns log2. 
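For orientation before the `log2` helper below: each input sets `BLOOM_BITS = 3` bits in the 2048-bit filter, and each bit position is taken from two bytes of the keccak-256 hash masked down to 11 bits. A simplified sketch of that selection (illustrative only, not the crate's exact code):

```rust
/// Illustrative only: the three bit positions of an Ethereum log bloom
/// (2048 bits) derived from a keccak-256 digest, as in the yellow paper.
fn bloom_bit_positions(hash: &[u8; 32]) -> [usize; 3] {
	let mut positions = [0usize; 3];
	for i in 0..3 {
		let hi = hash[i * 2] as usize;
		let lo = hash[i * 2 + 1] as usize;
		positions[i] = ((hi << 8) | lo) & 0x7ff; // 0x7ff == 2048 - 1
	}
	positions
}
```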
fn log2(x: usize) -> u32 { if x <= 1 { - return 0; + return 0 } let n = x.leading_zeros(); @@ -87,7 +99,13 @@ enum Hash<'a> { impl<'a> From> for Hash<'a> { fn from(input: Input<'a>) -> Self { match input { - Input::Raw(raw) => Hash::Owned(keccak256(raw)), + Input::Raw(raw) => { + let mut out = [0u8; 32]; + let mut keccak256 = Keccak::v256(); + keccak256.update(raw); + keccak256.finalize(&mut out); + Hash::Owned(out) + }, Input::Hash(hash) => Hash::Ref(hash), } } @@ -139,14 +157,17 @@ impl Bloom { self.contains_bloom(&bloom) } - pub fn contains_bloom<'a, B>(&self, bloom: B) -> bool where BloomRef<'a>: From { - let bloom_ref: BloomRef = bloom.into(); + pub fn contains_bloom<'a, B>(&self, bloom: B) -> bool + where + BloomRef<'a>: From, + { + let bloom_ref: BloomRef<'_> = bloom.into(); // workaround for https://github.com/rust-lang/rust/issues/43644 self.contains_bloom_ref(bloom_ref) } - fn contains_bloom_ref(&self, bloom: BloomRef) -> bool { - let self_ref: BloomRef = self.into(); + fn contains_bloom_ref(&self, bloom: BloomRef<'_>) -> bool { + let self_ref: BloomRef<'_> = self.into(); self_ref.contains_bloom(bloom) } @@ -158,7 +179,7 @@ impl Bloom { let mask = bloom_bits - 1; let bloom_bytes = (log2(bloom_bits) + 7) / 8; - let hash: Hash = input.into(); + let hash: Hash<'_> = input.into(); // must be a power of 2 assert_eq!(m & (m - 1), 0); @@ -182,8 +203,11 @@ impl Bloom { } } - pub fn accrue_bloom<'a, B>(&mut self, bloom: B) where BloomRef<'a>: From { - let bloom_ref: BloomRef = bloom.into(); + pub fn accrue_bloom<'a, B>(&mut self, bloom: B) + where + BloomRef<'a>: From, + { + let bloom_ref: BloomRef<'_> = bloom.into(); assert_eq!(self.0.len(), BLOOM_SIZE); assert_eq!(bloom_ref.0.len(), BLOOM_SIZE); for i in 0..BLOOM_SIZE { @@ -212,15 +236,18 @@ impl<'a> BloomRef<'a> { } #[allow(clippy::trivially_copy_pass_by_ref)] - pub fn contains_bloom<'b, B>(&self, bloom: B) -> bool where BloomRef<'b>: From { - let bloom_ref: BloomRef = bloom.into(); + pub fn contains_bloom<'b, B>(&self, bloom: B) -> bool + where + BloomRef<'b>: From, + { + let bloom_ref: BloomRef<'_> = bloom.into(); assert_eq!(self.0.len(), BLOOM_SIZE); assert_eq!(bloom_ref.0.len(), BLOOM_SIZE); for i in 0..BLOOM_SIZE { let a = self.0[i]; let b = bloom_ref.0[i]; if (a & b) != b { - return false; + return false } } true @@ -244,16 +271,14 @@ impl<'a> From<&'a Bloom> for BloomRef<'a> { } } -#[cfg(feature = "serialize")] -impl_fixed_hash_serde!(Bloom, BLOOM_SIZE); - #[cfg(test)] mod tests { + use super::{Bloom, Input}; use core::str::FromStr; use hex_literal::hex; - use super::{Bloom, Input}; #[test] + #[rustfmt::skip] fn it_works() { let bloom = Bloom::from_str( "00000000000000000000000000000000\ @@ -271,7 +296,7 @@ mod tests { 00000000000000000000000000000000\ 00000000000000000000000000000000\ 00000000000000000000000000000000\ - 00000000000000000000000000000000" + 00000000000000000000000000000000", ).unwrap(); let address = hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106"); let topic = hex!("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"); diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md new file mode 100644 index 000000000..33030f4bb --- /dev/null +++ b/ethereum-types/CHANGELOG.md @@ -0,0 +1,63 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.15.1] - 2024-09-12 +- Updated `uint` to 0.10. 
[#859](https://github.com/paritytech/parity-common/pull/859) + +## [0.14.1] - 2022-11-29 +- Added `if_ethbloom` conditional macro. [#682](https://github.com/paritytech/parity-common/pull/682) + +## [0.14.0] - 2022-09-20 +- Updated `fixed-hash` to 0.8. [#680](https://github.com/paritytech/parity-common/pull/680) +- Updated `primitive-types` to 0.12. [#680](https://github.com/paritytech/parity-common/pull/680) +- Updated `ethbloom` to 0.13. [#680](https://github.com/paritytech/parity-common/pull/680) +- Made `ethbloom` optional. [#625](https://github.com/paritytech/parity-common/pull/625) + +## [0.13.1] - 2022-02-07 +- Updated `scale-info` to ">=1.0, <3". [#627](https://github.com/paritytech/parity-common/pull/627) + +## [0.13.0] - 2022-02-04 +### Breaking +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `impl-codec` to 0.6. [#623](https://github.com/paritytech/parity-common/pull/623) +- Updated `primitive-types` to 0.11. [#623](https://github.com/paritytech/parity-common/pull/623) +- Updated `ethbloom` to 0.12. [#623](https://github.com/paritytech/parity-common/pull/623) + +## [0.12.1] - 2021-09-30 +- Combined `scale-info` feature into `codec`. [#593](https://github.com/paritytech/parity-common/pull/593) + +## [0.12.0] - 2021-07-02 +### Breaking +- Updated `primitive-types` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) + +## [0.11.0] - 2021-01-27 +### Breaking +- Updated `ethbloom` to 0.11. [#510](https://github.com/paritytech/parity-common/pull/510) +- Updated `primitive-types` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) +- Updated `impl-codec` to 0.5. [#510](https://github.com/paritytech/parity-common/pull/510) + +### Potentially-breaking +- `serialize` feature no longer pulls `std`. [#503](https://github.com/paritytech/parity-common/pull/503) + +## [0.10.0] - 2021-01-05 +### Breaking +- Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) +- Updated `uint` to 0.9. [#486](https://github.com/paritytech/parity-common/pull/486) + +## [0.9.2] - 2020-05-18 +- Added `codec` feature. [#393](https://github.com/paritytech/parity-common/pull/393) + +## [0.9.1] - 2020-04-27 +- Added `arbitrary` feature. [#378](https://github.com/paritytech/parity-common/pull/378) + +## [0.9.0] - 2020-03-16 +- License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) +- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) + +### Added +- Uint error type is re-exported. 
[#244](https://github.com/paritytech/parity-common/pull/244) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 97dbc2e3b..110d338f1 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -1,24 +1,32 @@ [package] name = "ethereum-types" -version = "0.7.0" +version = "0.15.1" authors = ["Parity Technologies <admin@parity.io>"] -license = "MIT" +license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "Ethereum types" -edition = "2018" +edition = "2021" +rust-version = "1.60.0" [dependencies] -ethbloom = { path = "../ethbloom", version = "0.7", default-features = false } -fixed-hash = { path = "../fixed-hash", version = "0.4", default-features = false, features = ["byteorder", "rustc-hex"] } -uint-crate = { path = "../uint", package = "uint", version = "0.8", default-features = false } -primitive-types = { path = "../primitive-types", version = "0.5", features = ["rlp", "byteorder", "rustc-hex"], default-features = false } -impl-serde = { path = "../primitive-types/impls/serde", version = "0.2", default-features = false, optional = true } -impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false } +ethbloom = { path = "../ethbloom", version = "0.14", optional = true, default-features = false } +fixed-hash = { path = "../fixed-hash", version = "0.8", default-features = false, features = ["rustc-hex"] } +uint-crate = { path = "../uint", package = "uint", version = "0.10", default-features = false } +primitive-types = { path = "../primitive-types", version = "0.13", features = ["rustc-hex"], default-features = false } +impl-serde = { path = "../primitive-types/impls/serde", version = "0.5.0", default-features = false, optional = true } +impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.4", default-features = false, optional = true } +impl-codec = { version = "0.7.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } +scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false, optional = true } [dev-dependencies] -serde_json = "1.0" +serde_json = "1.0.41" [features] -default = ["std", "serialize"] -std = ["uint-crate/std", "fixed-hash/std", "ethbloom/std", "primitive-types/std"] -serialize = ["std", "impl-serde", "primitive-types/serde", "ethbloom/serialize"] +default = ["std", "ethbloom", "rlp", "serialize"] +std = ["uint-crate/std", "fixed-hash/std", "ethbloom?/std", "primitive-types/std"] +serialize = ["impl-serde", "primitive-types/serde_no_std", "ethbloom/serialize"] +arbitrary = ["ethbloom/arbitrary", "fixed-hash/arbitrary", "uint-crate/arbitrary"] +rlp = ["impl-rlp", "ethbloom/rlp", "primitive-types/rlp"] +codec = ["impl-codec", "ethbloom/codec", "scale-info", "primitive-types/scale-info"] +num-traits = ["primitive-types/num-traits"] +rand = ["primitive-types/rand"] diff --git a/ethereum-types/src/hash.rs b/ethereum-types/src/hash.rs index 9c04ee576..070124268 100644 --- a/ethereum-types/src/hash.rs +++ b/ethereum-types/src/hash.rs @@ -1,7 +1,18 @@ -use crate::{U64, U128, U256, U512}; +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms.
+ +use crate::{U128, U256, U512, U64}; use fixed_hash::*; +#[cfg(feature = "codec")] +use impl_codec::impl_fixed_hash_codec; +#[cfg(feature = "rlp")] use impl_rlp::impl_fixed_hash_rlp; -#[cfg(feature="serialize")] +#[cfg(feature = "serialize")] use impl_serde::impl_fixed_hash_serde; pub trait BigEndianHash { @@ -11,30 +22,50 @@ pub trait BigEndianHash { fn into_uint(&self) -> Self::Uint; } -construct_fixed_hash!{ pub struct H32(4); } +construct_fixed_hash! { pub struct H32(4); } +#[cfg(feature = "rlp")] impl_fixed_hash_rlp!(H32, 4); -#[cfg(feature = "serialize")] impl_fixed_hash_serde!(H32, 4); - -construct_fixed_hash!{ pub struct H64(8); } +#[cfg(feature = "serialize")] +impl_fixed_hash_serde!(H32, 4); +#[cfg(feature = "codec")] +impl_fixed_hash_codec!(H32, 4); + +construct_fixed_hash! { + #[cfg_attr(feature = "codec", derive(scale_info::TypeInfo))] + pub struct H64(8); +} +#[cfg(feature = "rlp")] impl_fixed_hash_rlp!(H64, 8); -#[cfg(feature = "serialize")] impl_fixed_hash_serde!(H64, 8); - -construct_fixed_hash!{ pub struct H128(16); } -impl_fixed_hash_rlp!(H128, 16); -#[cfg(feature = "serialize")] impl_fixed_hash_serde!(H128, 16); +#[cfg(feature = "serialize")] +impl_fixed_hash_serde!(H64, 8); +#[cfg(feature = "codec")] +impl_fixed_hash_codec!(H64, 8); -pub use primitive_types::H160; -pub use primitive_types::H256; +pub use primitive_types::{H128, H160, H256}; -construct_fixed_hash!{ pub struct H264(33); } +construct_fixed_hash! { + #[cfg_attr(feature = "codec", derive(scale_info::TypeInfo))] + pub struct H264(33); +} +#[cfg(feature = "rlp")] impl_fixed_hash_rlp!(H264, 33); -#[cfg(feature = "serialize")] impl_fixed_hash_serde!(H264, 33); +#[cfg(feature = "serialize")] +impl_fixed_hash_serde!(H264, 33); +#[cfg(feature = "codec")] +impl_fixed_hash_codec!(H264, 33); pub use primitive_types::H512; -construct_fixed_hash!{ pub struct H520(65); } +construct_fixed_hash! { + #[cfg_attr(feature = "codec", derive(scale_info::TypeInfo))] + pub struct H520(65); +} +#[cfg(feature = "rlp")] impl_fixed_hash_rlp!(H520, 65); -#[cfg(feature = "serialize")] impl_fixed_hash_serde!(H520, 65); +#[cfg(feature = "serialize")] +impl_fixed_hash_serde!(H520, 65); +#[cfg(feature = "codec")] +impl_fixed_hash_codec!(H520, 65); macro_rules! impl_uint_conversions { ($hash: ident, $uint: ident) => { @@ -43,15 +74,15 @@ macro_rules! 
impl_uint_conversions { fn from_uint(value: &$uint) -> Self { let mut ret = $hash::zero(); - value.to_big_endian(ret.as_bytes_mut()); + value.write_as_big_endian(ret.as_bytes_mut()); ret } fn into_uint(&self) -> $uint { - $uint::from(self.as_ref() as &[u8]) + $uint::from_big_endian(self.as_ref() as &[u8]) } } - } + }; } impl_uint_conversions!(H64, U64); @@ -91,7 +122,10 @@ mod tests { (H256::from_low_u64_be(16), "0x0000000000000000000000000000000000000000000000000000000000000010"), (H256::from_low_u64_be(1_000), "0x00000000000000000000000000000000000000000000000000000000000003e8"), (H256::from_low_u64_be(100_000), "0x00000000000000000000000000000000000000000000000000000000000186a0"), - (H256::from_low_u64_be(u64::max_value()), "0x000000000000000000000000000000000000000000000000ffffffffffffffff"), + ( + H256::from_low_u64_be(u64::max_value()), + "0x000000000000000000000000000000000000000000000000ffffffffffffffff", + ), ]; for (number, expected) in tests { @@ -100,11 +134,24 @@ mod tests { } } + #[test] + fn test_parse_0x() { + assert!("0x0000000000000000000000000000000000000000000000000000000000000000" + .parse::() + .is_ok()) + } + #[test] fn test_serialize_invalid() { - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000g\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x00000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); + assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000\"") + .unwrap_err() + .is_data()); + assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000g\"") + .unwrap_err() + .is_data()); + assert!(ser::from_str::("\"0x00000000000000000000000000000000000000000000000000000000000000000\"") + .unwrap_err() + .is_data()); assert!(ser::from_str::("\"\"").unwrap_err().is_data()); assert!(ser::from_str::("\"0\"").unwrap_err().is_data()); assert!(ser::from_str::("\"10\"").unwrap_err().is_data()); diff --git a/ethereum-types/src/lib.rs b/ethereum-types/src/lib.rs index a6a828f1b..f59a61a11 100644 --- a/ethereum-types/src/lib.rs +++ b/ethereum-types/src/lib.rs @@ -1,16 +1,38 @@ -#![cfg_attr(not(feature = "std"), no_std)] +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. -#[cfg(feature = "std")] -extern crate core; +#![cfg_attr(not(feature = "std"), no_std)] mod hash; mod uint; -pub use uint::{U64, U128, U256, U512}; -pub use hash::{BigEndianHash, H32, H64, H128, H160, H256, H264, H512, H520}; +#[cfg(feature = "ethbloom")] pub use ethbloom::{Bloom, BloomRef, Input as BloomInput}; +pub use hash::{BigEndianHash, H128, H160, H256, H264, H32, H512, H520, H64}; +pub use uint::{FromDecStrErr, FromStrRadixErr, FromStrRadixErrKind, U128, U256, U512, U64}; pub type Address = H160; pub type Secret = H256; pub type Public = H512; pub type Signature = H520; + +/// Conditional compilation depending on whether ethereum-types is built with ethbloom support. +#[cfg(feature = "ethbloom")] +#[macro_export] +macro_rules! if_ethbloom { + ($($tt:tt)*) => { + $($tt)* + }; +} + +#[cfg(not(feature = "ethbloom"))] +#[macro_export] +#[doc(hidden)] +macro_rules! 
if_ethbloom { + ($($tt:tt)*) => {}; +} diff --git a/ethereum-types/src/uint.rs b/ethereum-types/src/uint.rs index dd92f536f..5dfbdb310 100644 --- a/ethereum-types/src/uint.rs +++ b/ethereum-types/src/uint.rs @@ -1,23 +1,39 @@ -use uint_crate::*; +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[cfg(feature = "codec")] +use impl_codec::impl_uint_codec; +#[cfg(feature = "rlp")] use impl_rlp::impl_uint_rlp; -#[cfg(feature="serialize")] +#[cfg(feature = "serialize")] use impl_serde::impl_uint_serde; +use uint_crate::*; + +pub use uint_crate::{FromDecStrErr, FromStrRadixErr, FromStrRadixErrKind}; construct_uint! { /// Unsigned 64-bit integer. pub struct U64(1); } +#[cfg(feature = "rlp")] impl_uint_rlp!(U64, 1); -#[cfg(feature = "serialize")] impl_uint_serde!(U64, 1); +#[cfg(feature = "serialize")] +impl_uint_serde!(U64, 1); +#[cfg(feature = "codec")] +impl_uint_codec!(U64, 1); pub use primitive_types::{U128, U256, U512}; - #[cfg(test)] mod tests { use super::{U256, U512}; - use std::u64::MAX; use serde_json as ser; + use std::u64::MAX; macro_rules! test_serialize { ($name: ident, $test_name: ident) => { @@ -42,14 +58,31 @@ mod tests { assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); } + let tests = vec![ + ($name::from(0), "0"), + ($name::from(1), "1"), + ($name::from(2), "2"), + ($name::from(10), "a"), + ($name::from(15), "f"), + ($name::from(15), "f"), + ($name::from(16), "10"), + ($name::from(1_000), "3e8"), + ($name::from(100_000), "186a0"), + ($name::from(u64::max_value()), "ffffffffffffffff"), + ($name::from(u64::max_value()) + 1, "10000000000000000"), + ]; + + for (number, expected) in tests { + assert_eq!(format!("{:?}", "0x".to_string() + expected), ser::to_string_pretty(&number).unwrap()); + assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); + } + // Invalid examples assert!(ser::from_str::<$name>("\"0x\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"0xg\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); } - } + }; } test_serialize!(U256, test_u256); @@ -61,16 +94,16 @@ mod tests { ser::to_string_pretty(&!U256::zero()).unwrap(), "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" ); - assert!( - ser::from_str::("\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"").unwrap_err().is_data() - ); + assert!(ser::from_str::("\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"") + .unwrap_err() + .is_data()); } #[test] fn fixed_arrays_roundtrip() { let raw: U256 = "7094875209347850239487502394881".into(); - let array: [u8; 32] = raw.into(); - let new_raw = array.into(); + let array: [u8; 32] = raw.to_big_endian(); + let new_raw = U256::from_big_endian(&array); assert_eq!(raw, new_raw); } @@ -108,58 +141,58 @@ mod tests { assert_eq!(U512([0, 27, 0, 0, 0, 0, 0, 0]), result); let result = U256([MAX, 0, 0, 0]).full_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U512([1, MAX-1, 0, 0, 0, 0, 0, 0]), result); + assert_eq!(U512([1, MAX - 1, 0, 0, 0, 0, 0, 0]), result); let result = U256([0, MAX, 0, 0]).full_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U512([0, 1, MAX-1, 0, 0, 0, 0, 0]), result); + assert_eq!(U512([0, 1, MAX - 1, 0, 0, 0, 
0, 0]), result); let result = U256([MAX, MAX, 0, 0]).full_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U512([1, MAX, MAX-1, 0, 0, 0, 0, 0]), result); + assert_eq!(U512([1, MAX, MAX - 1, 0, 0, 0, 0, 0]), result); let result = U256([MAX, 0, 0, 0]).full_mul(U256([MAX, MAX, 0, 0])); - assert_eq!(U512([1, MAX, MAX-1, 0, 0, 0, 0, 0]), result); + assert_eq!(U512([1, MAX, MAX - 1, 0, 0, 0, 0, 0]), result); let result = U256([MAX, MAX, 0, 0]).full_mul(U256([MAX, MAX, 0, 0])); - assert_eq!(U512([1, 0, MAX-1, MAX, 0, 0, 0, 0]), result); + assert_eq!(U512([1, 0, MAX - 1, MAX, 0, 0, 0, 0]), result); let result = U256([MAX, 0, 0, 0]).full_mul(U256([MAX, MAX, MAX, 0])); - assert_eq!(U512([1, MAX, MAX, MAX-1, 0, 0, 0, 0]), result); + assert_eq!(U512([1, MAX, MAX, MAX - 1, 0, 0, 0, 0]), result); let result = U256([MAX, MAX, MAX, 0]).full_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U512([1, MAX, MAX, MAX-1, 0, 0, 0, 0]), result); + assert_eq!(U512([1, MAX, MAX, MAX - 1, 0, 0, 0, 0]), result); let result = U256([MAX, 0, 0, 0]).full_mul(U256([MAX, MAX, MAX, MAX])); - assert_eq!(U512([1, MAX, MAX, MAX, MAX-1, 0, 0, 0]), result); + assert_eq!(U512([1, MAX, MAX, MAX, MAX - 1, 0, 0, 0]), result); let result = U256([MAX, MAX, MAX, MAX]).full_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U512([1, MAX, MAX, MAX, MAX-1, 0, 0, 0]), result); + assert_eq!(U512([1, MAX, MAX, MAX, MAX - 1, 0, 0, 0]), result); let result = U256([MAX, MAX, MAX, 0]).full_mul(U256([MAX, MAX, 0, 0])); - assert_eq!(U512([1, 0, MAX, MAX-1, MAX, 0, 0, 0]), result); + assert_eq!(U512([1, 0, MAX, MAX - 1, MAX, 0, 0, 0]), result); let result = U256([MAX, MAX, 0, 0]).full_mul(U256([MAX, MAX, MAX, 0])); - assert_eq!(U512([1, 0, MAX, MAX-1, MAX, 0, 0, 0]), result); + assert_eq!(U512([1, 0, MAX, MAX - 1, MAX, 0, 0, 0]), result); let result = U256([MAX, MAX, MAX, MAX]).full_mul(U256([MAX, MAX, 0, 0])); - assert_eq!(U512([1, 0, MAX, MAX, MAX-1, MAX, 0, 0]), result); + assert_eq!(U512([1, 0, MAX, MAX, MAX - 1, MAX, 0, 0]), result); let result = U256([MAX, MAX, 0, 0]).full_mul(U256([MAX, MAX, MAX, MAX])); - assert_eq!(U512([1, 0, MAX, MAX, MAX-1, MAX, 0, 0]), result); + assert_eq!(U512([1, 0, MAX, MAX, MAX - 1, MAX, 0, 0]), result); let result = U256([MAX, MAX, MAX, 0]).full_mul(U256([MAX, MAX, MAX, 0])); - assert_eq!(U512([1, 0, 0, MAX-1, MAX, MAX, 0, 0]), result); + assert_eq!(U512([1, 0, 0, MAX - 1, MAX, MAX, 0, 0]), result); let result = U256([MAX, MAX, MAX, 0]).full_mul(U256([MAX, MAX, MAX, MAX])); - assert_eq!(U512([1, 0, 0, MAX, MAX-1, MAX, MAX, 0]), result); + assert_eq!(U512([1, 0, 0, MAX, MAX - 1, MAX, MAX, 0]), result); let result = U256([MAX, MAX, MAX, MAX]).full_mul(U256([MAX, MAX, MAX, 0])); - assert_eq!(U512([1, 0, 0, MAX, MAX-1, MAX, MAX, 0]), result); + assert_eq!(U512([1, 0, 0, MAX, MAX - 1, MAX, MAX, 0]), result); let result = U256([MAX, MAX, MAX, MAX]).full_mul(U256([MAX, MAX, MAX, MAX])); - assert_eq!(U512([1, 0, 0, 0, MAX-1, MAX, MAX, MAX]), result); + assert_eq!(U512([1, 0, 0, 0, MAX - 1, MAX, MAX, MAX]), result); let result = U256([0, 0, 0, MAX]).full_mul(U256([0, 0, 0, MAX])); - assert_eq!(U512([0, 0, 0, 0, 0, 0, 1, MAX-1]), result); + assert_eq!(U512([0, 0, 0, 0, 0, 0, 1, MAX - 1]), result); let result = U256([1, 0, 0, 0]).full_mul(U256([0, 0, 0, MAX])); assert_eq!(U512([0, 0, 0, MAX, 0, 0, 0, 0]), result); diff --git a/ethereum-types/tests/serde.rs b/ethereum-types/tests/serde.rs index 1cdbc4466..596d1e953 100644 --- a/ethereum-types/tests/serde.rs +++ b/ethereum-types/tests/serde.rs @@ -1,4 +1,4 @@ -// Copyright 2019 Parity Technologies +// 
Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -6,7 +6,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use ethereum_types::{U256, U512, H160, H256}; +use ethereum_types::{H160, H256, U256, U512}; use serde_json as ser; macro_rules! test { @@ -32,14 +32,31 @@ macro_rules! test { assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); } + let tests = vec![ + ($name::from(0), "0"), + ($name::from(1), "1"), + ($name::from(2), "2"), + ($name::from(10), "a"), + ($name::from(15), "f"), + ($name::from(15), "f"), + ($name::from(16), "10"), + ($name::from(1_000), "3e8"), + ($name::from(100_000), "186a0"), + ($name::from(u64::max_value()), "ffffffffffffffff"), + ($name::from(u64::max_value()) + $name::from(1u64), "10000000000000000"), + ]; + + for (number, expected) in tests { + assert_eq!(format!("{:?}", "0x".to_string() + expected), ser::to_string_pretty(&number).unwrap()); + assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); + } + // Invalid examples assert!(ser::from_str::<$name>("\"0x\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"0xg\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); } - } + }; } test!(U256, test_u256); @@ -51,9 +68,9 @@ fn test_large_values() { ser::to_string_pretty(&!U256::zero()).unwrap(), "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" ); - assert!( - ser::from_str::("\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"").unwrap_err().is_data() - ); + assert!(ser::from_str::("\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"") + .unwrap_err() + .is_data()); } #[test] @@ -94,17 +111,30 @@ fn test_h256() { #[test] fn test_invalid() { - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000g\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x00000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); + assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000\"") + .unwrap_err() + .is_data()); + assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000g\"") + .unwrap_err() + .is_data()); + assert!(ser::from_str::("\"0x00000000000000000000000000000000000000000000000000000000000000000\"") + .unwrap_err() + .is_data()); assert!(ser::from_str::("\"\"").unwrap_err().is_data()); assert!(ser::from_str::("\"0\"").unwrap_err().is_data()); assert!(ser::from_str::("\"10\"").unwrap_err().is_data()); } #[test] -fn test_invalid_char() { +fn test_invalid_char_with_prefix() { const INVALID_STR: &str = "\"0x000000000000000000000000000000000000000000000000000000000000000g\""; const EXPECTED_MSG: &str = "invalid hex character: g, at 65 at line 1 column 68"; assert_eq!(ser::from_str::(INVALID_STR).unwrap_err().to_string(), EXPECTED_MSG); } + +#[test] +fn test_invalid_char_without_prefix() { + const INVALID_STR: &str = "\"000000000000000000000000000000000000000000000000000000000000000g\""; + const EXPECTED_MSG: &str = "invalid hex character: g, at 63 at line 1 column 66"; + 
assert_eq!(ser::from_str::(INVALID_STR).unwrap_err().to_string(), EXPECTED_MSG); +} diff --git a/fixed-hash/CHANGELOG.md b/fixed-hash/CHANGELOG.md new file mode 100644 index 000000000..89fcb1c3d --- /dev/null +++ b/fixed-hash/CHANGELOG.md @@ -0,0 +1,32 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.8.0] - 2022-09-20 +### Breaking +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `arbitrary` to 1.0. [#530](https://github.com/paritytech/parity-common/pull/530) +- Updated `quickcheck` to 1.0. [#674](https://github.com/paritytech/parity-common/pull/674) + +## [0.7.0] - 2021-01-05 +### Breaking +- Updated `rand` to 0.8. [#488](https://github.com/paritytech/parity-common/pull/488) + +## [0.6.1] - 2020-04-27 +- Added `arbitrary` feature. [#378](https://github.com/paritytech/parity-common/pull/378) + +## [0.6.0] - 2020-03-16 +- Removed `libc` feature. [#317](https://github.com/paritytech/parity-common/pull/317) +- License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) + +## [0.5.2] - 2019-12-19 +### Fixed +- Re-export `alloc` for both std and no-std to fix compilation. [#268](https://github.com/paritytech/parity-common/pull/268) + +## [0.5.1] - 2019-10-24 +### Dependencies +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 9947ceaa2..161b1cef3 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -1,29 +1,37 @@ [package] name = "fixed-hash" -version = "0.4.0" +version = "0.8.0" authors = ["Parity Technologies "] -license = "MIT" +license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" repository = "https://github.com/paritytech/parity-common" description = "Macros to define custom fixed-size hash types" documentation = "https://docs.rs/fixed-hash/" readme = "README.md" +edition = "2021" +rust-version = "1.60" [package.metadata.docs.rs] features = ["quickcheck", "api-dummy"] [dependencies] -rand = { version = "0.5", optional = true, default-features = false } -rustc-hex = { version = "2.0", optional = true, default-features = false } -quickcheck = { version = "0.7", optional = true } -byteorder = { version = "1.2", optional = true, default-features = false } -static_assertions = "0.2" +quickcheck = { version = "1", optional = true } +rand = { version = "0.8.0", optional = true, default-features = false } +rustc-hex = { version = "2.0.1", optional = true, default-features = false } +static_assertions = "1.0.0" +arbitrary = { version = "1.0", optional = true } -[target.'cfg(not(target_os = "unknown"))'.dependencies] -libc = { version = "0.2", optional = true, default-features = false } +[dev-dependencies] +rand_xorshift = "0.3.0" +criterion = "0.5.1" +rand = { version = "0.8.0", default-features = false, features = ["std_rng"] } [features] -default = ["std", "libc", "rand", "rustc-hex", "byteorder"] -std = ["rustc-hex/std", "rand/std", "byteorder/std"] +default = ["std", "rand", "rustc-hex"] +std = ["rustc-hex/std", "rand?/std"] api-dummy = [] # Feature used by docs.rs to display documentation of hash types + +[[bench]] +name = "cmp" +harness = false diff --git a/fixed-hash/README.md b/fixed-hash/README.md index 19f4c79f9..7f38bc728 100644 --- a/fixed-hash/README.md +++ b/fixed-hash/README.md @@ -7,7 +7,7 @@ Provides macros to 
construct custom fixed-size hash types. Simple 256 bit (32 bytes) hash type. ```rust -#[macro_use] extern crate fixed_hash; +use fixed_hash::construct_fixed_hash; construct_fixed_hash! { /// My 256 bit hash type. @@ -30,9 +30,6 @@ assert_eq!(H160::from(H256::zero()), H160::zero()); It is possible to add attributes to your types, for example to make them serializable. ```rust -extern crate serde; -#[macro_use] extern crate serde_derive; - construct_fixed_hash!{ /// My serializable hash type. #[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))] @@ -42,7 +39,7 @@ construct_fixed_hash!{ ## Features -By default this is an standard library depending crate. +By default this is an standard library depending crate. For a `#[no_std]` environment use it as follows: ``` @@ -55,15 +52,12 @@ fixed-hash = { version = "0.3", default-features = false } - Using this feature enables the following features - `rustc-hex/std` - `rand/std` - - `byteorder/std` - - Enabled by default. -- `libc`: Use `libc` for implementations of `PartialEq` and `Ord`. - Enabled by default. - `rand`: Provide API based on the `rand` crate. - Enabled by default. -- `byteorder`: Provide API based on the `byteorder` crate. - - Enabled by default. - `quickcheck`: Provide `quickcheck` implementation for hash types. - Disabled by default. +- `arbitrary`: Allow for creation of a hash from random unstructured input. + - Disabled by default. - `api-dummy`: Generate a dummy hash type for API documentation. - - Enabled by default at `docs.rs` + - Enabled by default at `docs.rs` diff --git a/fixed-hash/benches/cmp.rs b/fixed-hash/benches/cmp.rs new file mode 100644 index 000000000..38633f9f4 --- /dev/null +++ b/fixed-hash/benches/cmp.rs @@ -0,0 +1,94 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Benchmarks for fixed-hash cmp implementation. + +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; + +use fixed_hash::construct_fixed_hash; + +construct_fixed_hash! 
{ pub struct H256(32); } + +criterion_group!(cmp, eq_equal, eq_nonequal, compare); +criterion_main!(cmp); + +fn eq_equal(c: &mut Criterion) { + let mut group = c.benchmark_group("eq_self"); + for input in [ + H256::zero(), + H256::repeat_byte(0xAA), + H256::from([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, 0x40, 0x84, + 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, + ]), + H256([u8::MAX; 32]), + ] { + group.bench_with_input(BenchmarkId::from_parameter(input), &input, |b, x| { + b.iter(|| black_box(x.eq(black_box(x)))) + }); + } + group.finish(); +} + +fn eq_nonequal(c: &mut Criterion) { + let mut group = c.benchmark_group("eq_nonequal"); + for input in [ + ( + H256::zero(), + H256::from([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + ]), + ), + (H256::repeat_byte(0xAA), H256::repeat_byte(0xA1)), + ( + H256::from([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, 0x40, + 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, + ]), + H256::from([ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, 0x40, + 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, + ]), + ), + ] { + group.bench_with_input(BenchmarkId::from_parameter(input.0), &input, |b, (x, y)| { + b.iter(|| black_box(x.eq(black_box(y)))) + }); + } + group.finish(); +} + +fn compare(c: &mut Criterion) { + let mut group = c.benchmark_group("compare"); + for input in [ + ( + H256::zero(), + H256::from([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + ]), + ), + (H256::zero(), H256::zero()), + (H256::repeat_byte(0xAA), H256::repeat_byte(0xAA)), + (H256::repeat_byte(0xAA), H256::repeat_byte(0xA1)), + ( + H256::from([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, 0x40, + 0x84, 0xC2, 0xDF, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, + ]), + H256::from([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, 0x40, + 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, + ]), + ), + ] { + group.bench_with_input(BenchmarkId::from_parameter(input.1), &input, |b, (x, y)| { + b.iter(|| black_box(x.cmp(black_box(y)))) + }); + } + group.finish(); +} diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index 95c762848..4cf63487e 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -13,44 +13,37 @@ /// Create a public unformatted hash type with 32 bytes size. /// /// ``` -/// # #[macro_use] extern crate fixed_hash; +/// use fixed_hash::construct_fixed_hash; +/// /// construct_fixed_hash!{ pub struct H256(32); } -/// # fn main() { -/// # assert_eq!(std::mem::size_of::(), 32); -/// # } +/// assert_eq!(std::mem::size_of::(), 32); /// ``` /// /// With additional attributes and doc comments. 
/// /// ``` -/// # #[macro_use] extern crate fixed_hash; -/// // Add the below two lines to import serde and its derive -/// // extern crate serde; -/// // #[macro_use] extern crate serde_derive; +/// use fixed_hash::construct_fixed_hash; /// construct_fixed_hash!{ -/// /// My unformatted 160 bytes sized hash type. -/// #[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))] -/// pub struct H160(20); +/// /// My unformatted 160 bytes sized hash type. +/// #[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))] +/// pub struct H160(20); /// } -/// # fn main() { -/// # assert_eq!(std::mem::size_of::(), 20); -/// # } +/// assert_eq!(std::mem::size_of::(), 20); /// ``` /// /// The visibility modifier is optional and you can create a private hash type. /// /// ``` -/// # #[macro_use] extern crate fixed_hash; +/// use fixed_hash::construct_fixed_hash; /// construct_fixed_hash!{ struct H512(64); } -/// # fn main() { -/// # assert_eq!(std::mem::size_of::(), 64); -/// # } +/// assert_eq!(std::mem::size_of::(), 64); /// ``` #[macro_export(local_inner_macros)] macro_rules! construct_fixed_hash { ( $(#[$attr:meta])* $visibility:vis struct $name:ident ( $n_bytes:expr ); ) => { #[repr(C)] $(#[$attr])* + #[derive(PartialEq, Eq)] $visibility struct $name (pub [u8; $n_bytes]); impl From<[u8; $n_bytes]> for $name { @@ -67,7 +60,7 @@ macro_rules! construct_fixed_hash { impl<'a> From<&'a [u8; $n_bytes]> for $name { /// Constructs a hash type from the given reference - /// to the bytes array of fixed length. + /// to the bytes array of fixed length. /// /// # Note /// @@ -80,7 +73,7 @@ macro_rules! construct_fixed_hash { impl<'a> From<&'a mut [u8; $n_bytes]> for $name { /// Constructs a hash type from the given reference - /// to the mutable bytes array of fixed length. + /// to the mutable bytes array of fixed length. /// /// # Note /// @@ -115,19 +108,19 @@ macro_rules! construct_fixed_hash { impl $name { /// Returns a new fixed hash where all bits are set to the given byte. #[inline] - pub fn repeat_byte(byte: u8) -> $name { + pub const fn repeat_byte(byte: u8) -> $name { $name([byte; $n_bytes]) } /// Returns a new zero-initialized fixed hash. #[inline] - pub fn zero() -> $name { + pub const fn zero() -> $name { $name::repeat_byte(0u8) } /// Returns the size of this hash in bytes. #[inline] - pub fn len_bytes() -> usize { + pub const fn len_bytes() -> usize { $n_bytes } @@ -145,7 +138,7 @@ macro_rules! construct_fixed_hash { /// Extracts a reference to the byte array containing the entire fixed hash. #[inline] - pub fn as_fixed_bytes(&self) -> &[u8; $n_bytes] { + pub const fn as_fixed_bytes(&self) -> &[u8; $n_bytes] { &self.0 } @@ -157,7 +150,7 @@ macro_rules! construct_fixed_hash { /// Returns the inner bytes array. #[inline] - pub fn to_fixed_bytes(self) -> [u8; $n_bytes] { + pub const fn to_fixed_bytes(self) -> [u8; $n_bytes] { self.0 } @@ -262,27 +255,36 @@ macro_rules! 
construct_fixed_hash { impl $crate::core_::marker::Copy for $name {} - #[cfg_attr(feature = "dev", allow(expl_impl_clone_on_copy))] impl $crate::core_::clone::Clone for $name { fn clone(&self) -> $name { - let mut ret = $name::zero(); - ret.0.copy_from_slice(&self.0); - ret + *self } } - impl $crate::core_::cmp::Eq for $name {} - impl $crate::core_::cmp::PartialOrd for $name { + #[inline] fn partial_cmp(&self, other: &Self) -> Option<$crate::core_::cmp::Ordering> { - Some(self.cmp(other)) + self.as_bytes().partial_cmp(other.as_bytes()) + } + } + + impl $crate::core_::cmp::Ord for $name { + #[inline] + fn cmp(&self, other: &Self) -> $crate::core_::cmp::Ordering { + self.as_bytes().cmp(other.as_bytes()) + } + } + + impl $crate::core_::default::Default for $name { + #[inline] + fn default() -> Self { + Self::zero() } } impl $crate::core_::hash::Hash for $name { fn hash(&self, state: &mut H) where H: $crate::core_::hash::Hasher { state.write(&self.0); - state.finish(); } } @@ -308,52 +310,71 @@ macro_rules! construct_fixed_hash { } } - impl $crate::core_::default::Default for $name { - #[inline] - fn default() -> Self { - Self::zero() - } - } - - impl_ops_for_hash!($name, BitOr, bitor, BitOrAssign, bitor_assign, |, |=); - impl_ops_for_hash!($name, BitAnd, bitand, BitAndAssign, bitand_assign, &, &=); - impl_ops_for_hash!($name, BitXor, bitxor, BitXorAssign, bitxor_assign, ^, ^=); + impl_bit_ops_for_fixed_hash!($name, BitOr, bitor, BitOrAssign, bitor_assign, |, |=); + impl_bit_ops_for_fixed_hash!($name, BitAnd, bitand, BitAndAssign, bitand_assign, &, &=); + impl_bit_ops_for_fixed_hash!($name, BitXor, bitxor, BitXorAssign, bitxor_assign, ^, ^=); impl_byteorder_for_fixed_hash!($name); + impl_rand_for_fixed_hash!($name); - impl_libc_for_fixed_hash!($name); impl_rustc_hex_for_fixed_hash!($name); impl_quickcheck_for_fixed_hash!($name); + impl_arbitrary_for_fixed_hash!($name); } } -// Implementation for disabled byteorder crate support. -// -// # Note -// -// Feature guarded macro definitions instead of feature guarded impl blocks -// to work around the problems of introducing `byteorder` crate feature in -// a user crate. -#[cfg(not(feature = "byteorder"))] #[macro_export] #[doc(hidden)] -macro_rules! impl_byteorder_for_fixed_hash { - ( $name:ident ) => {} +macro_rules! impl_bit_ops_for_fixed_hash { + ( + $impl_for:ident, + $ops_trait_name:ident, + $ops_fn_name:ident, + $ops_assign_trait_name:ident, + $ops_assign_fn_name:ident, + $ops_tok:tt, + $ops_assign_tok:tt + ) => { + impl<'r> $crate::core_::ops::$ops_assign_trait_name<&'r $impl_for> for $impl_for { + fn $ops_assign_fn_name(&mut self, rhs: &'r $impl_for) { + for (lhs, rhs) in self.as_bytes_mut().iter_mut().zip(rhs.as_bytes()) { + *lhs $ops_assign_tok rhs; + } + } + } + + impl $crate::core_::ops::$ops_assign_trait_name<$impl_for> for $impl_for { + #[inline] + fn $ops_assign_fn_name(&mut self, rhs: $impl_for) { + *self $ops_assign_tok &rhs; + } + } + + impl<'l, 'r> $crate::core_::ops::$ops_trait_name<&'r $impl_for> for &'l $impl_for { + type Output = $impl_for; + + fn $ops_fn_name(self, rhs: &'r $impl_for) -> Self::Output { + let mut ret = self.clone(); + ret $ops_assign_tok rhs; + ret + } + } + + impl $crate::core_::ops::$ops_trait_name<$impl_for> for $impl_for { + type Output = $impl_for; + + #[inline] + fn $ops_fn_name(self, rhs: Self) -> Self::Output { + &self $ops_tok &rhs + } + } + }; } -// Implementation for enabled byteorder crate support. 
-// -// # Note -// -// Feature guarded macro definitions instead of feature guarded impl blocks -// to work around the problems of introducing `byteorder` crate feature in -// a user crate. -#[cfg(feature = "byteorder")] #[macro_export] #[doc(hidden)] macro_rules! impl_byteorder_for_fixed_hash { ( $name:ident ) => { - /// Utilities using the `byteorder` crate. impl $name { /// Returns the least significant `n` bytes as slice. /// @@ -366,14 +387,11 @@ macro_rules! impl_byteorder_for_fixed_hash { &self[(Self::len_bytes() - n)..] } - fn to_low_u64_with_byteorder(&self) -> u64 - where - B: $crate::byteorder::ByteOrder - { + fn to_low_u64_with_fn(&self, from_bytes: fn([u8; 8]) -> u64) -> u64 { let mut buf = [0x0; 8]; let capped = $crate::core_::cmp::min(Self::len_bytes(), 8); buf[(8 - capped)..].copy_from_slice(self.least_significant_bytes(capped)); - B::read_u64(&buf) + from_bytes(buf) } /// Returns the lowest 8 bytes interpreted as big-endian. @@ -384,7 +402,7 @@ macro_rules! impl_byteorder_for_fixed_hash { /// are interpreted as being zero. #[inline] pub fn to_low_u64_be(&self) -> u64 { - self.to_low_u64_with_byteorder::<$crate::byteorder::BigEndian>() + self.to_low_u64_with_fn(u64::from_be_bytes) } /// Returns the lowest 8 bytes interpreted as little-endian. @@ -395,7 +413,7 @@ macro_rules! impl_byteorder_for_fixed_hash { /// are interpreted as being zero. #[inline] pub fn to_low_u64_le(&self) -> u64 { - self.to_low_u64_with_byteorder::<$crate::byteorder::LittleEndian>() + self.to_low_u64_with_fn(u64::from_le_bytes) } /// Returns the lowest 8 bytes interpreted as native-endian. @@ -406,15 +424,11 @@ macro_rules! impl_byteorder_for_fixed_hash { /// are interpreted as being zero. #[inline] pub fn to_low_u64_ne(&self) -> u64 { - self.to_low_u64_with_byteorder::<$crate::byteorder::NativeEndian>() + self.to_low_u64_with_fn(u64::from_ne_bytes) } - fn from_low_u64_with_byteorder(val: u64) -> Self - where - B: $crate::byteorder::ByteOrder - { - let mut buf = [0x0; 8]; - B::write_u64(&mut buf, val); + fn from_low_u64_with_fn(val: u64, to_bytes: fn(u64) -> [u8; 8]) -> Self { + let buf = to_bytes(val); let capped = $crate::core_::cmp::min(Self::len_bytes(), 8); let mut bytes = [0x0; $crate::core_::mem::size_of::()]; bytes[(Self::len_bytes() - capped)..].copy_from_slice(&buf[..capped]); @@ -430,7 +444,7 @@ macro_rules! impl_byteorder_for_fixed_hash { /// if the hash type has less than 8 bytes. #[inline] pub fn from_low_u64_be(val: u64) -> Self { - Self::from_low_u64_with_byteorder::<$crate::byteorder::BigEndian>(val) + Self::from_low_u64_with_fn(val, u64::to_be_bytes) } /// Creates a new hash type from the given `u64` value. @@ -442,7 +456,7 @@ macro_rules! impl_byteorder_for_fixed_hash { /// if the hash type has less than 8 bytes. #[inline] pub fn from_low_u64_le(val: u64) -> Self { - Self::from_low_u64_with_byteorder::<$crate::byteorder::LittleEndian>(val) + Self::from_low_u64_with_fn(val, u64::to_le_bytes) } /// Creates a new hash type from the given `u64` value. @@ -454,14 +468,14 @@ macro_rules! impl_byteorder_for_fixed_hash { /// if the hash type has less than 8 bytes. #[inline] pub fn from_low_u64_ne(val: u64) -> Self { - Self::from_low_u64_with_byteorder::<$crate::byteorder::NativeEndian>(val) + Self::from_low_u64_with_fn(val, u64::to_ne_bytes) } } - } + }; } // Implementation for disabled rand crate support. -// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks @@ -471,11 +485,11 @@ macro_rules! 
impl_byteorder_for_fixed_hash { #[macro_export] #[doc(hidden)] macro_rules! impl_rand_for_fixed_hash { - ( $name:ident ) => {} + ( $name:ident ) => {}; } // Implementation for enabled rand crate support. -// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks @@ -486,9 +500,7 @@ macro_rules! impl_rand_for_fixed_hash { #[doc(hidden)] macro_rules! impl_rand_for_fixed_hash { ( $name:ident ) => { - impl $crate::rand::distributions::Distribution<$name> - for $crate::rand::distributions::Standard - { + impl $crate::rand::distributions::Distribution<$name> for $crate::rand::distributions::Standard { fn sample(&self, rng: &mut R) -> $name { let mut ret = $name::zero(); for byte in ret.as_bytes_mut().iter_mut() { @@ -504,7 +516,7 @@ macro_rules! impl_rand_for_fixed_hash { /// given random number generator. pub fn randomize_using(&mut self, rng: &mut R) where - R: $crate::rand::Rng + ?Sized + R: $crate::rand::Rng + ?Sized, { use $crate::rand::distributions::Distribution; *self = $crate::rand::distributions::Standard.sample(rng); @@ -512,7 +524,7 @@ macro_rules! impl_rand_for_fixed_hash { /// Assign `self` to a cryptographically random value. pub fn randomize(&mut self) { - let mut rng = $crate::rand::rngs::EntropyRng::new(); + let mut rng = $crate::rand::rngs::OsRng; self.randomize_using(&mut rng); } @@ -520,7 +532,7 @@ macro_rules! impl_rand_for_fixed_hash { /// given random number generator. pub fn random_using(rng: &mut R) -> Self where - R: $crate::rand::Rng + ?Sized + R: $crate::rand::Rng + ?Sized, { let mut ret = Self::zero(); ret.randomize_using(rng); @@ -534,85 +546,11 @@ macro_rules! impl_rand_for_fixed_hash { hash } } - } -} - -// Implementation for disabled libc crate support. -// -// # Note -// -// Feature guarded macro definitions instead of feature guarded impl blocks -// to work around the problems of introducing `libc` crate feature in -// a user crate. -#[cfg(not(all(feature = "libc", not(target_os = "unknown"))))] -#[macro_export] -#[doc(hidden)] -macro_rules! impl_libc_for_fixed_hash { - ( $name:ident ) => { - impl $crate::core_::cmp::PartialEq for $name { - #[inline] - fn eq(&self, other: &Self) -> bool { - self.as_bytes() == other.as_bytes() - } - } - - impl $crate::core_::cmp::Ord for $name { - #[inline] - fn cmp(&self, other: &Self) -> $crate::core_::cmp::Ordering { - self.as_bytes().cmp(other.as_bytes()) - } - } - } -} - -// Implementation for enabled libc crate support. -// -// # Note -// -// Feature guarded macro definitions instead of feature guarded impl blocks -// to work around the problems of introducing `libc` crate feature in -// a user crate. -#[cfg(all(feature = "libc", not(target_os = "unknown")))] -#[macro_export] -#[doc(hidden)] -macro_rules! 
impl_libc_for_fixed_hash { - ( $name:ident ) => { - impl $crate::core_::cmp::PartialEq for $name { - #[inline] - fn eq(&self, other: &Self) -> bool { - unsafe { - $crate::libc::memcmp( - self.as_ptr() as *const $crate::libc::c_void, - other.as_ptr() as *const $crate::libc::c_void, - Self::len_bytes(), - ) == 0 - } - } - } - - impl $crate::core_::cmp::Ord for $name { - fn cmp(&self, other: &Self) -> $crate::core_::cmp::Ordering { - let r = unsafe { - $crate::libc::memcmp( - self.as_ptr() as *const $crate::libc::c_void, - other.as_ptr() as *const $crate::libc::c_void, - Self::len_bytes(), - ) - }; - if r < 0 { - return $crate::core_::cmp::Ordering::Less; - } - if r > 0 { - return $crate::core_::cmp::Ordering::Greater; - } - $crate::core_::cmp::Ordering::Equal - } - } - } + }; } // Implementation for disabled rustc-hex crate support. -// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks @@ -622,11 +560,11 @@ macro_rules! impl_libc_for_fixed_hash { #[macro_export] #[doc(hidden)] macro_rules! impl_rustc_hex_for_fixed_hash { - ( $name:ident ) => {} + ( $name:ident ) => {}; } // Implementation for enabled rustc-hex crate support. -// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks @@ -650,24 +588,24 @@ macro_rules! impl_rustc_hex_for_fixed_hash { /// /// - When encountering invalid non hex-digits /// - Upon empty string input or invalid input length in general - fn from_str( - input: &str, - ) -> $crate::core_::result::Result<$name, $crate::rustc_hex::FromHexError> { - #[cfg(not(feature = "std"))] - use $crate::alloc_::vec::Vec; - use $crate::rustc_hex::FromHex; - let bytes: Vec = input.from_hex()?; - if bytes.len() != Self::len_bytes() { - return Err($crate::rustc_hex::FromHexError::InvalidHexLength); + fn from_str(input: &str) -> $crate::core_::result::Result<$name, $crate::rustc_hex::FromHexError> { + let input = input.strip_prefix("0x").unwrap_or(input); + let mut iter = $crate::rustc_hex::FromHexIter::new(input); + let mut result = Self::zero(); + for byte in result.as_mut() { + *byte = iter.next().ok_or(Self::Err::InvalidHexLength)??; + } + if iter.next().is_some() { + return Err(Self::Err::InvalidHexLength) } - Ok($name::from_slice(&bytes)) + Ok(result) } } - } + }; } // Implementation for disabled quickcheck crate support. -// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks @@ -677,11 +615,11 @@ macro_rules! impl_rustc_hex_for_fixed_hash { #[macro_export] #[doc(hidden)] macro_rules! impl_quickcheck_for_fixed_hash { - ( $name:ident ) => {} + ( $name:ident ) => {}; } // Implementation for enabled quickcheck crate support. -// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks @@ -693,58 +631,45 @@ macro_rules! impl_quickcheck_for_fixed_hash { macro_rules! impl_quickcheck_for_fixed_hash { ( $name:ident ) => { impl $crate::quickcheck::Arbitrary for $name { - fn arbitrary(g: &mut G) -> Self { - let mut res = [0u8; $crate::core_::mem::size_of::()]; - g.fill_bytes(&mut res[..Self::len_bytes()]); + fn arbitrary(g: &mut $crate::quickcheck::Gen) -> Self { + let res: [u8; Self::len_bytes()] = $crate::core_::array::from_fn(|_| u8::arbitrary(g)); Self::from(res) } } - } + }; } +// When the `arbitrary` feature is disabled. +// +// # Note +// +// Feature guarded macro definitions instead of feature guarded impl blocks +// to work around the problems of introducing `arbitrary` crate feature in +// a user crate. 
+#[cfg(not(feature = "arbitrary"))] #[macro_export] #[doc(hidden)] -macro_rules! impl_ops_for_hash { - ( - $impl_for:ident, - $ops_trait_name:ident, - $ops_fn_name:ident, - $ops_assign_trait_name:ident, - $ops_assign_fn_name:ident, - $ops_tok:tt, - $ops_assign_tok:tt - ) => { - impl<'r> $crate::core_::ops::$ops_assign_trait_name<&'r $impl_for> for $impl_for { - fn $ops_assign_fn_name(&mut self, rhs: &'r $impl_for) { - for (lhs, rhs) in self.as_bytes_mut().iter_mut().zip(rhs.as_bytes()) { - *lhs $ops_assign_tok rhs; - } - } - } - - impl $crate::core_::ops::$ops_assign_trait_name<$impl_for> for $impl_for { - #[inline] - fn $ops_assign_fn_name(&mut self, rhs: $impl_for) { - *self $ops_assign_tok &rhs; - } - } - - impl<'l, 'r> $crate::core_::ops::$ops_trait_name<&'r $impl_for> for &'l $impl_for { - type Output = $impl_for; - - fn $ops_fn_name(self, rhs: &'r $impl_for) -> Self::Output { - let mut ret = self.clone(); - ret $ops_assign_tok rhs; - ret - } - } - - impl $crate::core_::ops::$ops_trait_name<$impl_for> for $impl_for { - type Output = $impl_for; +macro_rules! impl_arbitrary_for_fixed_hash { + ( $name:ident ) => {}; +} - #[inline] - fn $ops_fn_name(self, rhs: Self) -> Self::Output { - &self $ops_tok &rhs +// When the `arbitrary` feature is enabled. +// +// # Note +// +// Feature guarded macro definitions instead of feature guarded impl blocks +// to work around the problems of introducing `arbitrary` crate feature in +// a user crate. +#[cfg(feature = "arbitrary")] +#[macro_export] +#[doc(hidden)] +macro_rules! impl_arbitrary_for_fixed_hash { + ( $name:ident ) => { + impl $crate::arbitrary::Arbitrary<'_> for $name { + fn arbitrary(u: &mut $crate::arbitrary::Unstructured<'_>) -> $crate::arbitrary::Result { + let mut res = Self::zero(); + u.fill_buffer(&mut res.0)?; + Ok(Self::from(res)) } } }; @@ -765,21 +690,18 @@ macro_rules! impl_ops_for_hash { /// # Example /// /// ``` -/// #[macro_use] extern crate fixed_hash; +/// use fixed_hash::{construct_fixed_hash, impl_fixed_hash_conversions}; /// construct_fixed_hash!{ struct H160(20); } /// construct_fixed_hash!{ struct H256(32); } /// impl_fixed_hash_conversions!(H256, H160); /// // now use it! -/// # fn main() { /// assert_eq!(H256::from(H160::zero()), H256::zero()); /// assert_eq!(H160::from(H256::zero()), H160::zero()); -/// # } /// ``` #[macro_export(local_inner_macros)] macro_rules! impl_fixed_hash_conversions { ($large_ty:ident, $small_ty:ident) => { $crate::static_assertions::const_assert!( - VALID_SIZES; $crate::core_::mem::size_of::<$small_ty>() < $crate::core_::mem::size_of::<$large_ty>() ); @@ -789,14 +711,11 @@ macro_rules! impl_fixed_hash_conversions { let small_ty_size = $small_ty::len_bytes(); $crate::core_::debug_assert!( - large_ty_size > small_ty_size - && large_ty_size % 2 == 0 - && small_ty_size % 2 == 0 + large_ty_size > small_ty_size && large_ty_size % 2 == 0 && small_ty_size % 2 == 0 ); let mut ret = $large_ty::zero(); - ret.as_bytes_mut()[(large_ty_size - small_ty_size)..large_ty_size] - .copy_from_slice(value.as_bytes()); + ret.as_bytes_mut()[(large_ty_size - small_ty_size)..large_ty_size].copy_from_slice(value.as_bytes()); ret } } @@ -807,15 +726,12 @@ macro_rules! 
impl_fixed_hash_conversions { let small_ty_size = $small_ty::len_bytes(); $crate::core_::debug_assert!( - large_ty_size > small_ty_size - && large_ty_size % 2 == 0 - && small_ty_size % 2 == 0 + large_ty_size > small_ty_size && large_ty_size % 2 == 0 && small_ty_size % 2 == 0 ); let mut ret = $small_ty::zero(); - ret.as_bytes_mut().copy_from_slice( - &value[(large_ty_size - small_ty_size)..large_ty_size], - ); + ret.as_bytes_mut() + .copy_from_slice(&value[(large_ty_size - small_ty_size)..large_ty_size]); ret } } diff --git a/fixed-hash/src/lib.rs b/fixed-hash/src/lib.rs index 061c33854..5f365e997 100644 --- a/fixed-hash/src/lib.rs +++ b/fixed-hash/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -10,51 +10,41 @@ // Re-export liballoc using an alias so that the macros can work without // requiring `extern crate alloc` downstream. -#[cfg(not(feature = "std"))] #[doc(hidden)] pub extern crate alloc as alloc_; // Re-export libcore using an alias so that the macros can work without -// requiring `extern crate core` downstream. +// requiring `use core` downstream. #[doc(hidden)] -pub extern crate core as core_; - -#[cfg(all(feature = "libc", not(target_os = "unknown")))] -#[doc(hidden)] -pub extern crate libc; +pub use core as core_; // This disables a warning for unused #[macro_use(..)] // which is incorrect since the compiler does not check // for all available configurations. #[allow(unused_imports)] -#[macro_use(const_assert)] #[doc(hidden)] -pub extern crate static_assertions; +pub use static_assertions; // Export `const_assert` macro so that users of this crate do not // have to import the `static_assertions` crate themselves. #[doc(hidden)] pub use static_assertions::const_assert; -#[cfg(feature = "byteorder")] -#[doc(hidden)] -pub extern crate byteorder; - -#[cfg(not(feature = "libc"))] -#[doc(hidden)] -pub mod libc {} - #[cfg(feature = "rustc-hex")] #[doc(hidden)] -pub extern crate rustc_hex; +pub use rustc_hex; #[cfg(feature = "rand")] #[doc(hidden)] -pub extern crate rand; +pub use rand; #[cfg(feature = "quickcheck")] #[doc(hidden)] -pub extern crate quickcheck; +pub use quickcheck; + +#[cfg(feature = "arbitrary")] +#[doc(hidden)] +pub use arbitrary; #[macro_use] mod hash; @@ -63,7 +53,7 @@ mod hash; mod tests; #[cfg(feature = "api-dummy")] -construct_fixed_hash!{ - /// Go here for an overview of the hash type API. - pub struct ApiDummy(32); +construct_fixed_hash! { + /// Go here for an overview of the hash type API. + pub struct ApiDummy(32); } diff --git a/fixed-hash/src/tests.rs b/fixed-hash/src/tests.rs index 66869d002..a0975462f 100644 --- a/fixed-hash/src/tests.rs +++ b/fixed-hash/src/tests.rs @@ -1,8 +1,16 @@ -construct_fixed_hash!{ struct H32(4); } -construct_fixed_hash!{ struct H64(8); } -construct_fixed_hash!{ struct H128(16); } -construct_fixed_hash!{ struct H160(20); } -construct_fixed_hash!{ struct H256(32); } +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +construct_fixed_hash! { pub struct H32(4); } +construct_fixed_hash! { pub struct H64(8); } +construct_fixed_hash! { pub struct H128(16); } +construct_fixed_hash! { pub struct H160(20); } +construct_fixed_hash! 
{ pub struct H256(32); } impl_fixed_hash_conversions!(H256, H160); @@ -147,32 +155,19 @@ mod is_zero { } } -#[cfg(feature = "byteorder")] mod to_low_u64 { use super::*; #[test] fn smaller_size() { - assert_eq!( - H32::from([0x01, 0x23, 0x45, 0x67]).to_low_u64_be(), - 0x0123_4567 - ); - assert_eq!( - H32::from([0x01, 0x23, 0x45, 0x67]).to_low_u64_le(), - 0x6745_2301_0000_0000 - ); + assert_eq!(H32::from([0x01, 0x23, 0x45, 0x67]).to_low_u64_be(), 0x0123_4567); + assert_eq!(H32::from([0x01, 0x23, 0x45, 0x67]).to_low_u64_le(), 0x6745_2301_0000_0000); } #[test] fn equal_size() { - assert_eq!( - H64::from([0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF]).to_low_u64_le(), - 0xEFCD_AB89_6745_2301 - ); - assert_eq!( - H64::from([0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF]).to_low_u64_be(), - 0x0123_4567_89AB_CDEF - ) + assert_eq!(H64::from([0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF]).to_low_u64_le(), 0xEFCD_AB89_6745_2301); + assert_eq!(H64::from([0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF]).to_low_u64_be(), 0x0123_4567_89AB_CDEF) } #[test] @@ -199,20 +194,13 @@ mod to_low_u64 { } } -#[cfg(feature = "byteorder")] mod from_low_u64 { use super::*; #[test] fn smaller_size() { - assert_eq!( - H32::from_low_u64_be(0x0123_4567_89AB_CDEF), - H32::from([0x01, 0x23, 0x45, 0x67]) - ); - assert_eq!( - H32::from_low_u64_le(0x0123_4567_89AB_CDEF), - H32::from([0xEF, 0xCD, 0xAB, 0x89]) - ); + assert_eq!(H32::from_low_u64_be(0x0123_4567_89AB_CDEF), H32::from([0x01, 0x23, 0x45, 0x67])); + assert_eq!(H32::from_low_u64_le(0x0123_4567_89AB_CDEF), H32::from([0xEF, 0xCD, 0xAB, 0x89])); } #[test] @@ -250,30 +238,12 @@ mod from_low_u64 { #[cfg(feature = "rand")] mod rand { use super::*; - use rand::{SeedableRng, XorShiftRng}; + use ::rand::{rngs::StdRng, SeedableRng}; #[test] fn random() { - let default_seed = ::Seed::default(); - let mut rng = XorShiftRng::from_seed(default_seed); - assert_eq!( - H32::random_using(&mut rng), - H32::from([0x43, 0xCA, 0x64, 0xED]) - ); - } - - #[test] - fn randomize() { - let default_seed = ::Seed::default(); - let mut rng = XorShiftRng::from_seed(default_seed); - assert_eq!( - { - let mut ret = H32::zero(); - ret.randomize_using(&mut rng); - ret - }, - H32::from([0x43, 0xCA, 0x64, 0xED]) - ) + let mut rng = StdRng::seed_from_u64(123); + assert_eq!(H32::random_using(&mut rng), H32::from([0xeb, 0x96, 0xaf, 0x1c])); } } @@ -283,7 +253,7 @@ mod from_str { #[test] fn valid() { - use core_::str::FromStr; + use crate::core_::str::FromStr; assert_eq!( H64::from_str("0123456789ABCDEF").unwrap(), @@ -293,19 +263,19 @@ mod from_str { #[test] fn empty_str() { - use core_::str::FromStr; + use crate::core_::str::FromStr; assert!(H64::from_str("").is_err()) } #[test] fn invalid_digits() { - use core_::str::FromStr; + use crate::core_::str::FromStr; assert!(H64::from_str("Hello, World!").is_err()) } #[test] fn too_many_digits() { - use core_::str::FromStr; + use crate::core_::str::FromStr; assert!(H64::from_str("0123456789ABCDEF0").is_err()) } } @@ -313,14 +283,13 @@ mod from_str { #[test] fn from_h160_to_h256() { let h160 = H160::from([ - 0xEF, 0x2D, 0x6D, 0x19, 0x40, 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, - 0x46, 0xB3, 0x7D, 0x11, 0x06, + 0xEF, 0x2D, 0x6D, 0x19, 0x40, 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, + 0x11, 0x06, ]); let h256 = H256::from(h160); let expected = H256::from([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, - 0x19, 0x40, 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 
0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, - 0x11, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, 0x40, 0x84, + 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, ]); assert_eq!(h256, expected); } @@ -357,8 +326,8 @@ fn from_h256_to_h160_lossy() { assert_eq!(h160, expected); } -#[cfg(all(feature = "std", feature = "byteorder"))] #[test] +#[cfg(feature = "std")] fn display_and_debug() { fn test_for(x: u64, hex: &'static str, display: &'static str) { let hash = H64::from_low_u64_be(x); @@ -378,6 +347,15 @@ fn display_and_debug() { test_for(0x1000, "0000000000001000", "0000…1000"); } +#[test] +fn const_matching_works() { + const ONES: H32 = H32::repeat_byte(1); + match H32::repeat_byte(0) { + ONES => unreachable!(), + _ => {}, + } +} + mod ops { use super::*; diff --git a/keccak-hash/CHANGELOG.md b/keccak-hash/CHANGELOG.md new file mode 100644 index 000000000..f34dc1074 --- /dev/null +++ b/keccak-hash/CHANGELOG.md @@ -0,0 +1,43 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.11.0] - 2024-09-11 +- Updated `primitive-types` to 0.13. [#859](https://github.com/paritytech/parity-common/pull/859) + +## [0.10.0] - 2022-09-20 +### Breaking +- Updated `parity-util-mem` to 0.12. [#680](https://github.com/paritytech/parity-common/pull/680) + +## [0.9.0] - 2022-02-04 +### Breaking +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `primitive-types` to 0.11. [#623](https://github.com/paritytech/parity-common/pull/623) + +## [0.8.0] - 2021-07-02 +### Breaking +- Updated `primitive-types` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) + +## [0.7.0] - 2021-01-27 +### Breaking +- Updated `primitive-types` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) + +## [0.6.0] - 2021-01-05 +### Breaking +- Updated `primitive-types` to 0.8. [#463](https://github.com/paritytech/parity-common/pull/463) + +## [0.5.1] - 2020-04-10 +- Added `keccak256_range` and `keccak512_range` functions. [#370](https://github.com/paritytech/parity-common/pull/370) + +## [0.5.0] - 2020-03-16 +- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) +- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) +- Updated tiny-keccak. [#260](https://github.com/paritytech/parity-common/pull/260) + +## [0.4.1] - 2019-10-24 +### Dependencies +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index 6a3229fb2..210ea4020 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -1,20 +1,26 @@ [package] name = "keccak-hash" -version = "0.3.0" +version = "0.11.0" description = "`keccak-hash` is a set of utility functions to facilitate working with Keccak hashes (256/512 bits long)." 
authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" readme = "README.md" -license = "GPL-3.0" -edition = "2018" +license = "MIT OR Apache-2.0" +edition = "2021" +rust-version = "1.56.1" [dependencies] -tiny-keccak = "1.4" -primitive-types = { path = "../primitive-types", version = "0.5", default-features = false } +tiny-keccak = { version = "2.0", features = ["keccak"] } +primitive-types = { path = "../primitive-types", version = "0.13", default-features = false } [dev-dependencies] -tempdir = "0.3" +tempfile = "3.1.0" +criterion = "0.5.1" [features] default = ["std"] std = [] + +[[bench]] +name = "keccak_256" +harness = false diff --git a/keccak-hash/benches/keccak_256.rs b/keccak-hash/benches/keccak_256.rs index e18de9fe6..3a28f993a 100644 --- a/keccak-hash/benches/keccak_256.rs +++ b/keccak-hash/benches/keccak_256.rs @@ -1,50 +1,55 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -#![feature(test)] - -extern crate test; - +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; use keccak_hash::keccak; -use test::Bencher; -#[bench] -fn bench_keccak_256_with_empty_input(b: &mut Bencher) { - let empty = [0u8;0]; - b.bytes = empty.len() as u64; - b.iter(|| { - let _out = keccak(empty); - }) +criterion_group!(keccak_256, keccak_256_with_empty_input, keccak_256_with_typical_input, keccak_256_with_large_input,); +criterion_main!(keccak_256); + +pub fn keccak_256_with_empty_input(c: &mut Criterion) { + let empty = [0u8; 0]; + c.bench_function("keccak_256_with_empty_input", |b| { + b.iter(|| { + let _out = keccak(black_box(empty)); + }) + }); } -#[bench] -fn bench_keccak_256_with_typical_input(b: &mut Bencher) { - let data: Vec = From::from("some medum length string with important information"); - b.bytes = data.len() as u64; - b.iter(|| { - let _out = keccak(&data); - }) +pub fn keccak_256_with_typical_input(c: &mut Criterion) { + let mut data: Vec = From::from("some medium length string with important information"); + let len = data.len(); + let mut group = c.benchmark_group("keccak_256_with_typical_input"); + group.bench_function("regular", |b| { + b.iter(|| { + let _out = keccak(black_box(&data)); + }) + }); + group.bench_function("inplace", |b| { + b.iter(|| { + keccak_hash::keccak256(black_box(&mut data[..])); + }) + }); + group.bench_function("inplace_range", |b| { + b.iter(|| { + keccak_hash::keccak256_range(black_box(&mut data[..]), 0..len); + }) + }); + + group.finish(); } -#[bench] -fn bench_keccak_256_with_large_input(b: &mut Bencher) { - // 4096 chars - let data: Vec = From::from("IGxcKBr1Qp7tuqtpSVhAbvt7UgWLEi7mCA6Wa185seLSIJLFS8K1aAFO9AwtO9b3n9SM3Qg136JMmy9Mj9gZ84IaUm8XioPtloabFDU5ZR1wvauJT6jNTkvBVBpUigIsyU7C1u3s99vKP64LpXqvo1hwItZKtISxmUAgzzjv5q14V4G9bkKAnmc4M5xixgLsDGZmnj6HcOMY3XRkWtxN3RscSKwPA0bfpgtz27ZVHplbXwloYRgRLpjRhZJc7sqO8RFnTHKasVkxVRcUoDBvWNJK27TbLvQQcfxETI2Q1H6c2cBAchi8unSiuxqy5rIvVxcl9rsmmRY4IXLEG9qKntUGbiIRLjEffIP9ODoWog0GbWLmMtfvtf24hWVwXz6Ap5oUAR0kLgb7HYIYrOwKjvfV25iEF7GW8cjhl8yowXx1zcgW4t6NJNqJlGzRKx8MvRWQXvHz8h8JxcHl7S64i6PAkxI9eCLXLvs8cpbEQQHt05Zu6GKm6IInjc9mSh52WFuGhgjbno69XzfkBufJs6c9tZuBf6ErVPj4UxmT82ajCruDusk79Tlvb8oQMLjoplQc1alQaLQwSsMac9iVp9MiE3PeYnTTepJ1V10tp79fciDAnNPJgPcRfDYv0REcSFgR9Q7yWhbpPpyBjO7HwOykDQVGtV0ZbDFrFRygLAXagAIkOPc9HDfcBNID1Q2MGk8ijVWMyvmGz1wzbpNfFcQaSOm8olhwoLyHUGvkyXegh44iNsPBUvSicNxTTDowtMqO5azleuWEjzxCobYbASDopvl6JeJjRtEBBO5YCQJiHsYjlXh9QR5Q543GsqhzRLgcHNRSZYLMZqDmIABXZi8VRNJMZyWXDRKHOGDmcHWe55uZomW6FnyU0uSRKxxz66K0JWfxuFzzxAR0vR4ZZCTemgDRQuDwL1loC3KUMjDpU13jUgoPc4UJUVfwQ4f4BUY3X51Cfw9FLw4oX39KoFoiCP2Z6z27gZUY1IlE59WoXGLj4KjTp4C16ZihG080gfDIWlXnDEk3VwBuBFyKWARB63sGLrGnn27b1gHWMaop6sPvkQgWxkEKIqsxDIvXLZJg2s23V8Gqtt0FeA7R3RCvBysF4jNjQ7NiQTIQWQZ8G9gO4mEsftolSZv6FlSpNeBKIIwYWSO2R6vkgeiz06euE9bwwnenOjwPNGTGk8WHIOZBJ1hIP0ejVU2i2ca9ON0phSAnewqjo5W3PtZf2Q7mDvp9imuVWoy4t8XcZq8I2Un9jVjes9Xi0FLN2t71vLFWLWZmGDzwXxpqEgkARS1WjtJoYXCBmRnXEPj6jQfwMZWKPYSIrmOogxMVoWvA8wrof6utfJna9JezyTnrBJSCuGTSNmwwAXRLoFYxF1RITyN8mI2KmHSfvLXBrbE6kmAkjsm4XJb6kria7oUQQ1gzJuCyB7oNHjZTBFNhNa7VeQ1s1xLOwZXLOAjZ4MDTYKnF7giGJGyswb5KQxkOV9orbuAu6pJsjtql6h1UD3BcNUkG3oz8kJNepbuCN3vNCJcZOX1VrQi0PWkDwyvECrQ2E1CgbU6GpWatpg2sCTpo9W62pCcWBK2FKUFWqU3qo2T7T1Mk2ZtM6hE9I8op0M7xlGE91Mn7ea6aq93MWp7nvFlBvbaMIoeU4MpDx0BeOSkROY03ZBJ0x7K8nJrNUhAtvxp17c9oFk0VxLiuRbAAcwDUormOmpVXZNIcqnap4twEVYaSIowfcNojyUSrFL5nPc8ZG93WgNNl9rpUPZhssVml3DvXghI80A9SW3QauzohTQAX2bkWelFBHnuG2LKrsJ8en51N6CkjcS5b87y1DVMZELcZ1n5s8PCAA1wyn7OSZlgw00GRzch1YwMoHzBBgIUtMO9HrMyuhgqIPJP7KcKbQkKhtvBXKplX8SCfSlOwUkLwHNKm3HYVE0uVfJ
91NAsUrGoCOjYiXYpoRT8bjAPWTm6fDlTq2sbPOyTMoc4xRasmiOJ7B0PT6UxPzCPImM4100sPFxp7Kofv4okKZWTPKTefeYiPefI3jRgfDtEIP9E6a35LZD75lBNMXYlAqL3qlnheUQD1WQimFTHiDsW6bmURptNvtkMjEXzXzpWbnyxBskUGTvP2YQjtSAhWliDXkv6t1x71cYav7TQbqvbIzMRQQsguSGYMbs8YIC4DC9ep5reWAfanlTxcxksbEhQ7FGzXOvcufeGnDl2C85gWfryVzwN7kOZiSEktFMOQ1ngRC23y1fCOiHQVQJ2nLnaW7GILb9wkN1mBTRuHsOefRJST0TnRxcn4bBq4MIibIitVyjPRy7G5XvPEcL4pFaW1HCPGm6pUOEEwTer32JObNGCyTFB1BI2cRLJu5BHPjgG3mmb0gGkGlIfh8D2b2amogpivqEn2r9Y1KOKQ8ufJvG2mYfkevco9DuEZ9Nmzkm6XkCTZaFMNHqbfQaKqsEYK7i2N1KfkBct1leW2H9MQ9QO7AHCqXHK47b1kWVIm6pSJA1yV4funzCqXnIJCEURQgHiKf38YpN7ylLhe1J4UvSG3KeesZNeFFIZOEP9HZUSFMpnN1MOrwejojK0D4qzwucYWtXrTQ8I7UP5QhlijIsCKckUa9C1Osjrq8cgSclYNGt19wpy0onUbX1rOQBUlAAUJs4CyXNU0wmVUjw7tG1LUC8my4s9KZDUj4R5UcPz3VaZRrx1RqYu6YxjroJW70I1LyG4WEiQbOkCoLmaiWo9WzbUS2cErlOo2RPymlkWHxbNnZawX2Bc872ivRHSWqNpRHyuR5QewXmcyghH3EhESBAxTel5E2xuQXfLCEVK0kEk0Mj22KPsckKKyH7sVYC1F4YItQh5hj9Titb7KflQb9vnXQ44UHxY3zBhTQT5PSYv1Kv8HxXCsnpmhZCiBru16iX9oEB33icBVB2KKcZZEEKnCGPVxJlM9RTlyNyQmjHf7z4GeTDuMAUrsMO31WvgZBnWcAOtn6ulBTUCAaqxJiWqzlMx2FSANAlyAjAxqzmQjzPLvQRjskUnBFN3woKB1m2bSo2c5thwA1fKiPvN5LW8tl1rnfNy3rJ0GJpK8nZjkzHMztYrKYAe56pX4SvplpTyibTIiRXLyEVsmuByTHCZhO3fvGoFsav3ZuRhe9eAAWeqAh13eKDTcA0ufME3ZnmJheXEZ3OwrxnFjSf3U0clkWYVont3neh77ODKHhYnX0bOmnJJlr4RqFoLBitskY0kcGMKcZlaej21SENjDcFgaka3CfHbAH5vIFqnoX1JZrZPkQ65PZqQWImP79U3gXWKvz96lElyJZAFqn0Mbltllqw4MhlI766AvHraOmMsJoNvjv1QR7pCSnC0iX6nbqW1eVPaUSZDuZRtRIxfLA8HC9VbxufT2KZV3qG0l7wrZna5Di2MNcBE9uthuVLZcqp8vCmEhINDhRRlipR7tC2iRBHecS5WtxBCpbEm1y1kgNG5o60UKgAswxxuJ3RQ9Y49mPIApBMmp4LFpuKRfcrZb4UJnCfR3pNbQ70nnZ6Be2M7tuJUCoFfHrhqHXNz5A0uWMgxUS50c60zLl6QAELxHaCGba4WCMOHIo5nSKcUuYtDyDoDlrezALW5mZR4PRPRxnjrXxbJI14qrpymRReC3QgFDJp6sT5TLwvSHaavPlEbt2Eu0Kh5SXklGHXP9YuF3glGuJzSob3NakW1RXF5786U1MHhtJby64LyGWvNn4QXie3VjeL3QQu4C9crEAxSSiOJOfnL3DYIVOY4ipUkKFlF7Rp2q6gZazDvcUCp1cbcr7T7B4s22rXzjN7mHYWOyWuZGwlImeorY3aVKi7BaXbhgOFw6BUmIc1HeGFELHIEnPE9MwOjZam3LOm0rhBHlvJJZkXvJKmDUJrGlyqC5GtC5lDWLfXewyDWDqq7PY0atVQily5GWqib6wub6u6LZ3HZDNP8gK64Nf4kC259AE4V2hCohDnSsXAIoOkehwXyp6CkDT42NJb6sXHUv2N6cm292MiKA22PKWrwUGsan599KI2V67YRDfcfiB4ZHRDiSe62MBE0fGLIgXLIWw1xTWYbPQ9YAj3xovBvmewbJ1De4k6uS"); - b.bytes = data.len() as u64; - b.iter(|| { - let _out = keccak(&data); - }) +pub fn keccak_256_with_large_input(c: &mut Criterion) { + // 4096 chars + let data: Vec = 
From::from("IGxcKBr1Qp7tuqtpSVhAbvt7UgWLEi7mCA6Wa185seLSIJLFS8K1aAFO9AwtO9b3n9SM3Qg136JMmy9Mj9gZ84IaUm8XioPtloabFDU5ZR1wvauJT6jNTkvBVBpUigIsyU7C1u3s99vKP64LpXqvo1hwItZKtISxmUAgzzjv5q14V4G9bkKAnmc4M5xixgLsDGZmnj6HcOMY3XRkWtxN3RscSKwPA0bfpgtz27ZVHplbXwloYRgRLpjRhZJc7sqO8RFnTHKasVkxVRcUoDBvWNJK27TbLvQQcfxETI2Q1H6c2cBAchi8unSiuxqy5rIvVxcl9rsmmRY4IXLEG9qKntUGbiIRLjEffIP9ODoWog0GbWLmMtfvtf24hWVwXz6Ap5oUAR0kLgb7HYIYrOwKjvfV25iEF7GW8cjhl8yowXx1zcgW4t6NJNqJlGzRKx8MvRWQXvHz8h8JxcHl7S64i6PAkxI9eCLXLvs8cpbEQQHt05Zu6GKm6IInjc9mSh52WFuGhgjbno69XzfkBufJs6c9tZuBf6ErVPj4UxmT82ajCruDusk79Tlvb8oQMLjoplQc1alQaLQwSsMac9iVp9MiE3PeYnTTepJ1V10tp79fciDAnNPJgPcRfDYv0REcSFgR9Q7yWhbpPpyBjO7HwOykDQVGtV0ZbDFrFRygLAXagAIkOPc9HDfcBNID1Q2MGk8ijVWMyvmGz1wzbpNfFcQaSOm8olhwoLyHUGvkyXegh44iNsPBUvSicNxTTDowtMqO5azleuWEjzxCobYbASDopvl6JeJjRtEBBO5YCQJiHsYjlXh9QR5Q543GsqhzRLgcHNRSZYLMZqDmIABXZi8VRNJMZyWXDRKHOGDmcHWe55uZomW6FnyU0uSRKxxz66K0JWfxuFzzxAR0vR4ZZCTemgDRQuDwL1loC3KUMjDpU13jUgoPc4UJUVfwQ4f4BUY3X51Cfw9FLw4oX39KoFoiCP2Z6z27gZUY1IlE59WoXGLj4KjTp4C16ZihG080gfDIWlXnDEk3VwBuBFyKWARB63sGLrGnn27b1gHWMaop6sPvkQgWxkEKIqsxDIvXLZJg2s23V8Gqtt0FeA7R3RCvBysF4jNjQ7NiQTIQWQZ8G9gO4mEsftolSZv6FlSpNeBKIIwYWSO2R6vkgeiz06euE9bwwnenOjwPNGTGk8WHIOZBJ1hIP0ejVU2i2ca9ON0phSAnewqjo5W3PtZf2Q7mDvp9imuVWoy4t8XcZq8I2Un9jVjes9Xi0FLN2t71vLFWLWZmGDzwXxpqEgkARS1WjtJoYXCBmRnXEPj6jQfwMZWKPYSIrmOogxMVoWvA8wrof6utfJna9JezyTnrBJSCuGTSNmwwAXRLoFYxF1RITyN8mI2KmHSfvLXBrbE6kmAkjsm4XJb6kria7oUQQ1gzJuCyB7oNHjZTBFNhNa7VeQ1s1xLOwZXLOAjZ4MDTYKnF7giGJGyswb5KQxkOV9orbuAu6pJsjtql6h1UD3BcNUkG3oz8kJNepbuCN3vNCJcZOX1VrQi0PWkDwyvECrQ2E1CgbU6GpWatpg2sCTpo9W62pCcWBK2FKUFWqU3qo2T7T1Mk2ZtM6hE9I8op0M7xlGE91Mn7ea6aq93MWp7nvFlBvbaMIoeU4MpDx0BeOSkROY03ZBJ0x7K8nJrNUhAtvxp17c9oFk0VxLiuRbAAcwDUormOmpVXZNIcqnap4twEVYaSIowfcNojyUSrFL5nPc8ZG93WgNNl9rpUPZhssVml3DvXghI80A9SW3QauzohTQAX2bkWelFBHnuG2LKrsJ8en51N6CkjcS5b87y1DVMZELcZ1n5s8PCAA1wyn7OSZlgw00GRzch1YwMoHzBBgIUtMO9HrMyuhgqIPJP7KcKbQkKhtvBXKplX8SCfSlOwUkLwHNKm3HYVE0uVfJ91NAsUrGoCOjYiXYpoRT8bjAPWTm6fDlTq2sbPOyTMoc4xRasmiOJ7B0PT6UxPzCPImM4100sPFxp7Kofv4okKZWTPKTefeYiPefI3jRgfDtEIP9E6a35LZD75lBNMXYlAqL3qlnheUQD1WQimFTHiDsW6bmURptNvtkMjEXzXzpWbnyxBskUGTvP2YQjtSAhWliDXkv6t1x71cYav7TQbqvbIzMRQQsguSGYMbs8YIC4DC9ep5reWAfanlTxcxksbEhQ7FGzXOvcufeGnDl2C85gWfryVzwN7kOZiSEktFMOQ1ngRC23y1fCOiHQVQJ2nLnaW7GILb9wkN1mBTRuHsOefRJST0TnRxcn4bBq4MIibIitVyjPRy7G5XvPEcL4pFaW1HCPGm6pUOEEwTer32JObNGCyTFB1BI2cRLJu5BHPjgG3mmb0gGkGlIfh8D2b2amogpivqEn2r9Y1KOKQ8ufJvG2mYfkevco9DuEZ9Nmzkm6XkCTZaFMNHqbfQaKqsEYK7i2N1KfkBct1leW2H9MQ9QO7AHCqXHK47b1kWVIm6pSJA1yV4funzCqXnIJCEURQgHiKf38YpN7ylLhe1J4UvSG3KeesZNeFFIZOEP9HZUSFMpnN1MOrwejojK0D4qzwucYWtXrTQ8I7UP5QhlijIsCKckUa9C1Osjrq8cgSclYNGt19wpy0onUbX1rOQBUlAAUJs4CyXNU0wmVUjw7tG1LUC8my4s9KZDUj4R5UcPz3VaZRrx1RqYu6YxjroJW70I1LyG4WEiQbOkCoLmaiWo9WzbUS2cErlOo2RPymlkWHxbNnZawX2Bc872ivRHSWqNpRHyuR5QewXmcyghH3EhESBAxTel5E2xuQXfLCEVK0kEk0Mj22KPsckKKyH7sVYC1F4YItQh5hj9Titb7KflQb9vnXQ44UHxY3zBhTQT5PSYv1Kv8HxXCsnpmhZCiBru16iX9oEB33icBVB2KKcZZEEKnCGPVxJlM9RTlyNyQmjHf7z4GeTDuMAUrsMO31WvgZBnWcAOtn6ulBTUCAaqxJiWqzlMx2FSANAlyAjAxqzmQjzPLvQRjskUnBFN3woKB1m2bSo2c5thwA1fKiPvN5LW8tl1rnfNy3rJ0GJpK8nZjkzHMztYrKYAe56pX4SvplpTyibTIiRXLyEVsmuByTHCZhO3fvGoFsav3ZuRhe9eAAWeqAh13eKDTcA0ufME3ZnmJheXEZ3OwrxnFjSf3U0clkWYVont3neh77ODKHhYnX0bOmnJJlr4RqFoLBitskY0kcGMKcZlaej21SENjDcFgaka3CfHbAH5vIFqnoX1JZrZPkQ65PZqQWImP79U3gXWKvz96lElyJZAFqn0Mbltllqw4MhlI766AvHraOmMsJoNvjv1QR7pCSnC0iX6nbqW1eVPaUSZDuZRtRIxfLA8HC9VbxufT2KZV3qG0l7wrZna5Di2MNcBE9uthuVLZcqp8vCmEhINDhRRlipR7tC2iRBHecS5WtxBCpbEm1y1kgNG5o60UKgAswxxuJ3RQ9Y49mPIApBMmp4LFpuKRfcrZb4UJnCfR3pNbQ70nnZ6Be2M7tuJUCoFf
HrhqHXNz5A0uWMgxUS50c60zLl6QAELxHaCGba4WCMOHIo5nSKcUuYtDyDoDlrezALW5mZR4PRPRxnjrXxbJI14qrpymRReC3QgFDJp6sT5TLwvSHaavPlEbt2Eu0Kh5SXklGHXP9YuF3glGuJzSob3NakW1RXF5786U1MHhtJby64LyGWvNn4QXie3VjeL3QQu4C9crEAxSSiOJOfnL3DYIVOY4ipUkKFlF7Rp2q6gZazDvcUCp1cbcr7T7B4s22rXzjN7mHYWOyWuZGwlImeorY3aVKi7BaXbhgOFw6BUmIc1HeGFELHIEnPE9MwOjZam3LOm0rhBHlvJJZkXvJKmDUJrGlyqC5GtC5lDWLfXewyDWDqq7PY0atVQily5GWqib6wub6u6LZ3HZDNP8gK64Nf4kC259AE4V2hCohDnSsXAIoOkehwXyp6CkDT42NJb6sXHUv2N6cm292MiKA22PKWrwUGsan599KI2V67YRDfcfiB4ZHRDiSe62MBE0fGLIgXLIWw1xTWYbPQ9YAj3xovBvmewbJ1De4k6uS"); + c.bench_function("keccak_256_with_large_input", |b| { + b.iter(|| { + let _out = keccak(black_box(&data)); + }) + }); } diff --git a/keccak-hash/src/lib.rs b/keccak-hash/src/lib.rs index 1b8926be1..a3c128e85 100644 --- a/keccak-hash/src/lib.rs +++ b/keccak-hash/src/lib.rs @@ -1,36 +1,36 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "std")] use std::io; -use core::slice; pub use primitive_types::H256; -use tiny_keccak::Keccak; +use tiny_keccak::{Hasher, Keccak}; /// Get the KECCAK (i.e. Keccak) hash of the empty bytes string. -pub const KECCAK_EMPTY: H256 = H256( [0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70] ); +pub const KECCAK_EMPTY: H256 = H256([ + 0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, + 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70, +]); /// The KECCAK of the RLP encoding of empty data. -pub const KECCAK_NULL_RLP: H256 = H256( [0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21] ); +pub const KECCAK_NULL_RLP: H256 = H256([ + 0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, + 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21, +]); /// The KECCAK of the RLP encoding of empty list. 
-pub const KECCAK_EMPTY_LIST_RLP: H256 = H256( [0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67, 0xb6, 0xcc, 0xd4, 0x1a, 0xd3, 0x12, 0x45, 0x1b, 0x94, 0x8a, 0x74, 0x13, 0xf0, 0xa1, 0x42, 0xfd, 0x40, 0xd4, 0x93, 0x47] ); +pub const KECCAK_EMPTY_LIST_RLP: H256 = H256([ + 0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67, 0xb6, 0xcc, 0xd4, 0x1a, 0xd3, 0x12, 0x45, + 0x1b, 0x94, 0x8a, 0x74, 0x13, 0xf0, 0xa1, 0x42, 0xfd, 0x40, 0xd4, 0x93, 0x47, +]); pub fn keccak>(s: T) -> H256 { let mut result = [0u8; 32]; @@ -38,47 +38,104 @@ pub fn keccak>(s: T) -> H256 { H256(result) } -pub unsafe fn keccak_256_unchecked(out: *mut u8, outlen: usize, input: *const u8, inputlen: usize) { - // This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This - // means that we can reuse the input buffer for both input and output. - Keccak::keccak256( - slice::from_raw_parts(input, inputlen), - slice::from_raw_parts_mut(out, outlen) - ); +/// Computes in-place keccak256 hash of `data`. +pub fn keccak256(data: &mut [u8]) { + let mut keccak256 = Keccak::v256(); + keccak256.update(data.as_ref()); + keccak256.finalize(data); +} + +/// Computes in-place keccak256 hash of `data[range]`. +/// +/// The `range` argument specifies a subslice of `data` in bytes to be hashed. +/// The resulting hash will be written back to `data`. +/// # Panics +/// +/// If `range` is out of bounds. +/// +/// # Example +/// +/// ``` +/// let mut data = [1u8; 32]; +/// // Hash the first 8 bytes of `data` and write the result, 32 bytes, to `data`. +/// keccak_hash::keccak256_range(&mut data, 0..8); +/// let expected = [ +/// 0x54, 0x84, 0x4f, 0x69, 0xb4, 0xda, 0x4b, 0xb4, 0xa9, 0x9f, 0x24, 0x59, 0xb5, 0x11, 0xd4, 0x42, +/// 0xcc, 0x5b, 0xd2, 0xfd, 0xf4, 0xc3, 0x54, 0xd2, 0x07, 0xbb, 0x13, 0x08, 0x94, 0x43, 0xaf, 0x68, +/// ]; +/// assert_eq!(&data, &expected); +/// ``` +pub fn keccak256_range(data: &mut [u8], range: core::ops::Range) { + let mut keccak256 = Keccak::v256(); + keccak256.update(&data[range]); + keccak256.finalize(data); } -pub unsafe fn keccak_512_unchecked(out: *mut u8, outlen: usize, input: *const u8, inputlen: usize) { - // This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This - // means that we can reuse the input buffer for both input and output. - Keccak::keccak512( - slice::from_raw_parts(input, inputlen), - slice::from_raw_parts_mut(out, outlen) - ); +/// Computes in-place keccak512 hash of `data`. +pub fn keccak512(data: &mut [u8]) { + let mut keccak512 = Keccak::v512(); + keccak512.update(data.as_ref()); + keccak512.finalize(data); } -pub fn keccak_256(input: &[u8], mut output: &mut [u8]) { Keccak::keccak256(input, &mut output); } +/// Computes in-place keccak512 hash of `data[range]`. +/// +/// The `range` argument specifies a subslice of `data` in bytes to be hashed. +/// The resulting hash will be written back to `data`. +/// # Panics +/// +/// If `range` is out of bounds. 
+/// +/// # Example +/// +/// ``` +/// let mut data = [1u8; 64]; +/// keccak_hash::keccak512_range(&mut data, 0..8); +/// let expected = [ +/// 0x90, 0x45, 0xc5, 0x9e, 0xd3, 0x0e, 0x1f, 0x42, 0xac, 0x35, 0xcc, 0xc9, 0x55, 0x7c, 0x77, 0x17, +/// 0xc8, 0x89, 0x3a, 0x77, 0x6c, 0xea, 0x2e, 0xf3, 0x88, 0xea, 0xe5, 0xc0, 0xea, 0x40, 0x26, 0x64, +/// ]; +/// assert_eq!(&data[..32], &expected); +/// ``` +pub fn keccak512_range(data: &mut [u8], range: core::ops::Range) { + let mut keccak512 = Keccak::v512(); + keccak512.update(&data[range]); + keccak512.finalize(data); +} -pub fn keccak_512(input: &[u8], mut output: &mut [u8]) { Keccak::keccak512(input, &mut output); } +pub fn keccak_256(input: &[u8], output: &mut [u8]) { + write_keccak(input, output); +} -pub fn write_keccak>(s: T, dest: &mut [u8]) { Keccak::keccak256(s.as_ref(), dest); } +pub fn keccak_512(input: &[u8], output: &mut [u8]) { + let mut keccak512 = Keccak::v512(); + keccak512.update(input); + keccak512.finalize(output); +} + +pub fn write_keccak>(s: T, dest: &mut [u8]) { + let mut keccak256 = Keccak::v256(); + keccak256.update(s.as_ref()); + keccak256.finalize(dest); +} #[cfg(feature = "std")] pub fn keccak_pipe(r: &mut dyn io::BufRead, w: &mut dyn io::Write) -> Result { let mut output = [0u8; 32]; let mut input = [0u8; 1024]; - let mut keccak = Keccak::new_keccak256(); + let mut keccak256 = Keccak::v256(); // read file loop { let some = r.read(&mut input)?; if some == 0 { - break; + break } - keccak.update(&input[0..some]); + keccak256.update(&input[0..some]); w.write_all(&input[0..some])?; } - keccak.finalize(&mut output); + keccak256.finalize(&mut output); Ok(output.into()) } @@ -106,10 +163,8 @@ mod tests { assert_eq!( keccak([0x41u8; 32]), H256([ - 0x59, 0xca, 0xd5, 0x94, 0x86, 0x73, 0x62, 0x2c, - 0x1d, 0x64, 0xe2, 0x32, 0x24, 0x88, 0xbf, 0x01, - 0x61, 0x9f, 0x7f, 0xf4, 0x57, 0x89, 0x74, 0x1b, - 0x15, 0xa9, 0xf7, 0x82, 0xce, 0x92, 0x90, 0xa8 + 0x59, 0xca, 0xd5, 0x94, 0x86, 0x73, 0x62, 0x2c, 0x1d, 0x64, 0xe2, 0x32, 0x24, 0x88, 0xbf, 0x01, 0x61, + 0x9f, 0x7f, 0xf4, 0x57, 0x89, 0x74, 0x1b, 0x15, 0xa9, 0xf7, 0x82, 0xce, 0x92, 0x90, 0xa8 ]), ); } @@ -118,12 +173,10 @@ mod tests { fn write_keccak_with_content() { let data: Vec = From::from("hello world"); let expected = vec![ - 0x47, 0x17, 0x32, 0x85, 0xa8, 0xd7, 0x34, 0x1e, - 0x5e, 0x97, 0x2f, 0xc6, 0x77, 0x28, 0x63, 0x84, - 0xf8, 0x02, 0xf8, 0xef, 0x42, 0xa5, 0xec, 0x5f, - 0x03, 0xbb, 0xfa, 0x25, 0x4c, 0xb0, 0x1f, 0xad + 0x47, 0x17, 0x32, 0x85, 0xa8, 0xd7, 0x34, 0x1e, 0x5e, 0x97, 0x2f, 0xc6, 0x77, 0x28, 0x63, 0x84, 0xf8, 0x02, + 0xf8, 0xef, 0x42, 0xa5, 0xec, 0x5f, 0x03, 0xbb, 0xfa, 0x25, 0x4c, 0xb0, 0x1f, 0xad, ]; - let mut dest = [0u8;32]; + let mut dest = [0u8; 32]; write_keccak(data, &mut dest); assert_eq!(dest, expected.as_ref()); @@ -132,11 +185,13 @@ mod tests { #[cfg(feature = "std")] #[test] fn should_keccak_a_file() { - use std::fs; - use std::io::{Write, BufReader}; + use std::{ + fs, + io::{BufReader, Write}, + }; // given - let tmpdir = tempdir::TempDir::new("keccak").unwrap(); + let tmpdir = tempfile::Builder::new().prefix("keccak").tempdir().unwrap(); let mut path = tmpdir.path().to_owned(); path.push("should_keccak_a_file"); // Prepare file diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md new file mode 100644 index 000000000..c8f3c12fe --- /dev/null +++ b/kvdb-memorydb/CHANGELOG.md @@ -0,0 +1,62 @@ +# Changelog + +The format is based on [Keep a Changelog]. 
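For context on the keccak-hash API shown above, where the unsafe `keccak_256_unchecked`/`keccak_512_unchecked` helpers give way to safe in-place hashing, here is a minimal usage sketch. It is not part of the diff and simply assumes the crate as modified in this PR:

use keccak_hash::{keccak, keccak256, keccak256_range, write_keccak, KECCAK_EMPTY};

fn main() {
    // One-shot hashing of anything that is AsRef<[u8]>; returns an H256.
    assert_eq!(keccak([0u8; 0]), KECCAK_EMPTY);

    // In-place hashing: the 32-byte digest overwrites the buffer.
    let mut buf = [1u8; 32];
    keccak256(&mut buf);

    // Hash only a subrange of the buffer; the digest is written back into it.
    let mut data = [1u8; 32];
    keccak256_range(&mut data, 0..8);

    // Hash into a caller-provided destination slice.
    let mut dest = [0u8; 32];
    write_keccak(b"hello world", &mut dest);
}

The doc examples added in the diff give the exact digests expected from `keccak256_range` and `keccak512_range` for fixed inputs.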
+ +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.13.0] - 2022-11-29 +- Removed `parity-util-mem` support. [#696](https://github.com/paritytech/parity-common/pull/696) + +## [0.12.0] - 2022-09-20 +### Breaking +- Updated `kvdb` to 0.12. [662](https://github.com/paritytech/parity-common/pull/662) +- Updated `parity-util-mem` to 0.12. [#680](https://github.com/paritytech/parity-common/pull/680) + +## [0.11.0] - 2022-02-04 +### Breaking +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `kvdb` to 0.11. [#623](https://github.com/paritytech/parity-common/pull/623) + +## [0.10.0] - 2021-07-02 +### Breaking +- Updated `parity-util-mem` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) +- Updated `kvdb` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) + +## [0.9.0] - 2021-01-27 +### Breaking +- Updated `parity-util-mem` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) +- Updated `kvdb` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) + +## [0.8.0] - 2021-01-05 +### Breaking +- Updated dependencies. [#470](https://github.com/paritytech/parity-common/pull/470) + +## [0.7.0] - 2020-06-24 +- Updated `kvdb` to 0.7. [#402](https://github.com/paritytech/parity-common/pull/402) + +## [0.6.0] - 2020-05-05 +### Breaking +- Updated to the new `kvdb` interface. [#313](https://github.com/paritytech/parity-common/pull/313) + +## [0.5.0] - 2020-03-16 +- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) +- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) + +## [0.4.0] - 2019-02-05 +- Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332) + +## [0.3.1] - 2019-01-06 +- Updated features and feature dependencies. [#307](https://github.com/paritytech/parity-common/pull/307) + +## [0.3.0] - 2019-01-03 +- InMemory key-value database now can report memory used (via `MallocSizeOf`). [#292](https://github.com/paritytech/parity-common/pull/292) + +## [0.2.0] - 2019-12-19 +### Fixed +- `iter_from_prefix` behaviour synced with the `kvdb-rocksdb` +### Changed +- Default column support removed from the API + - Column argument type changed from `Option` to `u32` + - Migration `None` -> unsupported, `Some(0)` -> `0`, `Some(1)` -> `1`, etc. diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 301e562bf..3b82d5fda 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,11 +1,19 @@ [package] name = "kvdb-memorydb" -version = "0.1.0" +version = "0.13.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" -license = "GPL-3.0" +license = "MIT OR Apache-2.0" +edition = "2021" +rust-version = "1.56.1" [dependencies] -parking_lot = "0.6" -kvdb = { version = "0.1", path = "../kvdb" } +parking_lot = "0.12.0" +kvdb = { version = "0.13", path = "../kvdb" } + +[dev-dependencies] +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.11" } + +[features] +default = [] diff --git a/kvdb-memorydb/src/lib.rs b/kvdb-memorydb/src/lib.rs index 7a4590632..67773b1ac 100644 --- a/kvdb-memorydb/src/lib.rs +++ b/kvdb-memorydb/src/lib.rs @@ -1,118 +1,167 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. 
- -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -extern crate parking_lot; -extern crate kvdb; - -use std::collections::{BTreeMap, HashMap}; -use std::io; +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use kvdb::{DBKeyValue, DBOp, DBTransaction, DBValue, KeyValueDB}; use parking_lot::RwLock; -use kvdb::{DBValue, DBTransaction, KeyValueDB, DBOp}; +use std::{ + collections::{BTreeMap, HashMap}, + io, +}; /// A key-value database fulfilling the `KeyValueDB` trait, living in memory. /// This is generally intended for tests and is not particularly optimized. #[derive(Default)] pub struct InMemory { - columns: RwLock, BTreeMap, DBValue>>>, + columns: RwLock, DBValue>>>, } /// Create an in-memory database with the given number of columns. /// Columns will be indexable by 0..`num_cols` pub fn create(num_cols: u32) -> InMemory { let mut cols = HashMap::new(); - cols.insert(None, BTreeMap::new()); for idx in 0..num_cols { - cols.insert(Some(idx), BTreeMap::new()); + cols.insert(idx, BTreeMap::new()); } - InMemory { - columns: RwLock::new(cols) - } + InMemory { columns: RwLock::new(cols) } +} + +fn invalid_column(col: u32) -> io::Error { + io::Error::new(io::ErrorKind::Other, format!("No such column family: {:?}", col)) } impl KeyValueDB for InMemory { - fn get(&self, col: Option, key: &[u8]) -> io::Result> { + fn get(&self, col: u32, key: &[u8]) -> io::Result> { let columns = self.columns.read(); match columns.get(&col) { - None => Err(io::Error::new(io::ErrorKind::Other, format!("No such column family: {:?}", col))), + None => Err(invalid_column(col)), Some(map) => Ok(map.get(key).cloned()), } } - fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { + fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> io::Result> { let columns = self.columns.read(); match columns.get(&col) { - None => None, - Some(map) => - map.iter() - .find(|&(ref k ,_)| k.starts_with(prefix)) - .map(|(_, v)| v.to_vec().into_boxed_slice()) + None => Err(invalid_column(col)), + Some(map) => Ok(map.iter().find(|&(ref k, _)| k.starts_with(prefix)).map(|(_, v)| v.to_vec())), } } - fn write_buffered(&self, transaction: DBTransaction) { + fn write(&self, transaction: DBTransaction) -> io::Result<()> { let mut columns = self.columns.write(); let ops = transaction.ops; for op in ops { match op { - DBOp::Insert { col, key, value } => { + DBOp::Insert { col, key, value } => if let Some(col) = columns.get_mut(&col) { col.insert(key.into_vec(), value); - } - }, - DBOp::Delete { col, key } => { + }, + DBOp::Delete { col, key } => if let Some(col) = columns.get_mut(&col) { col.remove(&*key); - } - }, + }, + DBOp::DeletePrefix { col, prefix } => + if let Some(col) = columns.get_mut(&col) { + use std::ops::Bound; + if prefix.is_empty() { + col.clear(); + } else { + let start_range = 
Bound::Included(prefix.to_vec()); + let keys: Vec<_> = if let Some(end_range) = kvdb::end_prefix(&prefix[..]) { + col.range((start_range, Bound::Excluded(end_range))) + .map(|(k, _)| k.clone()) + .collect() + } else { + col.range((start_range, Bound::Unbounded)).map(|(k, _)| k.clone()).collect() + }; + for key in keys.into_iter() { + col.remove(&key[..]); + } + } + }, } } - } - - fn flush(&self) -> io::Result<()> { Ok(()) } - fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { + fn iter<'a>(&'a self, col: u32) -> Box> + 'a> { match self.columns.read().get(&col) { - Some(map) => Box::new( // TODO: worth optimizing at all? - map.clone() - .into_iter() - .map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice())) + Some(map) => Box::new( + // TODO: worth optimizing at all? + map.clone().into_iter().map(|(k, v)| Ok((k.into(), v))), ), - None => Box::new(None.into_iter()), + None => Box::new(std::iter::once(Err(invalid_column(col)))), } } - fn iter_from_prefix<'a>(&'a self, col: Option, prefix: &'a [u8]) - -> Box, Box<[u8]>)> + 'a> - { + fn iter_with_prefix<'a>( + &'a self, + col: u32, + prefix: &'a [u8], + ) -> Box> + 'a> { match self.columns.read().get(&col) { Some(map) => Box::new( map.clone() .into_iter() - .skip_while(move |&(ref k, _)| !k.starts_with(prefix)) - .map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice())) + .filter(move |&(ref k, _)| k.starts_with(prefix)) + .map(|(k, v)| Ok((k.into(), v))), ), - None => Box::new(None.into_iter()), + None => Box::new(std::iter::once(Err(invalid_column(col)))), } } +} + +#[cfg(test)] +mod tests { + use super::create; + use kvdb_shared_tests as st; + use std::io; + + #[test] + fn get_fails_with_non_existing_column() -> io::Result<()> { + let db = create(1); + st::test_get_fails_with_non_existing_column(&db) + } + + #[test] + fn put_and_get() -> io::Result<()> { + let db = create(1); + st::test_put_and_get(&db) + } + + #[test] + fn delete_and_get() -> io::Result<()> { + let db = create(1); + st::test_delete_and_get(&db) + } + + #[test] + fn delete_prefix() -> io::Result<()> { + let db = create(st::DELETE_PREFIX_NUM_COLUMNS); + st::test_delete_prefix(&db) + } + + #[test] + fn iter() -> io::Result<()> { + let db = create(1); + st::test_iter(&db) + } + + #[test] + fn iter_with_prefix() -> io::Result<()> { + let db = create(1); + st::test_iter_with_prefix(&db) + } - fn restore(&self, _new_db: &str) -> io::Result<()> { - Err(io::Error::new(io::ErrorKind::Other, "Attempted to restore in-memory database")) + #[test] + fn complex() -> io::Result<()> { + let db = create(1); + st::test_complex(&db) } } diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md new file mode 100644 index 000000000..d3426aec6 --- /dev/null +++ b/kvdb-rocksdb/CHANGELOG.md @@ -0,0 +1,124 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.19.0] - 2023-05-10 +- Updated `rocksdb` to 0.21. [#750](https://github.com/paritytech/parity-common/pull/750) + +## [0.18.0] - 2023-04-21 +- Updated `rocksdb` to 0.20.1. [#743](https://github.com/paritytech/parity-common/pull/743) + +## [0.17.0] - 2022-11-29 +- Removed `parity-util-mem` support. [#696](https://github.com/paritytech/parity-common/pull/696) + +## [0.16.0] - 2022-09-20 +- Removed `owning_ref` from dependencies :tada:. [#662](https://github.com/paritytech/parity-common/pull/662) +- No longer attempt to repair on `open`. 
[#667](https://github.com/paritytech/parity-common/pull/667) +### Breaking +- Updated `kvdb` to 0.12. [#662](https://github.com/paritytech/parity-common/pull/662) + - `add_column` and `remove_last_column` now require `&mut self` + +## [0.15.2] - 2022-03-20 +- Disable `jemalloc` feature for `rocksdb` where it is not working. [#633](https://github.com/paritytech/parity-common/pull/633) + +## [0.15.1] - 2022-02-18 +- Updated `rocksdb` to 0.18 and enable `jemalloc` feature. [#629](https://github.com/paritytech/parity-common/pull/629) + +## [0.15.0] - 2022-02-04 +### Breaking +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Bumped `kvdb` and `parity-util-mem`. [#623](https://github.com/paritytech/parity-common/pull/623) + +## [0.14.0] - 2021-08-05 +### Breaking +- `Database` api uses now template argument `P: AsRef` instead of `&str` [#579](https://github.com/paritytech/parity-common/pull/579) + +## [0.13.0] - 2021-08-04 +### Breaking +- `DatabaseConfig` is now `#[non_exhaustive]`. [#576](https://github.com/paritytech/parity-common/pull/576) +- Added `create_if_missing` to `DatabaseConfig`. [#576](https://github.com/paritytech/parity-common/pull/576) + +## [0.12.1] - 2021-07-30 +- Bumped `rocksdb` to 0.17. [#573](https://github.com/paritytech/parity-common/pull/573) + +## [0.12.0] - 2021-07-02 +### Breaking +- Updated `kvdb` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) +- Updated `parity-util-mem` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) + +## [0.11.1] - 2021-05-03 +- Updated `rocksdb` to 0.16. [#537](https://github.com/paritytech/parity-common/pull/537) + +## [0.11.0] - 2021-01-27 +### Breaking +- Updated `kvdb` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) +- Updated `parity-util-mem` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) + +## [0.10.0] - 2021-01-05 +### Breaking +- Updated dependencies. [#470](https://github.com/paritytech/parity-common/pull/470) + +## [0.9.1] - 2020-08-26 +- Updated rocksdb to 0.15. [#424](https://github.com/paritytech/parity-common/pull/424) +- Set `format_version` to 5. [#395](https://github.com/paritytech/parity-common/pull/395) + +## [0.9.0] - 2020-06-24 +- Updated `kvdb` to 0.7. [#402](https://github.com/paritytech/parity-common/pull/402) + +## [0.8.0] - 2020-05-05 +- Updated RocksDB to 6.7.3. [#379](https://github.com/paritytech/parity-common/pull/379) +### Breaking +- Updated to the new `kvdb` interface. [#313](https://github.com/paritytech/parity-common/pull/313) +- Rename and optimize prefix iteration. [#365](https://github.com/paritytech/parity-common/pull/365) +- Added Secondary Instance API. [#384](https://github.com/paritytech/parity-common/pull/384) + +## [0.7.0] - 2020-03-16 +- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) + +## [0.6.0] - 2020-02-28 +- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) +- Added `get_statistics` method and `enable_statistics` config parameter. [#347](https://github.com/paritytech/parity-common/pull/347) + +## [0.5.0] - 2019-02-05 +- Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332) + +## [0.4.2] - 2019-02-04 +### Fixes +- Fixed `iter_from_prefix` being slow. [#326](https://github.com/paritytech/parity-common/pull/326) + +## [0.4.1] - 2019-01-06 +- Updated features and feature dependencies. 
[#307](https://github.com/paritytech/parity-common/pull/307) + +## [0.4.0] - 2019-01-03 +- Add I/O statistics for RocksDB. [#294](https://github.com/paritytech/parity-common/pull/294) +- Support querying memory footprint via `MallocSizeOf` trait. [#292](https://github.com/paritytech/parity-common/pull/292) + +## [0.3.0] - 2019-12-19 +- Use `get_pinned` API to save one allocation for each call to `get()`. [#274](https://github.com/paritytech/parity-common/pull/274) +- Rename `drop_column` to `remove_last_column`. [#274](https://github.com/paritytech/parity-common/pull/274) +- Rename `get_cf` to `cf`. [#274](https://github.com/paritytech/parity-common/pull/274) +- Default column support removed from the API. [#278](https://github.com/paritytech/parity-common/pull/278) + - Column argument type changed from `Option` to `u32` + - Migration + - Column index `None` -> unsupported, `Some(0)` -> `0`, `Some(1)` -> `1`, etc. + - Database must be opened with at least one column and existing DBs has to be opened with a number of columns increased by 1 to avoid having to migrate the data, e.g. before: `Some(9)`, after: `10`. + - `DatabaseConfig::default()` defaults to 1 column + - `Database::with_columns` still accepts `u32`, but panics if `0` is provided + - `Database::open` panics if configuration with 0 columns is provided +- Add `num_keys(col)` to get an estimate of the number of keys in a column. [#285](https://github.com/paritytech/parity-common/pull/285) +- Remove `ElasticArray` and use the new `DBValue` (alias for `Vec`) and `DBKey` types from `kvdb`. [#282](https://github.com/paritytech/parity-common/pull/282) + +## [0.2.0] - 2019-11-28 +- Switched away from using [parity-rocksdb](https://crates.io/crates/parity-rocksdb) in favour of upstream [rust-rocksdb](https://crates.io/crates/rocksdb). [#257](https://github.com/paritytech/parity-common/pull/257) +- Revamped configuration handling, allowing per-column memory budgeting. [#256](https://github.com/paritytech/parity-common/pull/256) +### Dependencies +- rust-rocksdb v0.13 + +## [0.1.6] - 2019-10-24 +- Updated to 2018 edition idioms. [#237](https://github.com/paritytech/parity-common/pull/237) +### Dependencies +- Updated dependencies. 
[#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 57e7e6a2b..b866b1344 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,22 +1,44 @@ [package] name = "kvdb-rocksdb" -version = "0.1.4" +version = "0.19.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" -description = "kvdb implementation backed by rocksDB" -license = "GPL-3.0" +description = "kvdb implementation backed by RocksDB" +license = "MIT OR Apache-2.0" +edition = "2021" +rust-version = "1.56.1" + +[[bench]] +name = "bench_read_perf" +harness = false [dependencies] -elastic-array = "0.10" -fs-swap = "0.2.4" -interleaved-ordered = "0.1.0" -kvdb = { version = "0.1", path = "../kvdb" } -log = "0.4" -num_cpus = "1.0" -parking_lot = "0.6" -regex = "1.0" -parity-rocksdb = "0.5" +smallvec = "1.0.0" +kvdb = { path = "../kvdb", version = "0.13" } +num_cpus = "1.10.1" +parking_lot = "0.12.0" +regex = "1.3.1" + +# OpenBSD and MSVC are untested and shouldn't enable jemalloc: +# https://github.com/tikv/jemallocator/blob/52de4257fab3e770f73d5174c12a095b49572fba/jemalloc-sys/build.rs#L26-L27 +[target.'cfg(any(target_os = "openbsd", target_env = "msvc"))'.dependencies.rocksdb] +default-features = false +features = ["snappy"] +version = "0.22.0" + +[target.'cfg(not(any(target_os = "openbsd", target_env = "msvc")))'.dependencies.rocksdb] +default-features = false +features = ["snappy", "jemalloc"] +version = "0.22.0" [dev-dependencies] -tempdir = "0.3" -ethereum-types = { version = "0.7", path = "../ethereum-types" } +alloc_counter = "0.0.4" +criterion = "0.5" +ethereum-types = { path = "../ethereum-types", features = ["rand"] } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.11" } +rand = "0.8.0" +tempfile = "3.1.0" +keccak-hash = { path = "../keccak-hash" } +sysinfo = "0.30.13" +ctrlc = "3.1.4" +chrono = "0.4" diff --git a/kvdb-rocksdb/benches/.gitignore b/kvdb-rocksdb/benches/.gitignore new file mode 100644 index 000000000..85954e328 --- /dev/null +++ b/kvdb-rocksdb/benches/.gitignore @@ -0,0 +1 @@ +_rocksdb_bench_get diff --git a/kvdb-rocksdb/benches/bench_read_perf.rs b/kvdb-rocksdb/benches/bench_read_perf.rs new file mode 100644 index 000000000..3b87e8bb1 --- /dev/null +++ b/kvdb-rocksdb/benches/bench_read_perf.rs @@ -0,0 +1,206 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Benchmark RocksDB read performance. +//! The benchmark setup consists of writing `NEEDLES * NEEDLES_TO_HAYSTACK_RATIO` 32-byte random +//! keys with random values 150 +/- 30 bytes long. With 10 000 keys and a ratio of 100 we get one +//! million keys; ideally the db should be deleted for each benchmark run but in practice it has +//! little impact on the performance numbers for these small database sizes. +//! Allocations (on the Rust side) are counted and printed. +//! +//! Note that this benchmark is not a good way to measure the performance of the database itself; +//! its purpose is to be a tool to gauge the performance of the glue code, or to work as a starting point +//! for a more elaborate benchmark of a specific workload.
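The module documentation above describes what is being measured; the pattern the benchmarks below rely on combines Criterion's `iter_custom` timing with `alloc_counter`, so that wall time and Rust-side allocation counts are captured for the same timed block. A condensed sketch of that pattern, not part of the diff (the sample operation is just a heap allocation standing in for a DB read):

use std::time::{Duration, Instant};

use alloc_counter::{count_alloc, AllocCounterSystem};
use criterion::{black_box, criterion_group, criterion_main, Criterion};

// Counting allocations requires installing the counting allocator globally.
#[global_allocator]
static A: AllocCounterSystem = AllocCounterSystem;

fn alloc_counting_bench(c: &mut Criterion) {
    c.bench_function("sample op", |b| {
        b.iter_custom(|iterations| {
            let mut elapsed = Duration::new(0, 0);
            // count_alloc runs the closure and returns its allocation counters alongside
            // the closure's result; the first counter is the number of allocations.
            let (alloc_stats, _) = count_alloc(|| {
                let start = Instant::now();
                for _ in 0..iterations {
                    // The operation under test goes here.
                    black_box(vec![0u8; 140]);
                }
                elapsed = start.elapsed();
            });
            println!("allocations for {} iterations: {}", iterations, alloc_stats.0);
            // iter_custom expects the measured wall time for all `iterations` runs.
            elapsed
        })
    });
}

criterion_group!(alloc_bench, alloc_counting_bench);
criterion_main!(alloc_bench);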
+ +const NEEDLES: usize = 10_000; +const NEEDLES_TO_HAYSTACK_RATIO: usize = 100; + +use std::{ + io, + time::{Duration, Instant}, +}; + +use alloc_counter::{count_alloc, AllocCounterSystem}; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use ethereum_types::H256; +use rand::{distributions::Uniform, seq::SliceRandom, Rng}; + +use kvdb_rocksdb::{Database, DatabaseConfig}; + +#[global_allocator] +static A: AllocCounterSystem = AllocCounterSystem; + +criterion_group!(benches, get, iter); +criterion_main!(benches); + +/// Opens (or creates) a RocksDB database in the `benches/` folder of the crate with one column +/// family and default options. Needs manual cleanup. +fn open_db() -> Database { + let tempdir_str = "./benches/_rocksdb_bench_get"; + let cfg = DatabaseConfig::with_columns(1); + let db = Database::open(&cfg, tempdir_str).expect("rocksdb works"); + db +} + +/// Generate `n` random bytes +/- 20%. +/// The variability in the payload size lets us simulate payload allocation patterns: `DBValue` is +/// an `ElasticArray128` so sometimes we save on allocations. +fn n_random_bytes(n: usize) -> Vec { + let mut rng = rand::thread_rng(); + let variability: i64 = rng.gen_range(0..(n / 5) as i64); + let plus_or_minus: i64 = if variability % 2 == 0 { 1 } else { -1 }; + let range = Uniform::from(0..u8::max_value()); + rng.sample_iter(&range) + .take((n as i64 + plus_or_minus * variability) as usize) + .collect() +} + +/// Writes `NEEDLES * NEEDLES_TO_HAYSTACK_RATIO` keys to the DB. Keys are random, 32 bytes long and +/// values are random, 120-180 bytes long. Every `NEEDLES_TO_HAYSTACK_RATIO`th key is kept and +/// returned in a `Vec`, and used to benchmark point lookup performance. Keys are sorted +/// lexicographically in the DB, and the benchmark keys are random bytes, making the needles +/// effectively random points in the key set. +fn populate(db: &Database) -> io::Result> { + let mut needles = Vec::with_capacity(NEEDLES); + let mut batch = db.transaction(); + for i in 0..NEEDLES * NEEDLES_TO_HAYSTACK_RATIO { + let key = H256::random(); + if i % NEEDLES_TO_HAYSTACK_RATIO == 0 { + needles.push(key.clone()); + if i % 100_000 == 0 && i > 0 { + println!("[populate] {} keys", i); + } + } + // In ethereum keys are mostly 32 bytes and payloads ~140 bytes.
+ batch.put(0, &key.as_bytes(), &n_random_bytes(140)); + } + db.write(batch)?; + Ok(needles) +} + +fn get(c: &mut Criterion) { + let db = open_db(); + let needles = populate(&db).expect("rocksdb works"); + + let mut total_iterations = 0; + let mut total_allocs = 0; + + c.bench_function("get key", |b| { + b.iter_custom(|iterations| { + total_iterations += iterations; + let mut elapsed = Duration::new(0, 0); + // NOTE: counts allocations on the Rust side only + let (alloc_stats, _) = count_alloc(|| { + let start = Instant::now(); + for _ in 0..iterations { + // This has no measurable impact on performance (~30ns) + let needle = needles.choose(&mut rand::thread_rng()).expect("needles is not empty"); + black_box(db.get(0, needle.as_bytes()).unwrap()); + } + elapsed = start.elapsed(); + }); + total_allocs += alloc_stats.0; + elapsed + }); + }); + if total_iterations > 0 { + println!( + "[get key] total: iterations={}, allocations={}; allocations per iter={:.2}\n", + total_iterations, + total_allocs, + total_allocs as f64 / total_iterations as f64 + ); + } + + total_iterations = 0; + total_allocs = 0; + c.bench_function("get key by prefix", |b| { + b.iter_custom(|iterations| { + total_iterations += iterations; + let mut elapsed = Duration::new(0, 0); + // NOTE: counts allocations on the Rust side only + let (alloc_stats, _) = count_alloc(|| { + let start = Instant::now(); + for _ in 0..iterations { + // This has no measurable impact on performance (~30ns) + let needle = needles.choose(&mut rand::thread_rng()).expect("needles is not empty"); + black_box(db.get_by_prefix(0, &needle.as_bytes()[..8]).unwrap()); + } + elapsed = start.elapsed(); + }); + total_allocs += alloc_stats.0; + elapsed + }); + }); + if total_iterations > 0 { + println!( + "[get key by prefix] total: iterations={}, allocations={}; allocations per iter={:.2}\n", + total_iterations, + total_allocs, + total_allocs as f64 / total_iterations as f64 + ); + } +} + +fn iter(c: &mut Criterion) { + let db = open_db(); + let mut total_iterations = 0; + let mut total_allocs = 0; + + c.bench_function("iterate over 1k keys", |b| { + b.iter_custom(|iterations| { + total_iterations += iterations; + let mut elapsed = Duration::new(0, 0); + // NOTE: counts allocations on the Rust side only + let (alloc_stats, _) = count_alloc(|| { + let start = Instant::now(); + for _ in 0..iterations { + black_box(db.iter(0).take(1000).collect::>()); + } + elapsed = start.elapsed(); + }); + total_allocs += alloc_stats.0; + elapsed + }); + }); + if total_iterations > 0 { + println!( + "[iterate over 1k keys] total: iterations={}, allocations={}; allocations per iter={:.2}\n", + total_iterations, + total_allocs, + total_allocs as f64 / total_iterations as f64 + ); + } + + total_allocs = 0; + total_iterations = 0; + c.bench_function("single key from iterator", |b| { + b.iter_custom(|iterations| { + total_iterations += iterations; + let mut elapsed = Duration::new(0, 0); + // NOTE: counts allocations on the Rust side only + let (alloc_stats, _) = count_alloc(|| { + let start = Instant::now(); + for _ in 0..iterations { + black_box(db.iter(0).next().unwrap().unwrap()); + } + elapsed = start.elapsed(); + }); + total_allocs += alloc_stats.0; + elapsed + }); + }); + if total_iterations > 0 { + println!( + "[single key from iterator] total: iterations={}, allocations={}; allocations per iter={:.2}\n", + total_iterations, + total_allocs, + total_allocs as f64 / total_iterations as f64 + ); + } +} diff --git a/kvdb-rocksdb/examples/memtest.rs 
b/kvdb-rocksdb/examples/memtest.rs new file mode 100644 index 000000000..f60ea50ad --- /dev/null +++ b/kvdb-rocksdb/examples/memtest.rs @@ -0,0 +1,152 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +// This program starts writing random data to the database with 100 (COLUMN_COUNT) +// columns and never stops until interrupted. + +use ethereum_types::H256; +use keccak_hash::keccak; +use kvdb_rocksdb::{Database, DatabaseConfig}; +use std::sync::{ + atomic::{AtomicBool, Ordering as AtomicOrdering}, + Arc, +}; +use sysinfo::{get_current_pid, System}; + +const COLUMN_COUNT: u32 = 100; + +#[derive(Clone)] +struct KeyValue { + key: H256, + val: H256, +} + +fn next(seed: H256) -> H256 { + let mut buf = [0u8; 33]; + buf[0..32].copy_from_slice(&seed[..]); + buf[32] = 1; + + keccak(&buf[..]) +} + +impl KeyValue { + fn with_seed(seed: H256) -> Self { + KeyValue { key: next(seed), val: next(next(seed)) } + } + + fn new() -> Self { + Self::with_seed(H256::random()) + } +} + +impl Iterator for KeyValue { + type Item = (H256, H256); + + fn next(&mut self) -> Option { + let result = (self.key, self.val); + self.key = next(self.val); + self.val = next(self.key); + + Some(result) + } +} + +fn proc_memory_usage() -> u64 { + let mut sys = System::new(); + let self_pid = get_current_pid().ok(); + let memory = if let Some(self_pid) = self_pid { + if sys.refresh_process(self_pid) { + let proc = sys + .process(self_pid) + .expect("Above refresh_process succeeds, this should be Some(), qed"); + proc.memory() + } else { + 0 + } + } else { + 0 + }; + + memory +} + +fn main() { + let mb_per_col = std::env::args() + .nth(1) + .map(|arg| arg.parse().expect("Megabytes per col - should be integer or missing")) + .unwrap_or(1); + + let exit = Arc::new(AtomicBool::new(false)); + let ctrlc_exit = exit.clone(); + + ctrlc::set_handler(move || { + println!("\nRemoving temp database...\n"); + ctrlc_exit.store(true, AtomicOrdering::Relaxed); + }) + .expect("Error setting Ctrl-C handler"); + + let mut config = DatabaseConfig::with_columns(COLUMN_COUNT); + + for c in 0..=COLUMN_COUNT { + config.memory_budget.insert(c, mb_per_col); + } + let dir = tempfile::Builder::new().prefix("rocksdb-example").tempdir().unwrap(); + + println!("Database is put in: {} (maybe check if it was deleted)", dir.path().to_string_lossy()); + let db = Database::open(&config, &dir.path()).unwrap(); + + let mut step = 0; + let mut keyvalues = KeyValue::new(); + while !exit.load(AtomicOrdering::Relaxed) { + let col = step % 100; + + let key_values: Vec<(H256, H256)> = keyvalues.clone().take(128).collect(); + let mut transaction = db.transaction(); + for (k, v) in key_values.iter() { + transaction.put(col, k.as_ref(), v.as_ref()); + } + db.write(transaction).expect("writing failed"); + + let mut seed = H256::zero(); + for (k, _) in key_values.iter() { + let mut buf = [0u8; 64]; + 
buf[0..32].copy_from_slice(seed.as_ref()); + let val = db.get(col, k.as_ref()).expect("Db fail").expect("Was put above"); + buf[32..64].copy_from_slice(val.as_ref()); + + seed = keccak(&buf[..]); + } + + let mut transaction = db.transaction(); + // delete all but one to avoid too much bloating + for (k, _) in key_values.iter().take(127) { + transaction.delete(col, k.as_ref()); + } + db.write(transaction).expect("delete failed"); + + keyvalues = KeyValue::with_seed(seed); + + if step % 10000 == 9999 { + let timestamp = chrono::Local::now().format("%Y-%m-%d %H:%M:%S"); + + println!("{}", timestamp); + println!("\tData written: {} keys - {} Mb", step + 1, ((step + 1) * 64 * 128) / 1024 / 1024); + println!("\tProcess memory used as seen by the OS: {} Mb", proc_memory_usage() / 1024); + } + + step += 1; + } +} diff --git a/kvdb-rocksdb/src/iter.rs b/kvdb-rocksdb/src/iter.rs new file mode 100644 index 000000000..08ed32022 --- /dev/null +++ b/kvdb-rocksdb/src/iter.rs @@ -0,0 +1,94 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This module contains an implementation of a RocksDB iterator +//! wrapped inside a `RwLock`. Since `RwLock` "owns" the inner data, +//! we're using `owning_ref` to work around the borrowing rules of Rust. +//! +//! Note: this crate does not use "Prefix Seek" mode which means that the prefix iterator +//! will return keys not starting with the given prefix as well (as long as `key >= prefix`). +//! To work around this we set an upper bound to the prefix successor. +//! See https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes for details. + +use crate::{other_io_err, DBAndColumns, DBKeyValue}; +use rocksdb::{DBIterator, Direction, IteratorMode, ReadOptions}; +use std::io; + +/// Instantiate iterators yielding `io::Result`s. +pub trait IterationHandler { + type Iterator: Iterator>; + + /// Create an `Iterator` over a `ColumnFamily` corresponding to the passed index. Takes + /// `ReadOptions` to allow configuration of the new iterator (see + /// https://github.com/facebook/rocksdb/blob/master/include/rocksdb/options.h#L1169). + fn iter(self, col: u32, read_opts: ReadOptions) -> Self::Iterator; + /// Create an `Iterator` over a `ColumnFamily` corresponding to the passed index. Takes + /// `ReadOptions` to allow configuration of the new iterator (see + /// https://github.com/facebook/rocksdb/blob/master/include/rocksdb/options.h#L1169). + /// The `Iterator` iterates over keys which start with the provided `prefix`. + fn iter_with_prefix(self, col: u32, prefix: &[u8], read_opts: ReadOptions) -> Self::Iterator; +} + +impl<'a> IterationHandler for &'a DBAndColumns { + type Iterator = EitherIter>, std::iter::Once>>; + + fn iter(self, col: u32, read_opts: ReadOptions) -> Self::Iterator { + match self.cf(col as usize) { + Ok(cf) => EitherIter::A(KvdbAdapter(self.db.iterator_cf_opt(cf, read_opts, IteratorMode::Start))), + Err(e) => EitherIter::B(std::iter::once(Err(e))), + } + } + + fn iter_with_prefix(self, col: u32, prefix: &[u8], read_opts: ReadOptions) -> Self::Iterator { + match self.cf(col as usize) { + Ok(cf) => EitherIter::A(KvdbAdapter(self.db.iterator_cf_opt( + cf, + read_opts, + IteratorMode::From(prefix, Direction::Forward), + ))), + Err(e) => EitherIter::B(std::iter::once(Err(e))), + } + } +} + +/// Small enum to avoid boxing iterators. 
+pub enum EitherIter { + A(A), + B(B), +} + +impl Iterator for EitherIter +where + A: Iterator, + B: Iterator, +{ + type Item = I; + + fn next(&mut self) -> Option { + match self { + Self::A(a) => a.next(), + Self::B(b) => b.next(), + } + } +} + +/// A simple wrapper that adheres to the `kvdb` interface. +pub struct KvdbAdapter(T); + +impl Iterator for KvdbAdapter +where + T: Iterator, Box<[u8]>), rocksdb::Error>>, +{ + type Item = io::Result; + + fn next(&mut self) -> Option { + self.0 + .next() + .map(|r| r.map_err(other_io_err).map(|(k, v)| (k.into_vec().into(), v.into()))) + } +} diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index b5ecf3059..711468487 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -1,82 +1,69 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -#[macro_use] -extern crate log; - -extern crate elastic_array; -extern crate fs_swap; -extern crate interleaved_ordered; -extern crate num_cpus; -extern crate parking_lot; -extern crate regex; -extern crate parity_rocksdb; - -#[cfg(test)] -extern crate ethereum_types; - -extern crate kvdb; - -use std::collections::HashMap; -use std::marker::PhantomData; -use std::{cmp, fs, io, mem, result, error}; -use std::path::Path; +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +mod iter; +mod stats; + +use std::{ + cmp, + collections::HashMap, + error, io, + path::{Path, PathBuf}, +}; -use parking_lot::{Mutex, MutexGuard, RwLock}; -use parity_rocksdb::{ - DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBIterator, - Options, BlockBasedOptions, Direction, Cache, Column, ReadOptions +use rocksdb::{ + BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, Options, ReadOptions, WriteBatch, WriteOptions, DB, }; -use interleaved_ordered::{interleave_ordered, InterleaveOrdered}; -use elastic_array::ElasticArray32; -use fs_swap::{swap, swap_nonatomic}; -use kvdb::{KeyValueDB, DBTransaction, DBValue, DBOp}; +use kvdb::{DBKeyValue, DBOp, DBTransaction, DBValue, KeyValueDB}; #[cfg(target_os = "linux")] use regex::Regex; #[cfg(target_os = "linux")] -use std::process::Command; -#[cfg(target_os = "linux")] use std::fs::File; #[cfg(target_os = "linux")] -use std::path::PathBuf; +use std::process::Command; -fn other_io_err(e: E) -> io::Error where E: Into> { +fn other_io_err(e: E) -> io::Error +where + E: Into>, +{ io::Error::new(io::ErrorKind::Other, e) } -const KB: usize = 1024; -const MB: usize = 1024 * KB; -const DB_DEFAULT_MEMORY_BUDGET_MB: usize = 128; - -enum KeyState { - Insert(DBValue), - Delete, +fn invalid_column(col: u32) -> io::Error { + other_io_err(format!("No such column family: {:?}", col)) } +// Used for memory budget. 
+type MiB = usize; + +const KB: usize = 1_024; +const MB: usize = 1_024 * KB; + +/// The default column memory budget in MiB. +pub const DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB: MiB = 128; + +/// The default memory budget in MiB. +pub const DB_DEFAULT_MEMORY_BUDGET_MB: MiB = 512; + /// Compaction profile for the database settings +/// Note, that changing these parameters may trigger +/// the compaction process of RocksDB on startup. +/// https://github.com/facebook/rocksdb/wiki/Leveled-Compaction#level_compaction_dynamic_level_bytes-is-true #[derive(Clone, Copy, PartialEq, Debug)] pub struct CompactionProfile { /// L0-L1 target file size + /// The minimum size should be calculated in accordance with the + /// number of levels and the expected size of the database. pub initial_file_size: u64, /// block size pub block_size: usize, - /// rate limiter for background flushes and compactions, bytes/sec, if any - pub write_rate_limit: Option, } impl Default for CompactionProfile { @@ -93,10 +80,12 @@ pub fn rotational_from_df_output(df_out: Vec) -> Option { str::from_utf8(df_out.as_slice()) .ok() // Get the drive name. - .and_then(|df_str| Regex::new(r"/dev/(sd[:alpha:]{1,2})") - .ok() - .and_then(|re| re.captures(df_str)) - .and_then(|captures| captures.get(1))) + .and_then(|df_str| { + Regex::new(r"/dev/(sd[:alpha:]{1,2})") + .ok() + .and_then(|re| re.captures(df_str)) + .and_then(|captures| captures.get(1)) + }) // Generate path e.g. /sys/block/sda/queue/rotational .map(|drive_path| { let mut p = PathBuf::from("/sys/block"); @@ -109,15 +98,13 @@ pub fn rotational_from_df_output(df_out: Vec) -> Option { impl CompactionProfile { /// Attempt to determine the best profile automatically, only Linux for now. #[cfg(target_os = "linux")] - pub fn auto(db_path: &Path) -> CompactionProfile { + pub fn auto>(db_path: P) -> CompactionProfile { use std::io::Read; let hdd_check_file = db_path + .as_ref() .to_str() .and_then(|path_str| Command::new("df").arg(path_str).output().ok()) - .and_then(|df_res| match df_res.status.success() { - true => Some(df_res.stdout), - false => None, - }) + .and_then(|df_res| if df_res.status.success() { Some(df_res.stdout) } else { None }) .and_then(rotational_from_df_output); // Read out the file and match compaction profile. if let Some(hdd_check) = hdd_check_file { @@ -125,9 +112,13 @@ impl CompactionProfile { let mut buffer = [0; 1]; if file.read_exact(&mut buffer).is_ok() { // 0 means not rotational. - if buffer == [48] { return Self::ssd(); } + if buffer == [48] { + return Self::ssd() + } // 1 means rotational. - if buffer == [49] { return Self::hdd(); } + if buffer == [49] { + return Self::hdd() + } } } } @@ -137,57 +128,104 @@ impl CompactionProfile { /// Just default for other platforms. 
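For context on how these values are consumed, a hypothetical configuration sketch (`DatabaseConfig` is introduced further down in this file; the path is made up):

    // Pick a compaction profile up front instead of relying on auto-detection.
    // On Linux, `CompactionProfile::auto(path)` probes the block device's
    // `rotational` flag; other platforms fall back to the default profile.
    let mut config = DatabaseConfig::with_columns(1);
    config.compaction = CompactionProfile::auto("/tmp/example-db");
    // or explicitly:
    config.compaction = CompactionProfile::ssd(); // 64 MiB initial files, 16 KiB blocks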
#[cfg(not(target_os = "linux"))] - pub fn auto(_db_path: &Path) -> CompactionProfile { + pub fn auto>(_db_path: P) -> CompactionProfile { Self::default() } /// Default profile suitable for SSD storage pub fn ssd() -> CompactionProfile { - CompactionProfile { - initial_file_size: 64 * MB as u64, - block_size: 16 * KB, - write_rate_limit: None, - } + CompactionProfile { initial_file_size: 64 * MB as u64, block_size: 16 * KB } } /// Slow HDD compaction profile pub fn hdd() -> CompactionProfile { - CompactionProfile { - initial_file_size: 256 * MB as u64, - block_size: 64 * KB, - write_rate_limit: Some(16 * MB as u64), - } + CompactionProfile { initial_file_size: 256 * MB as u64, block_size: 64 * KB } } } /// Database configuration #[derive(Clone)] +#[non_exhaustive] pub struct DatabaseConfig { /// Max number of open files. pub max_open_files: i32, - /// Memory budget (in MiB) used for setting block cache size, write buffer size. - pub memory_budget: Option, - /// Compaction profile + /// Memory budget (in MiB) used for setting block cache size and + /// write buffer size for each column including the default one. + /// If the memory budget of a column is not specified, + /// `DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB` is used for that column. + pub memory_budget: HashMap, + /// Compaction profile. pub compaction: CompactionProfile, - /// Set number of columns - pub columns: Option, + /// Set number of columns. + /// + /// # Safety + /// + /// The number of columns must not be zero. + pub columns: u32, + /// Specify the maximum number of info/debug log files to be kept. + pub keep_log_file_num: i32, + /// Enable native RocksDB statistics. + /// Disabled by default. + /// + /// It can have a negative performance impact up to 10% according to + /// https://github.com/facebook/rocksdb/wiki/Statistics. + pub enable_statistics: bool, + /// Open the database as a secondary instance. + /// Specify a path for the secondary instance of the database. + /// Secondary instances are read-only and kept updated by tailing the rocksdb MANIFEST. + /// It is up to the user to call `catch_up_with_primary()` manually to update the secondary db. + /// Disabled by default. + /// + /// `max_open_files` is overridden to always equal `-1`. + /// May have a negative performance impact on the secondary instance + /// if the secondary instance reads and applies state changes before the primary instance compacts them. + /// More info: https://github.com/facebook/rocksdb/wiki/Secondary-instance + pub secondary: Option, + /// Limit the size (in bytes) of write ahead logs + /// More info: https://github.com/facebook/rocksdb/wiki/Write-Ahead-Log + pub max_total_wal_size: Option, + /// Creates a new database if no database exists. + /// Set to `true` by default for backwards compatibility. + pub create_if_missing: bool, } impl DatabaseConfig { /// Create new `DatabaseConfig` with default parameters and specified set of columns. /// Note that cache sizes must be explicitly set. - pub fn with_columns(columns: Option) -> Self { - let mut config = Self::default(); - config.columns = columns; - config + /// + /// # Safety + /// + /// The number of `columns` must not be zero. + pub fn with_columns(columns: u32) -> Self { + assert!(columns > 0, "the number of columns must not be zero"); + + Self { columns, ..Default::default() } + } + + /// Returns the total memory budget in bytes. 
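The per-column budget map replaces the old single `memory_budget` value. A hypothetical sketch of how it is filled and what `memory_budget()` (defined just below) reports:

    // Explicit budgets for columns 0 and 2, in MiB. Column 1 has no entry and
    // falls back to DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB (128 MiB), so the total
    // budget is (64 + 128 + 256) MiB.
    let mut config = DatabaseConfig::with_columns(3);
    config.memory_budget = [(0, 64), (2, 256)].iter().cloned().collect();
    assert_eq!(config.memory_budget(), (64 + 128 + 256) * 1024 * 1024);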
+ pub fn memory_budget(&self) -> MiB { + (0..self.columns) + .map(|i| self.memory_budget.get(&i).unwrap_or(&DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB) * MB) + .sum() } - pub fn memory_budget(&self) -> usize { - self.memory_budget.unwrap_or(DB_DEFAULT_MEMORY_BUDGET_MB) * MB + /// Returns the memory budget of the specified column in bytes. + fn memory_budget_for_col(&self, col: u32) -> MiB { + self.memory_budget.get(&col).unwrap_or(&DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB) * MB } - pub fn memory_budget_per_col(&self) -> usize { - self.memory_budget() / self.columns.unwrap_or(1) as usize + // Get column family configuration with the given block based options. + fn column_config(&self, block_opts: &BlockBasedOptions, col: u32) -> Options { + let column_mem_budget = self.memory_budget_for_col(col); + let mut opts = Options::default(); + + opts.set_level_compaction_dynamic_level_bytes(true); + opts.set_block_based_table_factory(block_opts); + opts.optimize_level_style_compaction(column_mem_budget); + opts.set_target_file_size_base(self.compaction.initial_file_size); + opts.set_compression_per_level(&[]); + + opts } } @@ -195,204 +233,183 @@ impl Default for DatabaseConfig { fn default() -> DatabaseConfig { DatabaseConfig { max_open_files: 512, - memory_budget: None, + memory_budget: HashMap::new(), compaction: CompactionProfile::default(), - columns: None, + columns: 1, + keep_log_file_num: 1, + enable_statistics: false, + secondary: None, + max_total_wal_size: None, + create_if_missing: true, } } } -/// Database iterator (for flushed data only) -// The compromise of holding only a virtual borrow vs. holding a lock on the -// inner DB (to prevent closing via restoration) may be re-evaluated in the future. -// -pub struct DatabaseIterator<'a> { - iter: InterleaveOrdered<::std::vec::IntoIter<(Box<[u8]>, Box<[u8]>)>, DBIterator>, - _marker: PhantomData<&'a Database>, -} - -impl<'a> Iterator for DatabaseIterator<'a> { - type Item = (Box<[u8]>, Box<[u8]>); - - fn next(&mut self) -> Option { - self.iter.next() - } -} - struct DBAndColumns { db: DB, - cfs: Vec, + column_names: Vec, } -// get column family configuration from database config. -fn col_config(config: &DatabaseConfig, block_opts: &BlockBasedOptions) -> io::Result { - let mut opts = Options::new(); - - opts.set_parsed_options("level_compaction_dynamic_level_bytes=true").map_err(other_io_err)?; - - opts.set_block_based_table_factory(block_opts); - - opts.set_parsed_options( - &format!("block_based_table_factory={{{};{}}}", - "cache_index_and_filter_blocks=true", - "pin_l0_filter_and_index_blocks_in_cache=true")).map_err(other_io_err)?; - - opts.optimize_level_style_compaction(config.memory_budget_per_col() as i32); - opts.set_target_file_size_base(config.compaction.initial_file_size); - - opts.set_parsed_options("compression_per_level=").map_err(other_io_err)?; - - Ok(opts) +impl DBAndColumns { + fn cf(&self, i: usize) -> io::Result<&ColumnFamily> { + let name = self.column_names.get(i).ok_or_else(|| invalid_column(i as u32))?; + self.db + .cf_handle(&name) + .ok_or_else(|| other_io_err(format!("invalid column name: {name}"))) + } } /// Key-Value database. pub struct Database { - db: RwLock>, + inner: DBAndColumns, config: DatabaseConfig, + opts: Options, write_opts: WriteOptions, read_opts: ReadOptions, block_opts: BlockBasedOptions, - path: String, - // Dirty values added with `write_buffered`. Cleaned on `flush`. - overlay: RwLock, KeyState>>>, - // Values currently being flushed. Cleared when `flush` completes. 
- flushing: RwLock, KeyState>>>, - // Prevents concurrent flushes. - // Value indicates if a flush is in progress. - flushing_lock: Mutex, + stats: stats::RunningDbStats, } -#[inline] -fn check_for_corruption>(path: P, res: result::Result) -> io::Result { - if let Err(ref s) = res { - if s.starts_with("Corruption:") { - warn!("DB corrupted: {}. Repair will be triggered on next restart", s); - let _ = fs::File::create(path.as_ref().join(Database::CORRUPTION_FILE_NAME)); - } +/// Generate the options for RocksDB, based on the given `DatabaseConfig`. +fn generate_options(config: &DatabaseConfig) -> Options { + let mut opts = Options::default(); + + opts.set_report_bg_io_stats(true); + if config.enable_statistics { + opts.enable_statistics(); } + opts.set_use_fsync(false); + opts.create_if_missing(config.create_if_missing); + if config.secondary.is_some() { + opts.set_max_open_files(-1) + } else { + opts.set_max_open_files(config.max_open_files); + } + opts.set_bytes_per_sync(1 * MB as u64); + opts.set_keep_log_file_num(1); + opts.increase_parallelism(cmp::max(1, num_cpus::get() as i32 / 2)); + if let Some(m) = config.max_total_wal_size { + opts.set_max_total_wal_size(m); + } + + opts +} - res.map_err(other_io_err) +fn generate_read_options() -> ReadOptions { + let mut read_opts = ReadOptions::default(); + read_opts.set_verify_checksums(false); + read_opts } -fn is_corrupted(s: &str) -> bool { - s.starts_with("Corruption:") || s.starts_with("Invalid argument: You have to open all column families") +/// Generate the block based options for RocksDB, based on the given `DatabaseConfig`. +fn generate_block_based_options(config: &DatabaseConfig) -> io::Result { + let mut block_opts = BlockBasedOptions::default(); + block_opts.set_block_size(config.compaction.block_size); + // See https://github.com/facebook/rocksdb/blob/a1523efcdf2f0e8133b9a9f6e170a0dad49f928f/include/rocksdb/table.h#L246-L271 for details on what the format versions are/do. + block_opts.set_format_version(5); + block_opts.set_block_restart_interval(16); + // Set cache size as recommended by + // https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning#block-cache-size + let cache_size = config.memory_budget() / 3; + if cache_size == 0 { + block_opts.disable_cache() + } else { + let cache = rocksdb::Cache::new_lru_cache(cache_size); + block_opts.set_block_cache(&cache); + // "index and filter blocks will be stored in block cache, together with all other data blocks." + // See: https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB#indexes-and-filter-blocks + block_opts.set_cache_index_and_filter_blocks(true); + // Don't evict L0 filter/index blocks from the cache + block_opts.set_pin_l0_filter_and_index_blocks_in_cache(true); + } + block_opts.set_bloom_filter(10.0, true); + + Ok(block_opts) } impl Database { - const CORRUPTION_FILE_NAME: &'static str = "CORRUPTED"; + /// Open database file. + /// + /// # Safety + /// + /// The number of `config.columns` must not be zero. 
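A worked restatement of the cache sizing above for the default single-column configuration; the numbers follow directly from the constants in this file:

    // One column with no explicit budget: 128 MiB total, of which one third
    // becomes the shared LRU block cache that also holds index and filter blocks.
    let config = DatabaseConfig::default();
    assert_eq!(config.memory_budget(), 128 * 1024 * 1024);
    let cache_size = config.memory_budget() / 3;
    assert_eq!(cache_size, (128 * 1024 * 1024) / 3);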
+ pub fn open>(config: &DatabaseConfig, path: P) -> io::Result { + assert!(config.columns > 0, "the number of columns must not be zero"); + + let opts = generate_options(config); + let block_opts = generate_block_based_options(config)?; + + let column_names: Vec<_> = (0..config.columns).map(|c| format!("col{}", c)).collect(); + let write_opts = WriteOptions::default(); + let read_opts = generate_read_options(); + + let db = if let Some(secondary_path) = &config.secondary { + Self::open_secondary(&opts, path.as_ref(), secondary_path.as_ref(), column_names.as_slice())? + } else { + let column_names: Vec<&str> = column_names.iter().map(|s| s.as_str()).collect(); + Self::open_primary(&opts, path.as_ref(), config, column_names.as_slice(), &block_opts)? + }; - /// Open database with default settings. - pub fn open_default(path: &str) -> io::Result { - Database::open(&DatabaseConfig::default(), path) + Ok(Database { + inner: DBAndColumns { db, column_names }, + config: config.clone(), + opts, + read_opts, + write_opts, + block_opts, + stats: stats::RunningDbStats::new(), + }) } - /// Open database file. Creates if it does not exist. - pub fn open(config: &DatabaseConfig, path: &str) -> io::Result { - let mut opts = Options::new(); - - if let Some(rate_limit) = config.compaction.write_rate_limit { - opts.set_parsed_options(&format!("rate_limiter_bytes_per_sec={}", rate_limit)).map_err(other_io_err)?; - } - opts.set_use_fsync(false); - opts.create_if_missing(true); - opts.set_max_open_files(config.max_open_files); - opts.set_parsed_options("keep_log_file_num=1").map_err(other_io_err)?; - opts.set_parsed_options("bytes_per_sync=1048576").map_err(other_io_err)?; - opts.set_db_write_buffer_size(config.memory_budget_per_col() / 2); - opts.increase_parallelism(cmp::max(1, ::num_cpus::get() as i32 / 2)); - - let mut block_opts = BlockBasedOptions::new(); - - { - block_opts.set_block_size(config.compaction.block_size); - // Set cache size as recommended by - // https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning#block-cache-size - let cache_size = config.memory_budget() / 3; - let cache = Cache::new(cache_size); - block_opts.set_cache(cache); - } - - // attempt database repair if it has been previously marked as corrupted - let db_corrupted = Path::new(path).join(Database::CORRUPTION_FILE_NAME); - if db_corrupted.exists() { - warn!("DB has been previously marked as corrupted, attempting repair"); - DB::repair(&opts, path).map_err(other_io_err)?; - fs::remove_file(db_corrupted)?; - } - - let columns = config.columns.unwrap_or(0) as usize; - - let mut cf_options = Vec::with_capacity(columns); - let cfnames: Vec<_> = (0..columns).map(|c| format!("col{}", c)).collect(); - let cfnames: Vec<&str> = cfnames.iter().map(|n| n as &str).collect(); - - for _ in 0 .. 
config.columns.unwrap_or(0) { - cf_options.push(col_config(&config, &block_opts)?); - } - - let write_opts = WriteOptions::new(); - let mut read_opts = ReadOptions::new(); - read_opts.set_verify_checksums(false); - - let mut cfs: Vec = Vec::new(); - let db = match config.columns { - Some(_) => { - match DB::open_cf(&opts, path, &cfnames, &cf_options) { - Ok(db) => { - cfs = cfnames.iter().map(|n| db.cf_handle(n) - .expect("rocksdb opens a cf_handle for each cfname; qed")).collect(); - Ok(db) - } - Err(_) => { - // retry and create CFs - match DB::open_cf(&opts, path, &[], &[]) { - Ok(mut db) => { - cfs = cfnames.iter() - .enumerate() - .map(|(i, n)| db.create_cf(n, &cf_options[i])) - .collect::<::std::result::Result<_, _>>() - .map_err(other_io_err)?; - Ok(db) - }, - err => err, + /// Internal api to open a database in primary mode. + fn open_primary>( + opts: &Options, + path: P, + config: &DatabaseConfig, + column_names: &[&str], + block_opts: &BlockBasedOptions, + ) -> io::Result { + let cf_descriptors: Vec<_> = (0..config.columns) + .map(|i| ColumnFamilyDescriptor::new(column_names[i as usize], config.column_config(&block_opts, i))) + .collect(); + + let db = match DB::open_cf_descriptors(&opts, path.as_ref(), cf_descriptors) { + Err(_) => { + // retry and create CFs + match DB::open_cf(&opts, path.as_ref(), &[] as &[&str]) { + Ok(mut db) => { + for (i, name) in column_names.iter().enumerate() { + let _ = db + .create_cf(name, &config.column_config(&block_opts, i as u32)) + .map_err(other_io_err)?; } - } + Ok(db) + }, + err => err, } }, - None => DB::open(&opts, path) + ok => ok, }; - let db = match db { + Ok(match db { Ok(db) => db, - Err(ref s) if is_corrupted(s) => { - warn!("DB corrupted: {}, attempting repair", s); - DB::repair(&opts, path).map_err(other_io_err)?; - - match cfnames.is_empty() { - true => DB::open(&opts, path).map_err(other_io_err)?, - false => { - let db = DB::open_cf(&opts, path, &cfnames, &cf_options).map_err(other_io_err)?; - cfs = cfnames.iter().map(|n| db.cf_handle(n) - .expect("rocksdb opens a cf_handle for each cfname; qed")).collect(); - db - }, - } - }, - Err(s) => { - return Err(other_io_err(s)) - } - }; - let num_cols = cfs.len(); - Ok(Database { - db: RwLock::new(Some(DBAndColumns{ db: db, cfs: cfs })), - config: config.clone(), - write_opts: write_opts, - overlay: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()), - flushing: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()), - flushing_lock: Mutex::new(false), - path: path.to_owned(), - read_opts: read_opts, - block_opts: block_opts, + Err(s) => return Err(other_io_err(s)), + }) + } + + /// Internal api to open a database in secondary mode. + /// Secondary database needs a seperate path to store its own logs. + fn open_secondary>( + opts: &Options, + path: P, + secondary_path: P, + column_names: &[String], + ) -> io::Result { + let db = DB::open_cf_as_secondary(&opts, path.as_ref(), secondary_path.as_ref(), column_names); + + Ok(match db { + Ok(db) => db, + Err(s) => return Err(other_io_err(s)), }) } @@ -401,388 +418,323 @@ impl Database { DBTransaction::new() } - fn to_overlay_column(col: Option) -> usize { - col.map_or(0, |c| (c + 1) as usize) - } - /// Commit transaction to database. 
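Since secondary mode is new in this patch, a hypothetical end-to-end sketch of a read-only secondary instance (paths are made up; the same flow is exercised by the `secondary_db_*` tests further down):

    // The secondary keeps its own logs under the `secondary` path and only
    // observes new primary writes after an explicit catch-up call.
    fn open_reader() -> std::io::Result<()> {
        let mut config = DatabaseConfig::with_columns(1);
        config.secondary = Some("/tmp/example-db-secondary".into());
        let reader = Database::open(&config, "/tmp/example-db")?;
        reader.try_catch_up_with_primary()?;
        let _value = reader.get(0, b"some-key")?;
        Ok(())
    }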
- pub fn write_buffered(&self, tr: DBTransaction) { - let mut overlay = self.overlay.write(); + pub fn write(&self, tr: DBTransaction) -> io::Result<()> { + let cfs = &self.inner; + let mut batch = WriteBatch::default(); let ops = tr.ops; + + self.stats.tally_writes(ops.len() as u64); + self.stats.tally_transactions(1); + + let mut stats_total_bytes = 0; + for op in ops { + let col = op.col(); + let cf = cfs.cf(col as usize)?; + match op { - DBOp::Insert { col, key, value } => { - let c = Self::to_overlay_column(col); - overlay[c].insert(key, KeyState::Insert(value)); + DBOp::Insert { col: _, key, value } => { + stats_total_bytes += key.len() + value.len(); + batch.put_cf(cf, &key, &value); }, - DBOp::Delete { col, key } => { - let c = Self::to_overlay_column(col); - overlay[c].insert(key, KeyState::Delete); + DBOp::Delete { col: _, key } => { + // We count deletes as writes. + stats_total_bytes += key.len(); + batch.delete_cf(cf, &key); }, - } - }; - } - - /// Commit buffered changes to database. Must be called under `flush_lock` - fn write_flushing_with_lock(&self, _lock: &mut MutexGuard) -> io::Result<()> { - match *self.db.read() { - Some(DBAndColumns { ref db, ref cfs }) => { - let batch = WriteBatch::new(); - mem::swap(&mut *self.overlay.write(), &mut *self.flushing.write()); - { - for (c, column) in self.flushing.read().iter().enumerate() { - for (key, state) in column.iter() { - match *state { - KeyState::Delete => { - if c > 0 { - batch.delete_cf(cfs[c - 1], key).map_err(other_io_err)?; - } else { - batch.delete(key).map_err(other_io_err)?; - } - }, - KeyState::Insert(ref value) => { - if c > 0 { - batch.put_cf(cfs[c - 1], key, value).map_err(other_io_err)?; - } else { - batch.put(key, value).map_err(other_io_err)?; - } - }, - } + DBOp::DeletePrefix { col, prefix } => { + let end_prefix = kvdb::end_prefix(&prefix[..]); + let no_end = end_prefix.is_none(); + let end_range = end_prefix.unwrap_or_else(|| vec![u8::max_value(); 16]); + batch.delete_range_cf(cf, &prefix[..], &end_range[..]); + if no_end { + let prefix = if prefix.len() > end_range.len() { &prefix[..] } else { &end_range[..] }; + for result in self.iter_with_prefix(col, prefix) { + let (key, _) = result?; + batch.delete_cf(cf, &key[..]); } } - } - - check_for_corruption( - &self.path, - db.write_opt(batch, &self.write_opts))?; - - for column in self.flushing.write().iter_mut() { - column.clear(); - column.shrink_to_fit(); - } - Ok(()) - }, - None => Err(other_io_err("Database is closed")) + }, + }; } - } + self.stats.tally_bytes_written(stats_total_bytes as u64); - /// Commit buffered changes to database. - pub fn flush(&self) -> io::Result<()> { - let mut lock = self.flushing_lock.lock(); - // If RocksDB batch allocation fails the thread gets terminated and the lock is released. - // The value inside the lock is used to detect that. - if *lock { - // This can only happen if another flushing thread is terminated unexpectedly. - return Err(other_io_err("Database write failure. Running low on memory perhaps?")) - } - *lock = true; - let result = self.write_flushing_with_lock(&mut lock); - *lock = false; - result + cfs.db.write_opt(batch, &self.write_opts).map_err(other_io_err) } - /// Commit transaction to database. 
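A usage sketch for the atomic write path above, including the new ranged delete; the column index and keys are hypothetical:

    // Puts, a point delete and a prefix delete are applied atomically by
    // `Database::write` through a single RocksDB WriteBatch.
    fn write_example(db: &Database) -> std::io::Result<()> {
        let mut tx = db.transaction();
        tx.put(0, b"cat-1", b"meow");
        tx.put(0, b"cat-2", b"purr");
        tx.delete(0, b"dog-1");
        tx.delete_prefix(0, b"cat-"); // turned into delete_range_cf up to the prefix successor
        db.write(tx)
    }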
- pub fn write(&self, tr: DBTransaction) -> io::Result<()> { - match *self.db.read() { - Some(DBAndColumns { ref db, ref cfs }) => { - let batch = WriteBatch::new(); - let ops = tr.ops; - for op in ops { - // remove any buffered operation for this key - self.overlay.write()[Self::to_overlay_column(op.col())].remove(op.key()); - - match op { - DBOp::Insert { col, key, value } => match col { - None => batch.put(&key, &value).map_err(other_io_err)?, - Some(c) => batch.put_cf(cfs[c as usize], &key, &value).map_err(other_io_err)?, - }, - DBOp::Delete { col, key } => match col { - None => batch.delete(&key).map_err(other_io_err)?, - Some(c) => batch.delete_cf(cfs[c as usize], &key).map_err(other_io_err)?, - } - } - } + /// Get value by key. + pub fn get(&self, col: u32, key: &[u8]) -> io::Result> { + let cfs = &self.inner; + let cf = cfs.cf(col as usize)?; + self.stats.tally_reads(1); + let value = cfs + .db + .get_pinned_cf_opt(cf, key, &self.read_opts) + .map(|r| r.map(|v| v.to_vec())) + .map_err(other_io_err); + + match value { + Ok(Some(ref v)) => self.stats.tally_bytes_read((key.len() + v.len()) as u64), + Ok(None) => self.stats.tally_bytes_read(key.len() as u64), + _ => {}, + }; - check_for_corruption(&self.path, db.write_opt(batch, &self.write_opts)) - }, - None => Err(other_io_err("Database is closed")), - } + value } - /// Get value by key. - pub fn get(&self, col: Option, key: &[u8]) -> io::Result> { - match *self.db.read() { - Some(DBAndColumns { ref db, ref cfs }) => { - let overlay = &self.overlay.read()[Self::to_overlay_column(col)]; - match overlay.get(key) { - Some(&KeyState::Insert(ref value)) => Ok(Some(value.clone())), - Some(&KeyState::Delete) => Ok(None), - None => { - let flushing = &self.flushing.read()[Self::to_overlay_column(col)]; - match flushing.get(key) { - Some(&KeyState::Insert(ref value)) => Ok(Some(value.clone())), - Some(&KeyState::Delete) => Ok(None), - None => { - col.map_or_else( - || db.get_opt(key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))), - |c| db.get_cf_opt(cfs[c as usize], key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v)))) - .map_err(other_io_err) - }, - } - }, - } - }, - None => Ok(None), - } + /// Get value by partial key. Prefix size should match configured prefix size. + pub fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> io::Result> { + self.iter_with_prefix(col, prefix) + .next() + .transpose() + .map(|m| m.map(|(_k, v)| v)) } - /// Get value by partial key. Prefix size should match configured prefix size. Only searches flushed values. - // TODO: support prefix seek for unflushed data - pub fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { - self.iter_from_prefix(col, prefix).and_then(|mut iter| { - match iter.next() { - // TODO: use prefix_same_as_start read option (not availabele in C API currently) - Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] { Some(v) } else { None }, - _ => None - } - }) + /// Iterator over the data in the given database column index. + /// Will hold a lock until the iterator is dropped + /// preventing the database from being closed. + pub fn iter<'a>(&'a self, col: u32) -> impl Iterator> + 'a { + let read_opts = generate_read_options(); + iter::IterationHandler::iter(&self.inner, col, read_opts) } - /// Get database iterator for flushed data. 
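A read-side sketch for the methods above; the new iterator yields `io::Result` items, so errors surface per item rather than silently ending iteration (keys are hypothetical):

    // Point lookup, first-match prefix lookup, and a full column scan.
    fn read_example(db: &Database) -> std::io::Result<()> {
        let exact = db.get(0, b"cat-1")?;          // Option<DBValue>
        let first = db.get_by_prefix(0, b"cat-")?; // value of the first matching key, if any
        println!("exact: {:?}, first prefix match: {:?}", exact, first);
        for item in db.iter(0) {
            let (key, value) = item?;              // DBKeyValue, a (key, value) pair
            println!("{:?} => {} bytes", key, value.len());
        }
        Ok(())
    }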
- pub fn iter(&self, col: Option) -> Option { - match *self.db.read() { - Some(DBAndColumns { ref db, ref cfs }) => { - let overlay = &self.overlay.read()[Self::to_overlay_column(col)]; - let mut overlay_data = overlay.iter() - .filter_map(|(k, v)| match *v { - KeyState::Insert(ref value) => - Some((k.clone().into_vec().into_boxed_slice(), value.clone().into_vec().into_boxed_slice())), - KeyState::Delete => None, - }).collect::>(); - overlay_data.sort(); - - let iter = col.map_or_else( - || db.iterator_opt(IteratorMode::Start, &self.read_opts), - |c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::Start, &self.read_opts) - .expect("iterator params are valid; qed") - ); - - Some(DatabaseIterator { - iter: interleave_ordered(overlay_data, iter), - _marker: PhantomData, - }) - }, - None => None, + /// Iterator over data in the `col` database column index matching the given prefix. + /// Will hold a lock until the iterator is dropped + /// preventing the database from being closed. + fn iter_with_prefix<'a>(&'a self, col: u32, prefix: &'a [u8]) -> impl Iterator> + 'a { + let mut read_opts = generate_read_options(); + // rocksdb doesn't work with an empty upper bound + if let Some(end_prefix) = kvdb::end_prefix(prefix) { + read_opts.set_iterate_upper_bound(end_prefix); } + iter::IterationHandler::iter_with_prefix(&self.inner, col, prefix, read_opts) } - fn iter_from_prefix(&self, col: Option, prefix: &[u8]) -> Option { - match *self.db.read() { - Some(DBAndColumns { ref db, ref cfs }) => { - let iter = col.map_or_else(|| db.iterator_opt(IteratorMode::From(prefix, Direction::Forward), &self.read_opts), - |c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::From(prefix, Direction::Forward), &self.read_opts) - .expect("iterator params are valid; qed")); - - Some(DatabaseIterator { - iter: interleave_ordered(Vec::new(), iter), - _marker: PhantomData, - }) - }, - None => None, - } + /// The number of column families in the db. + pub fn num_columns(&self) -> u32 { + self.inner.column_names.len() as u32 } - /// Close the database - fn close(&self) { - *self.db.write() = None; - self.overlay.write().clear(); - self.flushing.write().clear(); + /// The number of keys in a column (estimated). + pub fn num_keys(&self, col: u32) -> io::Result { + const ESTIMATE_NUM_KEYS: &str = "rocksdb.estimate-num-keys"; + let cfs = &self.inner; + let cf = cfs.cf(col as usize)?; + match cfs.db.property_int_value_cf(cf, ESTIMATE_NUM_KEYS) { + Ok(estimate) => Ok(estimate.unwrap_or_default()), + Err(err_string) => Err(other_io_err(err_string)), + } } - /// Restore the database from a copy at given path. - pub fn restore(&self, new_db: &str) -> io::Result<()> { - self.close(); - - // swap is guaranteed to be atomic - match swap(new_db, &self.path) { - Ok(_) => { - // ignore errors - let _ = fs::remove_dir_all(new_db); - }, - Err(err) => { - debug!("DB atomic swap failed: {}", err); - match swap_nonatomic(new_db, &self.path) { - Ok(_) => { - // ignore errors - let _ = fs::remove_dir_all(new_db); - }, - Err(err) => { - warn!("Failed to swap DB directories: {:?}", err); - return Err(io::Error::new(io::ErrorKind::Other, "DB restoration failed: could not swap DB directories")); - } - } - } + /// Remove the last column family in the database. The deletion is definitive. 
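`num_keys` is backed by RocksDB's `rocksdb.estimate-num-keys` property, so it is an estimate rather than an exact count. A short hypothetical usage note:

    // The count may lag behind recent writes; treat it as a rough size indicator.
    fn report_sizes(db: &Database) -> std::io::Result<()> {
        for col in 0..db.num_columns() {
            println!("col{}: ~{} keys", col, db.num_keys(col)?);
        }
        Ok(())
    }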
+ pub fn remove_last_column(&mut self) -> io::Result<()> { + let DBAndColumns { ref mut db, ref mut column_names } = self.inner; + if let Some(name) = column_names.pop() { + db.drop_cf(&name).map_err(other_io_err)?; } + Ok(()) + } - // reopen the database and steal handles into self - let db = Self::open(&self.config, &self.path)?; - *self.db.write() = mem::replace(&mut *db.db.write(), None); - *self.overlay.write() = mem::replace(&mut *db.overlay.write(), Vec::new()); - *self.flushing.write() = mem::replace(&mut *db.flushing.write(), Vec::new()); + /// Add a new column family to the DB. + pub fn add_column(&mut self) -> io::Result<()> { + let DBAndColumns { ref mut db, ref mut column_names } = self.inner; + let col = column_names.len() as u32; + let name = format!("col{}", col); + let col_config = self.config.column_config(&self.block_opts, col as u32); + let _ = db.create_cf(&name, &col_config).map_err(other_io_err)?; + column_names.push(name); Ok(()) } - /// The number of non-default column families. - pub fn num_columns(&self) -> u32 { - self.db.read().as_ref() - .and_then(|db| if db.cfs.is_empty() { None } else { Some(db.cfs.len()) } ) - .map(|n| n as u32) - .unwrap_or(0) - } - - /// Drop a column family. - pub fn drop_column(&self) -> io::Result<()> { - match *self.db.write() { - Some(DBAndColumns { ref mut db, ref mut cfs }) => { - if let Some(col) = cfs.pop() { - let name = format!("col{}", cfs.len()); - drop(col); - db.drop_cf(&name).map_err(other_io_err)?; - } - Ok(()) - }, - None => Ok(()), + /// Get RocksDB statistics. + pub fn get_statistics(&self) -> HashMap { + if let Some(stats) = self.opts.get_statistics() { + stats::parse_rocksdb_stats(&stats) + } else { + HashMap::new() } } - /// Add a column family. - pub fn add_column(&self) -> io::Result<()> { - match *self.db.write() { - Some(DBAndColumns { ref mut db, ref mut cfs }) => { - let col = cfs.len() as u32; - let name = format!("col{}", col); - cfs.push(db.create_cf(&name, &col_config(&self.config, &self.block_opts)?).map_err(other_io_err)?); - Ok(()) - }, - None => Ok(()), - } + /// Try to catch up a secondary instance with + /// the primary by reading as much from the logs as possible. + /// + /// Guaranteed to have changes up to the the time that `try_catch_up_with_primary` is called + /// if it finishes succesfully. + /// + /// Blocks until the MANIFEST file and any state changes in the corresponding Write-Ahead-Logs + /// are applied to the secondary instance. If the manifest files are very large + /// this method could take a long time. + /// + /// If Write-Ahead-Logs have been purged by the primary instance before the secondary + /// is able to open them, the secondary will not be caught up + /// until this function is called again and new Write-Ahead-Logs are identified. + /// + /// If called while the primary is writing, the catch-up may fail. + /// + /// If the secondary is unable to catch up because of missing logs, + /// this method fails silently and no error is returned. + /// + /// Calling this as primary will return an error. + pub fn try_catch_up_with_primary(&self) -> io::Result<()> { + self.inner.db.try_catch_up_with_primary().map_err(other_io_err) } } // duplicate declaration of methods here to avoid trait import in certain existing cases // at time of addition. 
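A sketch tying the column-management methods above together; unlike the old lock-based API both now take `&mut self`, and the path is hypothetical:

    // Columns are named "colN" internally; removal is permanent and always
    // targets the last column.
    fn grow_and_shrink() -> std::io::Result<()> {
        let mut db = Database::open(&DatabaseConfig::with_columns(1), "/tmp/example-db")?;
        db.add_column()?;          // creates "col1"
        assert_eq!(db.num_columns(), 2);
        db.remove_last_column()?;  // drops "col1" and its data
        assert_eq!(db.num_columns(), 1);
        Ok(())
    }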
impl KeyValueDB for Database { - fn get(&self, col: Option, key: &[u8]) -> io::Result> { + fn get(&self, col: u32, key: &[u8]) -> io::Result> { Database::get(self, col, key) } - fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { + fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> io::Result> { Database::get_by_prefix(self, col, prefix) } - fn write_buffered(&self, transaction: DBTransaction) { - Database::write_buffered(self, transaction) - } - fn write(&self, transaction: DBTransaction) -> io::Result<()> { Database::write(self, transaction) } - fn flush(&self) -> io::Result<()> { - Database::flush(self) - } - - fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { + fn iter<'a>(&'a self, col: u32) -> Box> + 'a> { let unboxed = Database::iter(self, col); - Box::new(unboxed.into_iter().flat_map(|inner| inner)) + Box::new(unboxed.into_iter()) } - fn iter_from_prefix<'a>(&'a self, col: Option, prefix: &'a [u8]) - -> Box, Box<[u8]>)> + 'a> - { - let unboxed = Database::iter_from_prefix(self, col, prefix); - Box::new(unboxed.into_iter().flat_map(|inner| inner)) + fn iter_with_prefix<'a>( + &'a self, + col: u32, + prefix: &'a [u8], + ) -> Box> + 'a> { + let unboxed = Database::iter_with_prefix(self, col, prefix); + Box::new(unboxed.into_iter()) } - fn restore(&self, new_db: &str) -> io::Result<()> { - Database::restore(self, new_db) - } -} + fn io_stats(&self, kind: kvdb::IoStatsKind) -> kvdb::IoStats { + let rocksdb_stats = self.get_statistics(); + let cache_hit_count = rocksdb_stats.get("block.cache.hit").map(|s| s.count).unwrap_or(0u64); + let overall_stats = self.stats.overall(); + let old_cache_hit_count = overall_stats.raw.cache_hit_count; + + self.stats.tally_cache_hit_count(cache_hit_count - old_cache_hit_count); + + let taken_stats = match kind { + kvdb::IoStatsKind::Overall => self.stats.overall(), + kvdb::IoStatsKind::SincePrevious => self.stats.since_previous(), + }; + + let mut stats = kvdb::IoStats::empty(); + + stats.reads = taken_stats.raw.reads; + stats.writes = taken_stats.raw.writes; + stats.transactions = taken_stats.raw.transactions; + stats.bytes_written = taken_stats.raw.bytes_written; + stats.bytes_read = taken_stats.raw.bytes_read; + stats.cache_reads = taken_stats.raw.cache_hit_count; + stats.started = taken_stats.started; + stats.span = taken_stats.started.elapsed(); -impl Drop for Database { - fn drop(&mut self) { - // write all buffered changes if we can. 
- let _ = self.flush(); + stats } } #[cfg(test)] mod tests { - extern crate tempdir; - - use std::str::FromStr; - use self::tempdir::TempDir; - use ethereum_types::H256; use super::*; + use kvdb_shared_tests as st; + use std::io::{self, Read}; + use tempfile::Builder as TempfileBuilder; - fn test_db(config: &DatabaseConfig) { - let tempdir = TempDir::new("").unwrap(); - let db = Database::open(config, tempdir.path().to_str().unwrap()).unwrap(); - let key1 = H256::from_str("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); - let key2 = H256::from_str("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); - let key3 = H256::from_str("01c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); + fn create(columns: u32) -> io::Result { + let tempdir = TempfileBuilder::new().prefix("").tempdir()?; + let config = DatabaseConfig::with_columns(columns); + Database::open(&config, tempdir.path().to_str().expect("tempdir path is valid unicode")) + } - let mut batch = db.transaction(); - batch.put(None, key1.as_bytes(), b"cat"); - batch.put(None, key2.as_bytes(), b"dog"); - db.write(batch).unwrap(); + #[test] + fn get_fails_with_non_existing_column() -> io::Result<()> { + let db = create(1)?; + st::test_get_fails_with_non_existing_column(&db) + } - assert_eq!(&*db.get(None, key1.as_bytes()).unwrap().unwrap(), b"cat"); + #[test] + fn put_and_get() -> io::Result<()> { + let db = create(1)?; + st::test_put_and_get(&db) + } - let contents: Vec<_> = db.iter(None).into_iter().flat_map(|inner| inner).collect(); - assert_eq!(contents.len(), 2); - assert_eq!(&*contents[0].0, key1.as_bytes()); - assert_eq!(&*contents[0].1, b"cat"); - assert_eq!(&*contents[1].0, key2.as_bytes()); - assert_eq!(&*contents[1].1, b"dog"); + #[test] + fn delete_and_get() -> io::Result<()> { + let db = create(1)?; + st::test_delete_and_get(&db) + } - let mut batch = db.transaction(); - batch.delete(None, key1.as_bytes()); - db.write(batch).unwrap(); + #[test] + fn delete_prefix() -> io::Result<()> { + let db = create(st::DELETE_PREFIX_NUM_COLUMNS)?; + st::test_delete_prefix(&db) + } + + #[test] + fn iter() -> io::Result<()> { + let db = create(1)?; + st::test_iter(&db) + } - assert!(db.get(None, key1.as_bytes()).unwrap().is_none()); + #[test] + fn iter_with_prefix() -> io::Result<()> { + let db = create(1)?; + st::test_iter_with_prefix(&db) + } - let mut batch = db.transaction(); - batch.put(None, key1.as_bytes(), b"cat"); - db.write(batch).unwrap(); + #[test] + fn complex() -> io::Result<()> { + let db = create(1)?; + st::test_complex(&db) + } - let mut transaction = db.transaction(); - transaction.put(None, key3.as_bytes(), b"elephant"); - transaction.delete(None, key1.as_bytes()); - db.write(transaction).unwrap(); - assert!(db.get(None, key1.as_bytes()).unwrap().is_none()); - assert_eq!(&*db.get(None, key3.as_bytes()).unwrap().unwrap(), b"elephant"); + #[test] + fn stats() -> io::Result<()> { + let db = create(st::IO_STATS_NUM_COLUMNS)?; + st::test_io_stats(&db) + } - assert_eq!(&*db.get_by_prefix(None, key3.as_bytes()).unwrap(), b"elephant"); - assert_eq!(&*db.get_by_prefix(None, key2.as_bytes()).unwrap(), b"dog"); + #[test] + fn secondary_db_get() -> io::Result<()> { + let primary = TempfileBuilder::new().prefix("").tempdir()?; + let secondary = TempfileBuilder::new().prefix("").tempdir()?; + let config = DatabaseConfig::with_columns(1); + let db = Database::open(&config, primary.path()).unwrap(); + let key1 = b"key1"; let mut transaction = db.transaction(); - 
transaction.put(None, key1.as_bytes(), b"horse"); - transaction.delete(None, key3.as_bytes()); - db.write_buffered(transaction); - assert!(db.get(None, key3.as_bytes()).unwrap().is_none()); - assert_eq!(&*db.get(None, key1.as_bytes()).unwrap().unwrap(), b"horse"); + transaction.put(0, key1, b"horse"); + db.write(transaction)?; - db.flush().unwrap(); - assert!(db.get(None, key3.as_bytes()).unwrap().is_none()); - assert_eq!(&*db.get(None, key1.as_bytes()).unwrap().unwrap(), b"horse"); + let config = DatabaseConfig { secondary: Some(secondary.path().to_owned()), ..DatabaseConfig::with_columns(1) }; + let second_db = Database::open(&config, primary.path()).unwrap(); + assert_eq!(&*second_db.get(0, key1)?.unwrap(), b"horse"); + Ok(()) } #[test] - fn kvdb() { - let tempdir = TempDir::new("").unwrap(); - let _ = Database::open_default(tempdir.path().to_str().unwrap()).unwrap(); - test_db(&DatabaseConfig::default()); + fn secondary_db_catch_up() -> io::Result<()> { + let primary = TempfileBuilder::new().prefix("").tempdir()?; + let secondary = TempfileBuilder::new().prefix("").tempdir()?; + let config = DatabaseConfig::with_columns(1); + let db = Database::open(&config, primary.path()).unwrap(); + + let config = DatabaseConfig { secondary: Some(secondary.path().to_owned()), ..DatabaseConfig::with_columns(1) }; + let second_db = Database::open(&config, primary.path()).unwrap(); + + let mut transaction = db.transaction(); + transaction.put(0, b"key1", b"mule"); + transaction.put(0, b"key2", b"cat"); + db.write(transaction)?; + + second_db.try_catch_up_with_primary()?; + assert_eq!(&*second_db.get(0, b"key2")?.unwrap(), b"cat"); + Ok(()) } #[test] @@ -790,26 +742,45 @@ mod tests { fn df_to_rotational() { use std::path::PathBuf; // Example df output. - let example_df = vec![70, 105, 108, 101, 115, 121, 115, 116, 101, 109, 32, 32, 32, 32, 32, 49, 75, 45, 98, 108, 111, 99, 107, 115, 32, 32, 32, 32, 32, 85, 115, 101, 100, 32, 65, 118, 97, 105, 108, 97, 98, 108, 101, 32, 85, 115, 101, 37, 32, 77, 111, 117, 110, 116, 101, 100, 32, 111, 110, 10, 47, 100, 101, 118, 47, 115, 100, 97, 49, 32, 32, 32, 32, 32, 32, 32, 54, 49, 52, 48, 57, 51, 48, 48, 32, 51, 56, 56, 50, 50, 50, 51, 54, 32, 32, 49, 57, 52, 52, 52, 54, 49, 54, 32, 32, 54, 55, 37, 32, 47, 10]; + let example_df = vec![ + 70, 105, 108, 101, 115, 121, 115, 116, 101, 109, 32, 32, 32, 32, 32, 49, 75, 45, 98, 108, 111, 99, 107, + 115, 32, 32, 32, 32, 32, 85, 115, 101, 100, 32, 65, 118, 97, 105, 108, 97, 98, 108, 101, 32, 85, 115, 101, + 37, 32, 77, 111, 117, 110, 116, 101, 100, 32, 111, 110, 10, 47, 100, 101, 118, 47, 115, 100, 97, 49, 32, + 32, 32, 32, 32, 32, 32, 54, 49, 52, 48, 57, 51, 48, 48, 32, 51, 56, 56, 50, 50, 50, 51, 54, 32, 32, 49, 57, + 52, 52, 52, 54, 49, 54, 32, 32, 54, 55, 37, 32, 47, 10, + ]; let expected_output = Some(PathBuf::from("/sys/block/sda/queue/rotational")); assert_eq!(rotational_from_df_output(example_df), expected_output); } + #[test] + #[should_panic] + fn db_config_with_zero_columns() { + let _cfg = DatabaseConfig::with_columns(0); + } + + #[test] + #[should_panic] + fn open_db_with_zero_columns() { + let cfg = DatabaseConfig { columns: 0, ..Default::default() }; + let _db = Database::open(&cfg, ""); + } + #[test] fn add_columns() { - let config = DatabaseConfig::default(); - let config_5 = DatabaseConfig::with_columns(Some(5)); + let config_1 = DatabaseConfig::default(); + let config_5 = DatabaseConfig::with_columns(5); - let tempdir = TempDir::new("").unwrap(); + let tempdir = 
TempfileBuilder::new().prefix("").tempdir().unwrap(); - // open empty, add 5. + // open 1, add 4. { - let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); - assert_eq!(db.num_columns(), 0); + let mut db = Database::open(&config_1, tempdir.path().to_str().unwrap()).unwrap(); + assert_eq!(db.num_columns(), 1); - for i in 0..5 { + for i in 2..=5 { db.add_column().unwrap(); - assert_eq!(db.num_columns(), i + 1); + assert_eq!(db.num_columns(), i); } } @@ -821,44 +792,161 @@ mod tests { } #[test] - fn drop_columns() { - let config = DatabaseConfig::default(); - let config_5 = DatabaseConfig::with_columns(Some(5)); + fn remove_columns() { + let config_1 = DatabaseConfig::default(); + let config_5 = DatabaseConfig::with_columns(5); - let tempdir = TempDir::new("").unwrap(); + let tempdir = TempfileBuilder::new().prefix("drop_columns").tempdir().unwrap(); - // open 5, remove all. + // open 5, remove 4. { - let db = Database::open(&config_5, tempdir.path().to_str().unwrap()).unwrap(); + let mut db = Database::open(&config_5, tempdir.path()).expect("open with 5 columns"); assert_eq!(db.num_columns(), 5); - for i in (0..5).rev() { - db.drop_column().unwrap(); + for i in (1..5).rev() { + db.remove_last_column().unwrap(); assert_eq!(db.num_columns(), i); } } - // reopen as 0. + // reopen as 1. { - let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); - assert_eq!(db.num_columns(), 0); + let db = Database::open(&config_1, tempdir.path().to_str().unwrap()).unwrap(); + assert_eq!(db.num_columns(), 1); } } #[test] - fn write_clears_buffered_ops() { - let tempdir = TempDir::new("").unwrap(); - let config = DatabaseConfig::default(); - let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); + fn test_num_keys() { + let tempdir = TempfileBuilder::new().prefix("").tempdir().unwrap(); + let config = DatabaseConfig::with_columns(1); + let db = Database::open(&config, tempdir.path()).unwrap(); + assert_eq!(db.num_keys(0).unwrap(), 0, "database is empty after creation"); + let key1 = b"beef"; let mut batch = db.transaction(); - batch.put(None, b"foo", b"bar"); - db.write_buffered(batch); - - let mut batch = db.transaction(); - batch.put(None, b"foo", b"baz"); + batch.put(0, key1, key1); db.write(batch).unwrap(); + assert_eq!(db.num_keys(0).unwrap(), 1, "adding a key increases the count"); + } + + #[test] + fn default_memory_budget() { + let c = DatabaseConfig::default(); + assert_eq!(c.columns, 1); + assert_eq!(c.memory_budget(), DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB * MB, "total memory budget is default"); + assert_eq!( + c.memory_budget_for_col(0), + DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB * MB, + "total memory budget for column 0 is the default" + ); + assert_eq!( + c.memory_budget_for_col(999), + DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB * MB, + "total memory budget for any column is the default" + ); + } + + #[test] + fn memory_budget() { + let mut c = DatabaseConfig::with_columns(3); + c.memory_budget = [(0, 10), (1, 15), (2, 20)].iter().cloned().collect(); + assert_eq!(c.memory_budget(), 45 * MB, "total budget is the sum of the column budget"); + } + + #[test] + fn test_stats_parser() { + let raw = r#"rocksdb.row.cache.hit COUNT : 1 +rocksdb.db.get.micros P50 : 2.000000 P95 : 3.000000 P99 : 4.000000 P100 : 5.000000 COUNT : 0 SUM : 15 +"#; + let stats = stats::parse_rocksdb_stats(raw); + assert_eq!(stats["row.cache.hit"].count, 1); + assert!(stats["row.cache.hit"].times.is_none()); + assert_eq!(stats["db.get.micros"].count, 0); + let get_times = 
stats["db.get.micros"].times.unwrap(); + assert_eq!(get_times.sum, 15); + assert_eq!(get_times.p50, 2.0); + assert_eq!(get_times.p95, 3.0); + assert_eq!(get_times.p99, 4.0); + assert_eq!(get_times.p100, 5.0); + } - assert_eq!(db.get(None, b"foo").unwrap().unwrap().as_ref(), b"baz"); + #[test] + fn rocksdb_settings() { + const NUM_COLS: usize = 2; + let mut cfg = DatabaseConfig { enable_statistics: true, ..DatabaseConfig::with_columns(NUM_COLS as u32) }; + cfg.max_open_files = 123; // is capped by the OS fd limit (typically 1024) + cfg.compaction.block_size = 323232; + cfg.compaction.initial_file_size = 102030; + cfg.memory_budget = [(0, 30), (1, 300)].iter().cloned().collect(); + + let db_path = TempfileBuilder::new() + .prefix("config_test") + .tempdir() + .expect("the OS can create tmp dirs"); + let db = Database::open(&cfg, db_path.path()).expect("can open a db"); + let statistics = db.get_statistics(); + assert!(statistics.contains_key("block.cache.hit")); + drop(db); + + let mut rocksdb_log = std::fs::File::open(format!("{}/LOG", db_path.path().to_str().unwrap())) + .expect("rocksdb creates a LOG file"); + let mut settings = String::new(); + rocksdb_log.read_to_string(&mut settings).unwrap(); + // Check column count + assert!(settings.contains("Options for column family [default]"), "no default col"); + assert!(settings.contains("Options for column family [col0]"), "no col0"); + assert!(settings.contains("Options for column family [col1]"), "no col1"); + + // Check max_open_files + assert!(settings.contains("max_open_files: 123")); + + // Check block size + assert!(settings.contains(" block_size: 323232")); + + // LRU cache (default column) + assert!(settings.contains("block_cache_options:\n capacity : 115343360")); + // LRU cache for non-default columns is ⅓ of memory budget (including default column) + let lru_size = (330 * MB) / 3; + let needle = format!("block_cache_options:\n capacity : {}", lru_size); + let lru = settings.match_indices(&needle).collect::>().len(); + assert_eq!(lru, NUM_COLS); + + // Index/filters share cache + let include_indexes = settings.matches("cache_index_and_filter_blocks: 1").collect::>().len(); + assert_eq!(include_indexes, NUM_COLS); + // Pin index/filters on L0 + let pins = settings + .matches("pin_l0_filter_and_index_blocks_in_cache: 1") + .collect::>() + .len(); + assert_eq!(pins, NUM_COLS); + + // Check target file size, aka initial file size + let l0_sizes = settings.matches("target_file_size_base: 102030").collect::>().len(); + assert_eq!(l0_sizes, NUM_COLS); + // The default column uses the default of 64Mb regardless of the setting. 
+ assert!(settings.contains("target_file_size_base: 67108864")); + + // Check compression settings + let snappy_compression = settings.matches("Options.compression: Snappy").collect::>().len(); + // All columns use Snappy + assert_eq!(snappy_compression, NUM_COLS + 1); + // …even for L7 + let snappy_bottommost = settings + .matches("Options.bottommost_compression: Disabled") + .collect::>() + .len(); + assert_eq!(snappy_bottommost, NUM_COLS + 1); + + // 7 levels + let levels = settings.matches("Options.num_levels: 7").collect::>().len(); + assert_eq!(levels, NUM_COLS + 1); + + // Don't fsync every store + assert!(settings.contains("Options.use_fsync: 0")); + + // We're using the new format + assert!(settings.contains("format_version: 5")); } } diff --git a/kvdb-rocksdb/src/stats.rs b/kvdb-rocksdb/src/stats.rs new file mode 100644 index 000000000..ca7c4888d --- /dev/null +++ b/kvdb-rocksdb/src/stats.rs @@ -0,0 +1,196 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use parking_lot::RwLock; +use std::{ + collections::HashMap, + str::FromStr, + sync::atomic::{AtomicU64, Ordering as AtomicOrdering}, + time::Instant, +}; + +#[derive(Default, Clone, Copy)] +pub struct RawDbStats { + pub reads: u64, + pub writes: u64, + pub bytes_written: u64, + pub bytes_read: u64, + pub transactions: u64, + pub cache_hit_count: u64, +} + +#[derive(Default, Debug, Clone, Copy)] +pub struct RocksDbStatsTimeValue { + /// 50% percentile + pub p50: f64, + /// 95% percentile + pub p95: f64, + /// 99% percentile + pub p99: f64, + /// 100% percentile + pub p100: f64, + pub sum: u64, +} + +#[derive(Default, Debug, Clone, Copy)] +pub struct RocksDbStatsValue { + pub count: u64, + pub times: Option, +} + +pub fn parse_rocksdb_stats(stats: &str) -> HashMap { + stats.lines().map(|line| parse_rocksdb_stats_row(line.splitn(2, ' '))).collect() +} + +fn parse_rocksdb_stats_row<'a>(mut iter: impl Iterator) -> (String, RocksDbStatsValue) { + const PROOF: &str = "rocksdb statistics format is valid and hasn't changed"; + const SEPARATOR: &str = " : "; + let key = iter.next().expect(PROOF).trim_start_matches("rocksdb.").to_owned(); + let values = iter.next().expect(PROOF); + let value = if values.starts_with("COUNT") { + // rocksdb.row.cache.hit COUNT : 0 + RocksDbStatsValue { + count: u64::from_str(values.rsplit(SEPARATOR).next().expect(PROOF)).expect(PROOF), + times: None, + } + } else { + // rocksdb.db.get.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0 + let values: Vec<&str> = values.split_whitespace().filter(|s| *s != ":").collect(); + let times = RocksDbStatsTimeValue { + p50: f64::from_str(values.get(1).expect(PROOF)).expect(PROOF), + p95: f64::from_str(values.get(3).expect(PROOF)).expect(PROOF), + p99: f64::from_str(values.get(5).expect(PROOF)).expect(PROOF), + p100: f64::from_str(values.get(7).expect(PROOF)).expect(PROOF), + sum: u64::from_str(values.get(11).expect(PROOF)).expect(PROOF), + }; + RocksDbStatsValue { count: u64::from_str(values.get(9).expect(PROOF)).expect(PROOF), times: Some(times) } + }; + (key, value) +} + +impl RawDbStats { + fn combine(&self, other: &RawDbStats) -> Self { + RawDbStats { + reads: self.reads + other.reads, + writes: self.writes + other.writes, + bytes_written: self.bytes_written + other.bytes_written, + bytes_read: self.bytes_read + other.bytes_written, + transactions: 
self.transactions + other.transactions, + cache_hit_count: self.cache_hit_count + other.cache_hit_count, + } + } +} + +struct OverallDbStats { + stats: RawDbStats, + last_taken: Instant, + started: Instant, +} + +impl OverallDbStats { + fn new() -> Self { + OverallDbStats { stats: RawDbStats::default(), last_taken: Instant::now(), started: Instant::now() } + } +} + +pub struct RunningDbStats { + reads: AtomicU64, + writes: AtomicU64, + bytes_written: AtomicU64, + bytes_read: AtomicU64, + transactions: AtomicU64, + cache_hit_count: AtomicU64, + overall: RwLock, +} + +pub struct TakenDbStats { + pub raw: RawDbStats, + pub started: Instant, +} + +impl RunningDbStats { + pub fn new() -> Self { + Self { + reads: 0.into(), + bytes_read: 0.into(), + writes: 0.into(), + bytes_written: 0.into(), + transactions: 0.into(), + cache_hit_count: 0.into(), + overall: OverallDbStats::new().into(), + } + } + + pub fn tally_reads(&self, val: u64) { + self.reads.fetch_add(val, AtomicOrdering::Relaxed); + } + + pub fn tally_bytes_read(&self, val: u64) { + self.bytes_read.fetch_add(val, AtomicOrdering::Relaxed); + } + + pub fn tally_writes(&self, val: u64) { + self.writes.fetch_add(val, AtomicOrdering::Relaxed); + } + + pub fn tally_bytes_written(&self, val: u64) { + self.bytes_written.fetch_add(val, AtomicOrdering::Relaxed); + } + + pub fn tally_transactions(&self, val: u64) { + self.transactions.fetch_add(val, AtomicOrdering::Relaxed); + } + + pub fn tally_cache_hit_count(&self, val: u64) { + self.cache_hit_count.fetch_add(val, AtomicOrdering::Relaxed); + } + + fn take_current(&self) -> RawDbStats { + RawDbStats { + reads: self.reads.swap(0, AtomicOrdering::Relaxed), + writes: self.writes.swap(0, AtomicOrdering::Relaxed), + bytes_written: self.bytes_written.swap(0, AtomicOrdering::Relaxed), + bytes_read: self.bytes_read.swap(0, AtomicOrdering::Relaxed), + transactions: self.transactions.swap(0, AtomicOrdering::Relaxed), + cache_hit_count: self.cache_hit_count.swap(0, AtomicOrdering::Relaxed), + } + } + + fn peek_current(&self) -> RawDbStats { + RawDbStats { + reads: self.reads.load(AtomicOrdering::Relaxed), + writes: self.writes.load(AtomicOrdering::Relaxed), + bytes_written: self.bytes_written.load(AtomicOrdering::Relaxed), + bytes_read: self.bytes_read.load(AtomicOrdering::Relaxed), + transactions: self.transactions.load(AtomicOrdering::Relaxed), + cache_hit_count: self.cache_hit_count.load(AtomicOrdering::Relaxed), + } + } + + pub fn since_previous(&self) -> TakenDbStats { + let mut overall_lock = self.overall.write(); + + let current = self.take_current(); + + overall_lock.stats = overall_lock.stats.combine(¤t); + + let stats = TakenDbStats { raw: current, started: overall_lock.last_taken }; + + overall_lock.last_taken = Instant::now(); + + stats + } + + pub fn overall(&self) -> TakenDbStats { + let overall_lock = self.overall.read(); + + let current = self.peek_current(); + + TakenDbStats { raw: overall_lock.stats.combine(¤t), started: overall_lock.started } + } +} diff --git a/kvdb-shared-tests/CHANGELOG.md b/kvdb-shared-tests/CHANGELOG.md new file mode 100644 index 000000000..767ffbcd9 --- /dev/null +++ b/kvdb-shared-tests/CHANGELOG.md @@ -0,0 +1,22 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.11.0] - 2022-11-29 +- Removed `parity-util-mem` support. 
[#696](https://github.com/paritytech/parity-common/pull/696) + +## [0.10.0] - 2022-09-20 +### Breaking +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `kvdb` to 0.12. [#680](https://github.com/paritytech/parity-common/pull/680) + +### Breaking +- Updated `kvdb` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) + +## [0.7.0] - 2021-01-27 +### Breaking +- Updated `kvdb` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml new file mode 100644 index 000000000..6da808795 --- /dev/null +++ b/kvdb-shared-tests/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "kvdb-shared-tests" +version = "0.11.0" +authors = ["Parity Technologies "] +edition = "2021" +rust-version = "1.56.1" +description = "Shared tests for kvdb functionality, to be executed against actual implementations" +license = "MIT OR Apache-2.0" + +[dependencies] +kvdb = { path = "../kvdb", version = "0.13" } diff --git a/kvdb-shared-tests/src/lib.rs b/kvdb-shared-tests/src/lib.rs new file mode 100644 index 000000000..391e7821c --- /dev/null +++ b/kvdb-shared-tests/src/lib.rs @@ -0,0 +1,299 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Shared tests for kvdb functionality, to be executed against actual implementations. + +use kvdb::{IoStatsKind, KeyValueDB}; +use std::io; + +/// A test for `KeyValueDB::get`. +pub fn test_put_and_get(db: &dyn KeyValueDB) -> io::Result<()> { + let key1 = b"key1"; + + let mut transaction = db.transaction(); + transaction.put(0, key1, b"horse"); + db.write(transaction)?; + assert_eq!(&*db.get(0, key1)?.unwrap(), b"horse"); + Ok(()) +} + +/// A test for `KeyValueDB::get`. +pub fn test_delete_and_get(db: &dyn KeyValueDB) -> io::Result<()> { + let key1 = b"key1"; + + let mut transaction = db.transaction(); + transaction.put(0, key1, b"horse"); + db.write(transaction)?; + assert_eq!(&*db.get(0, key1)?.unwrap(), b"horse"); + + let mut transaction = db.transaction(); + transaction.delete(0, key1); + db.write(transaction)?; + assert!(db.get(0, key1)?.is_none()); + Ok(()) +} + +/// A test for `KeyValueDB::get`. +/// Assumes the `db` has only 1 column. +pub fn test_get_fails_with_non_existing_column(db: &dyn KeyValueDB) -> io::Result<()> { + assert!(db.get(1, &[]).is_err()); + Ok(()) +} + +/// A test for `KeyValueDB::write`. +pub fn test_write_clears_buffered_ops(db: &dyn KeyValueDB) -> io::Result<()> { + let mut batch = db.transaction(); + batch.put(0, b"foo", b"bar"); + db.write(batch)?; + + assert_eq!(db.get(0, b"foo")?.unwrap(), b"bar"); + + let mut batch = db.transaction(); + batch.put(0, b"foo", b"baz"); + db.write(batch)?; + + assert_eq!(db.get(0, b"foo")?.unwrap(), b"baz"); + Ok(()) +} + +/// A test for `KeyValueDB::iter`. 
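These helpers are meant to be driven from each backend's own test suite, as the kvdb-rocksdb tests earlier in this patch do. A consumer-side sketch, shown against the in-memory backend purely for brevity (assumes a dev-dependency on the sibling kvdb-memorydb crate):

    #[cfg(test)]
    mod tests {
        use kvdb_shared_tests as st;

        #[test]
        fn put_and_get() -> std::io::Result<()> {
            // kvdb-memorydb is used here only as a convenient stand-in backend.
            let db = kvdb_memorydb::create(1);
            st::test_put_and_get(&db)
        }
    }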
+pub fn test_iter(db: &dyn KeyValueDB) -> io::Result<()> { + let key1 = b"key1"; + let key2 = b"key2"; + + let mut transaction = db.transaction(); + transaction.put(0, key1, key1); + transaction.put(0, key2, key2); + db.write(transaction)?; + + let contents: Vec<_> = db.iter(0).into_iter().map(Result::unwrap).collect(); + assert_eq!(contents.len(), 2); + assert_eq!(&*contents[0].0, key1); + assert_eq!(&*contents[0].1, key1); + assert_eq!(&*contents[1].0, key2); + assert_eq!(&*contents[1].1, key2); + Ok(()) +} + +/// A test for `KeyValueDB::iter_with_prefix`. +pub fn test_iter_with_prefix(db: &dyn KeyValueDB) -> io::Result<()> { + let key1 = b"0"; + let key2 = b"ab"; + let key3 = b"abc"; + let key4 = b"abcd"; + + let mut batch = db.transaction(); + batch.put(0, key1, key1); + batch.put(0, key2, key2); + batch.put(0, key3, key3); + batch.put(0, key4, key4); + db.write(batch)?; + + // empty prefix + let contents: Vec<_> = db.iter_with_prefix(0, b"").into_iter().map(Result::unwrap).collect(); + assert_eq!(contents.len(), 4); + assert_eq!(&*contents[0].0, key1); + assert_eq!(&*contents[1].0, key2); + assert_eq!(&*contents[2].0, key3); + assert_eq!(&*contents[3].0, key4); + + // prefix a + let contents: Vec<_> = db.iter_with_prefix(0, b"a").into_iter().map(Result::unwrap).collect(); + assert_eq!(contents.len(), 3); + assert_eq!(&*contents[0].0, key2); + assert_eq!(&*contents[1].0, key3); + assert_eq!(&*contents[2].0, key4); + + // prefix abc + let contents: Vec<_> = db.iter_with_prefix(0, b"abc").into_iter().map(Result::unwrap).collect(); + assert_eq!(contents.len(), 2); + assert_eq!(&*contents[0].0, key3); + assert_eq!(&*contents[1].0, key4); + + // prefix abcde + let contents: Vec<_> = db.iter_with_prefix(0, b"abcde").into_iter().map(Result::unwrap).collect(); + assert_eq!(contents.len(), 0); + + // prefix 0 + let contents: Vec<_> = db.iter_with_prefix(0, b"0").into_iter().map(Result::unwrap).collect(); + assert_eq!(contents.len(), 1); + assert_eq!(&*contents[0].0, key1); + Ok(()) +} + +/// The number of columns required to run `test_io_stats`. +pub const IO_STATS_NUM_COLUMNS: u32 = 3; + +/// A test for `KeyValueDB::io_stats`. +/// Assumes that the `db` has at least 3 columns. +pub fn test_io_stats(db: &dyn KeyValueDB) -> io::Result<()> { + let key1 = b"kkk"; + let mut batch = db.transaction(); + batch.put(0, key1, key1); + batch.put(1, key1, key1); + batch.put(2, key1, key1); + + for _ in 0..10 { + db.get(0, key1)?; + } + + db.write(batch)?; + + let io_stats = db.io_stats(IoStatsKind::SincePrevious); + assert_eq!(io_stats.transactions, 1); + assert_eq!(io_stats.writes, 3); + assert_eq!(io_stats.bytes_written, 18); + assert_eq!(io_stats.reads, 10); + assert_eq!(io_stats.bytes_read, 30); + + let new_io_stats = db.io_stats(IoStatsKind::SincePrevious); + // Since we taken previous statistic period, + // this is expected to be totally empty. + assert_eq!(new_io_stats.transactions, 0); + + // but the overall should be there + let new_io_stats = db.io_stats(IoStatsKind::Overall); + assert_eq!(new_io_stats.bytes_written, 18); + + let mut batch = db.transaction(); + batch.delete(0, key1); + batch.delete(1, key1); + batch.delete(2, key1); + + // transaction is not commited yet + assert_eq!(db.io_stats(IoStatsKind::SincePrevious).writes, 0); + + db.write(batch)?; + // now it is, and delete is counted as write + assert_eq!(db.io_stats(IoStatsKind::SincePrevious).writes, 3); + Ok(()) +} + +/// The number of columns required to run `test_delete_prefix`. 
+pub const DELETE_PREFIX_NUM_COLUMNS: u32 = 7; + +/// A test for `KeyValueDB::delete_prefix`. +pub fn test_delete_prefix(db: &dyn KeyValueDB) -> io::Result<()> { + let keys = [ + &[][..], + &[0u8][..], + &[0, 1][..], + &[1][..], + &[1, 0][..], + &[1, 255][..], + &[1, 255, 255][..], + &[2][..], + &[2, 0][..], + &[2, 255][..], + &[255; 16][..], + ]; + let init_db = |ix: u32| -> io::Result<()> { + let mut batch = db.transaction(); + for (i, key) in keys.iter().enumerate() { + batch.put(ix, key, &[i as u8]); + } + db.write(batch)?; + Ok(()) + }; + let check_db = |ix: u32, content: [bool; 11]| -> io::Result<()> { + let mut state = [true; 11]; + for (c, key) in keys.iter().enumerate() { + state[c] = db.get(ix, key)?.is_some(); + } + assert_eq!(state, content, "at {}", ix); + Ok(()) + }; + let tests: [_; DELETE_PREFIX_NUM_COLUMNS as usize] = [ + // standard + (&[1u8][..], [true, true, true, false, false, false, false, true, true, true, true]), + // edge + (&[1u8, 255, 255][..], [true, true, true, true, true, true, false, true, true, true, true]), + // none 1 + (&[1, 2][..], [true, true, true, true, true, true, true, true, true, true, true]), + // none 2 + (&[8][..], [true, true, true, true, true, true, true, true, true, true, true]), + // last value + (&[255, 255][..], [true, true, true, true, true, true, true, true, true, true, false]), + // last value, limit prefix + (&[255][..], [true, true, true, true, true, true, true, true, true, true, false]), + // all + (&[][..], [false, false, false, false, false, false, false, false, false, false, false]), + ]; + for (ix, test) in tests.iter().enumerate() { + let ix = ix as u32; + init_db(ix)?; + let mut batch = db.transaction(); + batch.delete_prefix(ix, test.0); + db.write(batch)?; + check_db(ix, test.1)?; + } + + Ok(()) +} + +/// A complex test. 
+pub fn test_complex(db: &dyn KeyValueDB) -> io::Result<()> { + let key1 = b"02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"; + let key2 = b"03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"; + let key3 = b"04c00000000b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"; + let key4 = b"04c01111110b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"; + let key5 = b"04c02222220b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"; + + let mut batch = db.transaction(); + batch.put(0, key1, b"cat"); + batch.put(0, key2, b"dog"); + batch.put(0, key3, b"caterpillar"); + batch.put(0, key4, b"beef"); + batch.put(0, key5, b"fish"); + db.write(batch)?; + + assert_eq!(&*db.get(0, key1)?.unwrap(), b"cat"); + + let contents: Vec<_> = db.iter(0).into_iter().map(Result::unwrap).collect(); + assert_eq!(contents.len(), 5); + assert_eq!(contents[0].0.to_vec(), key1.to_vec()); + assert_eq!(&*contents[0].1, b"cat"); + assert_eq!(contents[1].0.to_vec(), key2.to_vec()); + assert_eq!(&*contents[1].1, b"dog"); + + let mut prefix_iter = db.iter_with_prefix(0, b"04c0"); + assert_eq!(*prefix_iter.next().unwrap().unwrap().1, b"caterpillar"[..]); + assert_eq!(*prefix_iter.next().unwrap().unwrap().1, b"beef"[..]); + assert_eq!(*prefix_iter.next().unwrap().unwrap().1, b"fish"[..]); + + let mut batch = db.transaction(); + batch.delete(0, key1); + db.write(batch)?; + + assert!(db.get(0, key1)?.is_none()); + + let mut batch = db.transaction(); + batch.put(0, key1, b"cat"); + db.write(batch)?; + + let mut transaction = db.transaction(); + transaction.put(0, key3, b"elephant"); + transaction.delete(0, key1); + db.write(transaction)?; + assert!(db.get(0, key1)?.is_none()); + assert_eq!(&*db.get(0, key3)?.unwrap(), b"elephant"); + + assert_eq!(&*db.get_by_prefix(0, key3).unwrap().unwrap(), b"elephant"); + assert_eq!(&*db.get_by_prefix(0, key2).unwrap().unwrap(), b"dog"); + + let mut transaction = db.transaction(); + transaction.put(0, key1, b"horse"); + transaction.delete(0, key3); + db.write(transaction)?; + assert!(db.get(0, key3)?.is_none()); + assert_eq!(&*db.get(0, key1)?.unwrap(), b"horse"); + + assert!(db.get(0, key3)?.is_none()); + assert_eq!(&*db.get(0, key1)?.unwrap(), b"horse"); + Ok(()) +} diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md new file mode 100644 index 000000000..5e4305c6f --- /dev/null +++ b/kvdb/CHANGELOG.md @@ -0,0 +1,74 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + + +## [0.13.0] - 2022-11-29 +- Removed `parity-util-mem` support. [#696](https://github.com/paritytech/parity-common/pull/696) + +## [0.12.0] - 2022-09-20 +### Breaking +- Removed `fn restore` from `KeyValueDB` trait. [#662](https://github.com/paritytech/parity-common/pull/662) +- Streamlined API. [#661](https://github.com/paritytech/parity-common/pull/661) + - `fn get_by_prefix` return type changed to `io::Result>` + - `fn has_prefix` return type changed to `io::Result` + - Iterator item changed to `io::Result` +- Updated `parity-util-mem` to 0.12. [#680](https://github.com/paritytech/parity-common/pull/680) + +## [0.11.0] - 2022-02-04 +### Breaking +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `parity-util-mem` to 0.11. [#623](https://github.com/paritytech/parity-common/pull/623) + +## [0.10.0] - 2021-07-02 +### Breaking +- Updated `parity-util-mem` to 0.10. 
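As a rough sketch of how a backend is expected to consume `kvdb-shared-tests`: every helper above takes a `&dyn KeyValueDB`, so a backend crate adds `kvdb-shared-tests` as a dev-dependency and forwards the helpers to its own database type. The example below assumes the in-memory backend from this workspace, `kvdb-memorydb`, which is not part of this diff; `create(num_cols)` is assumed to return a type implementing `KeyValueDB`.

```rust
#[cfg(test)]
mod tests {
    use kvdb_shared_tests as st;
    use std::io;

    #[test]
    fn put_and_get() -> io::Result<()> {
        // A single column is enough for the basic helpers.
        let db = kvdb_memorydb::create(1);
        st::test_put_and_get(&db)
    }

    #[test]
    fn delete_and_get() -> io::Result<()> {
        let db = kvdb_memorydb::create(1);
        st::test_delete_and_get(&db)
    }

    #[test]
    fn iter() -> io::Result<()> {
        let db = kvdb_memorydb::create(1);
        st::test_iter(&db)
    }
}
```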
[#556](https://github.com/paritytech/parity-common/pull/556) + +## [0.9.0] - 2021-01-27 +### Breaking +- Updated `parity-util-mem` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) + +## [0.8.0] - 2021-01-05 +### Breaking +- Updated `parity-util-mem` to 0.8. [#470](https://github.com/paritytech/parity-common/pull/470) + +## [0.7.0] - 2020-06-24 +- Updated `parity-util-mem` to 0.7. [#402](https://github.com/paritytech/parity-common/pull/402) + +## [0.6.0] - 2020-05-05 +### Breaking +- Removed `write_buffered` and `flush` methods. [#313](https://github.com/paritytech/parity-common/pull/313) +- Introduced a new `DeletePrefix` database operation. [#360](https://github.com/paritytech/parity-common/pull/360) +- Renamed prefix iteration to `iter_with_prefix`. [#365](https://github.com/paritytech/parity-common/pull/365) + +## [0.5.0] - 2020-03-16 +- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) +- Remove dependency on parity-bytes. [#351](https://github.com/paritytech/parity-common/pull/351) +- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) + +## [0.4.0] - 2019-01-06 +- Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332) + +## [0.3.1] - 2019-01-06 +- Updated features and feature dependencies. [#307](https://github.com/paritytech/parity-common/pull/307) + +## [0.3.0] - 2020-01-03 +- I/O statistics API. [#294](https://github.com/paritytech/parity-common/pull/294) +- Removed `KeyValueDBHandler` trait. [#304](https://github.com/paritytech/parity-common/pull/304) + +## [0.2.0] - 2019-12-19 +### Changed +- Default column support removed from the API + - Column argument type changed from `Option` to `u32` + - Migration `None` -> unsupported, `Some(0)` -> `0`, `Some(1)` -> `1`, etc. +- Remove `ElasticArray` and change `DBValue` to be a type alias for `Vec` and add a `DBKey` backed by a `SmallVec`. [#282](https://github.com/paritytech/parity-common/pull/282) + +## [0.1.1] - 2019-10-24 +### Dependencies +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) +### Changed +- Migrated to 2018 edition. [#205](https://github.com/paritytech/parity-common/pull/205) diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index e0306dfe2..c66b024b8 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "kvdb" -version = "0.1.0" +version = "0.13.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" -license = "GPL-3.0" -edition = "2018" +license = "MIT OR Apache-2.0" +edition = "2021" +rust-version = "1.56.1" [dependencies] -elastic-array = "0.10" -bytes = { package = "parity-bytes", version = "0.1", path = "../parity-bytes" } +smallvec = "1.0.0" diff --git a/kvdb/src/io_stats.rs b/kvdb/src/io_stats.rs new file mode 100644 index 000000000..9c1a89f97 --- /dev/null +++ b/kvdb/src/io_stats.rs @@ -0,0 +1,133 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Generic statistics for key-value databases + +/// Statistic kind to query. +pub enum Kind { + /// Overall statistics since start. + Overall, + /// Statistics since previous query. 
+ SincePrevious, +} + +/// Statistic for the `span` period +#[derive(Debug, Clone)] +pub struct IoStats { + /// Number of transaction. + pub transactions: u64, + /// Number of read operations. + pub reads: u64, + /// Number of reads resulted in a read from cache. + pub cache_reads: u64, + /// Number of write operations. + pub writes: u64, + /// Number of bytes read + pub bytes_read: u64, + /// Number of bytes read from cache + pub cache_read_bytes: u64, + /// Number of bytes write + pub bytes_written: u64, + /// Start of the statistic period. + pub started: std::time::Instant, + /// Total duration of the statistic period. + pub span: std::time::Duration, +} + +impl IoStats { + /// Empty statistic report. + pub fn empty() -> Self { + Self { + transactions: 0, + reads: 0, + cache_reads: 0, + writes: 0, + bytes_read: 0, + cache_read_bytes: 0, + bytes_written: 0, + started: std::time::Instant::now(), + span: std::time::Duration::default(), + } + } + + /// Average batch (transaction) size (writes per transaction) + pub fn avg_batch_size(&self) -> f64 { + if self.writes == 0 { + return 0.0 + } + self.transactions as f64 / self.writes as f64 + } + + /// Read operations per second. + pub fn reads_per_sec(&self) -> f64 { + if self.span.as_secs_f64() == 0.0 { + return 0.0 + } + + self.reads as f64 / self.span.as_secs_f64() + } + + pub fn byte_reads_per_sec(&self) -> f64 { + if self.span.as_secs_f64() == 0.0 { + return 0.0 + } + + self.bytes_read as f64 / self.span.as_secs_f64() + } + + /// Write operations per second. + pub fn writes_per_sec(&self) -> f64 { + if self.span.as_secs_f64() == 0.0 { + return 0.0 + } + + self.writes as f64 / self.span.as_secs_f64() + } + + pub fn byte_writes_per_sec(&self) -> f64 { + if self.span.as_secs_f64() == 0.0 { + return 0.0 + } + + self.bytes_written as f64 / self.span.as_secs_f64() + } + + /// Total number of operations per second. + pub fn ops_per_sec(&self) -> f64 { + if self.span.as_secs_f64() == 0.0 { + return 0.0 + } + + (self.writes as f64 + self.reads as f64) / self.span.as_secs_f64() + } + + /// Transactions per second. + pub fn transactions_per_sec(&self) -> f64 { + if self.span.as_secs_f64() == 0.0 { + return 0.0 + } + + (self.transactions as f64) / self.span.as_secs_f64() + } + + pub fn avg_transaction_size(&self) -> f64 { + if self.transactions == 0 { + return 0.0 + } + + self.bytes_written as f64 / self.transactions as f64 + } + + pub fn cache_hit_ratio(&self) -> f64 { + if self.reads == 0 { + return 0.0 + } + + self.cache_reads as f64 / self.reads as f64 + } +} diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index 93a4902fd..f44461cf0 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -1,32 +1,29 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
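To make the derived metrics above concrete, here is a small worked example. It assumes only the `IoStats` type introduced in this diff, whose fields are public, so a report can be filled in by hand for illustration.

```rust
use kvdb::IoStats;
use std::time::Duration;

fn main() {
    let mut stats = IoStats::empty();
    stats.reads = 10;
    stats.cache_reads = 4;
    stats.writes = 3;
    stats.transactions = 1;
    stats.bytes_written = 18;
    stats.span = Duration::from_secs(2);

    // 4 of 10 reads were served from the cache.
    assert_eq!(stats.cache_hit_ratio(), 0.4);
    // (3 writes + 10 reads) / 2 seconds.
    assert_eq!(stats.ops_per_sec(), 6.5);
    // 18 bytes written across 1 transaction.
    assert_eq!(stats.avg_transaction_size(), 18.0);
}
```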
- -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Key-Value store abstraction with `RocksDB` backend. +//! Key-Value store abstraction. +use smallvec::SmallVec; use std::io; -use std::path::Path; -use std::sync::Arc; -use elastic_array::{ElasticArray128, ElasticArray32}; -use bytes::Bytes; + +mod io_stats; /// Required length of prefixes. pub const PREFIX_LEN: usize = 12; /// Database value. -pub type DBValue = ElasticArray128; +pub type DBValue = Vec; +/// Database keys. +pub type DBKey = SmallVec<[u8; 32]>; +/// A tuple holding key and value data, used in the iterator item type. +pub type DBKeyValue = (DBKey, DBValue); + +pub use io_stats::{IoStats, Kind as IoStatsKind}; /// Write transaction. Batches a sequence of put/delete operations for efficiency. #[derive(Default, Clone, PartialEq)] @@ -38,15 +35,9 @@ pub struct DBTransaction { /// Database operation. #[derive(Clone, PartialEq)] pub enum DBOp { - Insert { - col: Option, - key: ElasticArray32, - value: DBValue, - }, - Delete { - col: Option, - key: ElasticArray32, - } + Insert { col: u32, key: DBKey, value: DBValue }, + Delete { col: u32, key: DBKey }, + DeletePrefix { col: u32, prefix: DBKey }, } impl DBOp { @@ -55,14 +46,16 @@ impl DBOp { match *self { DBOp::Insert { ref key, .. } => key, DBOp::Delete { ref key, .. } => key, + DBOp::DeletePrefix { ref prefix, .. } => prefix, } } /// Returns the column associated with this operation. - pub fn col(&self) -> Option { + pub fn col(&self) -> u32 { match *self { DBOp::Insert { col, .. } => col, DBOp::Delete { col, .. } => col, + DBOp::DeletePrefix { col, .. } => col, } } } @@ -75,99 +68,124 @@ impl DBTransaction { /// Create new transaction with capacity. pub fn with_capacity(cap: usize) -> DBTransaction { - DBTransaction { - ops: Vec::with_capacity(cap) - } + DBTransaction { ops: Vec::with_capacity(cap) } } /// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write. - pub fn put(&mut self, col: Option, key: &[u8], value: &[u8]) { - let mut ekey = ElasticArray32::new(); - ekey.append_slice(key); - self.ops.push(DBOp::Insert { - col: col, - key: ekey, - value: DBValue::from_slice(value), - }); + pub fn put(&mut self, col: u32, key: &[u8], value: &[u8]) { + self.ops + .push(DBOp::Insert { col, key: DBKey::from_slice(key), value: value.to_vec() }) } /// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write. - pub fn put_vec(&mut self, col: Option, key: &[u8], value: Bytes) { - let mut ekey = ElasticArray32::new(); - ekey.append_slice(key); - self.ops.push(DBOp::Insert { - col: col, - key: ekey, - value: DBValue::from_vec(value), - }); + pub fn put_vec(&mut self, col: u32, key: &[u8], value: Vec) { + self.ops.push(DBOp::Insert { col, key: DBKey::from_slice(key), value }); } /// Delete value by key. - pub fn delete(&mut self, col: Option, key: &[u8]) { - let mut ekey = ElasticArray32::new(); - ekey.append_slice(key); - self.ops.push(DBOp::Delete { - col: col, - key: ekey, - }); + pub fn delete(&mut self, col: u32, key: &[u8]) { + self.ops.push(DBOp::Delete { col, key: DBKey::from_slice(key) }); + } + + /// Delete all values with the given key prefix. + /// Using an empty prefix here will remove all keys + /// (all keys start with the empty prefix). + pub fn delete_prefix(&mut self, col: u32, prefix: &[u8]) { + self.ops.push(DBOp::DeletePrefix { col, prefix: DBKey::from_slice(prefix) }); } } /// Generic key-value database. 
///
-/// This makes a distinction between "buffered" and "flushed" values. Values which have been
-/// written can always be read, but may be present in an in-memory buffer. Values which have
-/// been flushed have been moved to backing storage, like a RocksDB instance. There are certain
-/// operations which are only guaranteed to operate on flushed data and not buffered,
-/// although implementations may differ in this regard.
-///
-/// The contents of an interior buffer may be explicitly flushed using the `flush` method.
-///
-/// The `KeyValueDB` also deals in "column families", which can be thought of as distinct
+/// The `KeyValueDB` deals with "column families", which can be thought of as distinct
/// stores within a database. Keys written in one column family will not be accessible from
/// any other. The number of column families must be specified at initialization, with a
-/// differing interface for each database. The `None` argument in place of a column index
-/// is always supported.
+/// differing interface for each database.
///
/// The API laid out here, along with the `Sync` bound implies interior synchronization for
/// implementation.
pub trait KeyValueDB: Sync + Send {
/// Helper to create a new transaction.
- fn transaction(&self) -> DBTransaction { DBTransaction::new() }
+ fn transaction(&self) -> DBTransaction {
+ DBTransaction::new()
+ }
/// Get a value by key.
- fn get(&self, col: Option, key: &[u8]) -> io::Result>;
-
- /// Get a value by partial key. Only works for flushed data.
- fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option>;
+ fn get(&self, col: u32, key: &[u8]) -> io::Result<Option<DBValue>>;
- /// Write a transaction of changes to the buffer.
- fn write_buffered(&self, transaction: DBTransaction);
+ /// Get the first value matching the given prefix.
+ fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> io::Result<Option<DBValue>>;
/// Write a transaction of changes to the backing store.
- fn write(&self, transaction: DBTransaction) -> io::Result<()> {
- self.write_buffered(transaction);
- self.flush()
+ fn write(&self, transaction: DBTransaction) -> io::Result<()>;
+
+ /// Iterate over the data for a given column.
+ fn iter<'a>(&'a self, col: u32) -> Box<dyn Iterator<Item = io::Result<DBKeyValue>> + 'a>;
+
+ /// Iterate over the data for a given column, returning all key/value pairs
+ /// where the key starts with the given prefix.
+ fn iter_with_prefix<'a>(
+ &'a self,
+ col: u32,
+ prefix: &'a [u8],
+ ) -> Box<dyn Iterator<Item = io::Result<DBKeyValue>> + 'a>;
+
+ /// Query statistics.
+ ///
+ /// Not all kvdb implementations are able or expected to implement this, so by
+ /// default, empty statistics are returned. Also, not all kvdb implementations
+ /// can return every statistic, or are configured to do so (some statistics gathering
+ /// may impede performance and might be off by default).
+ fn io_stats(&self, _kind: IoStatsKind) -> IoStats {
+ IoStats::empty()
}
- /// Flush all buffered data.
- fn flush(&self) -> io::Result<()>;
-
- /// Iterate over flushed data for a given column.
- fn iter<'a>(&'a self, col: Option)
- -> Box, Box<[u8]>)> + 'a>;
+ /// Check for the existence of a value by key.
+ fn has_key(&self, col: u32, key: &[u8]) -> io::Result<bool> {
+ self.get(col, key).map(|opt| opt.is_some())
+ }
- /// Iterate over flushed data for a given column, starting from a given prefix.
- fn iter_from_prefix<'a>(&'a self, col: Option, prefix: &'a [u8])
- -> Box, Box<[u8]>)> + 'a>;
+ /// Check for the existence of a value by prefix.
+ fn has_prefix(&self, col: u32, prefix: &[u8]) -> io::Result<bool> {
+ self.get_by_prefix(col, prefix).map(|opt| opt.is_some())
+ }
+}
- /// Attempt to replace this database with a new one located at the given path.
- fn restore(&self, new_db: &str) -> io::Result<()>;
+/// For a given start prefix (inclusive), returns the correct end prefix (non-inclusive).
+/// This assumes the key bytes are ordered in lexicographical order.
+/// Since key length is not limited, in some cases we return `None` because there is
+/// no bounded limit (every key in the series `[]`, `[255]`, `[255, 255]`, ...).
+pub fn end_prefix(prefix: &[u8]) -> Option<Vec<u8>> {
+ let mut end_range = prefix.to_vec();
+ while let Some(0xff) = end_range.last() {
+ end_range.pop();
+ }
+ if let Some(byte) = end_range.last_mut() {
+ *byte += 1;
+ Some(end_range)
+ } else {
+ None
+ }
}
-/// Generic key-value database handler. This trait contains one function `open`.
-/// When called, it opens database with a predefined config.
-pub trait KeyValueDBHandler: Send + Sync {
- /// Open the predefined key-value database.
- fn open(&self, path: &Path) -> io::Result>;
+#[cfg(test)]
+mod test {
+ use super::end_prefix;
+
+ #[test]
+ fn end_prefix_test() {
+ assert_eq!(end_prefix(&[5, 6, 7]), Some(vec![5, 6, 8]));
+ assert_eq!(end_prefix(&[5, 6, 255]), Some(vec![5, 7]));
+ // This is not equal as the result is before start.
+ assert_ne!(end_prefix(&[5, 255, 255]), Some(vec![5, 255]));
+ // This is equal ([5, 255] will not be deleted because
+ // it is before start).
+ assert_eq!(end_prefix(&[5, 255, 255]), Some(vec![6]));
+ assert_eq!(end_prefix(&[255, 255, 255]), None);
+
+ assert_eq!(end_prefix(&[0x00, 0xff]), Some(vec![0x01]));
+ assert_eq!(end_prefix(&[0xff]), None);
+ assert_eq!(end_prefix(&[]), None);
+ assert_eq!(end_prefix(b"0"), Some(b"1".to_vec()));
+ }
}
diff --git a/rlp/license-header b/license-header
similarity index 87%
rename from rlp/license-header
rename to license-header
index 03df169c8..67d3f3a1a 100644
--- a/rlp/license-header
+++ b/license-header
@@ -1,4 +1,4 @@
-// Copyright 2015-2017 Parity Technologies
+// Copyright 2020 Parity Technologies
//
// Licensed under the Apache License, Version 2.0 or the MIT license
diff --git a/parity-bytes/CHANGELOG.md b/parity-bytes/CHANGELOG.md
new file mode 100644
index 000000000..a3c092d58
--- /dev/null
+++ b/parity-bytes/CHANGELOG.md
@@ -0,0 +1,17 @@
+# Changelog
+
+The format is based on [Keep a Changelog].
+
+[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/
+
+## [Unreleased]
+- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601)
+
+## [0.1.2] - 2020-03-16
+- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342)
+
+## [0.1.1] - 2019-10-24
+### Dependencies
+- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239)
+### Added
+- Added no-std support.
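One way a backend might use `end_prefix` above when handling `DBOp::DeletePrefix` is to turn the prefix into a key range: inclusive at the prefix itself, exclusive at `end_prefix(prefix)`, and unbounded above when `None` is returned (empty prefix or a prefix of all `0xff` bytes). A small sketch; the helper `delete_range_bounds` is illustrative and not a crate API.

```rust
use kvdb::end_prefix;
use std::ops::Bound;

fn delete_range_bounds(prefix: &[u8]) -> (Bound<Vec<u8>>, Bound<Vec<u8>>) {
    let lower = Bound::Included(prefix.to_vec());
    let upper = match end_prefix(prefix) {
        // Everything strictly below this key still starts with the prefix.
        Some(end) => Bound::Excluded(end),
        // No finite upper bound exists; delete to the end of the column.
        None => Bound::Unbounded,
    };
    (lower, upper)
}

fn main() {
    assert_eq!(
        delete_range_bounds(b"ab"),
        (Bound::Included(b"ab".to_vec()), Bound::Excluded(b"ac".to_vec()))
    );
    // Every key sorts at or after the empty prefix, so the range is unbounded above.
    assert_eq!(delete_range_bounds(b""), (Bound::Included(vec![]), Bound::Unbounded));
}
```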
[#154](https://github.com/paritytech/parity-common/pull/154) diff --git a/parity-bytes/Cargo.toml b/parity-bytes/Cargo.toml index 38be22ced..6d745aca3 100644 --- a/parity-bytes/Cargo.toml +++ b/parity-bytes/Cargo.toml @@ -1,11 +1,12 @@ [package] name = "parity-bytes" -version = "0.1.0" +version = "0.1.2" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "byte utilities for Parity" -license = "GPL-3.0" -edition = "2018" +license = "MIT OR Apache-2.0" +edition = "2021" +rust-version = "1.56.1" [dependencies] diff --git a/parity-bytes/src/lib.rs b/parity-bytes/src/lib.rs index 475864537..9314d854c 100644 --- a/parity-bytes/src/lib.rs +++ b/parity-bytes/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! General bytes-related utilities. //! @@ -32,7 +24,7 @@ use core::{cmp::min, fmt, ops}; pub struct PrettySlice<'a>(&'a [u8]); impl<'a> fmt::Debug for PrettySlice<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for i in 0..self.0.len() { if i > 0 { write!(f, "·{:02x}", self.0[i])?; @@ -45,7 +37,7 @@ impl<'a> fmt::Debug for PrettySlice<'a> { } impl<'a> fmt::Display for PrettySlice<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for i in 0..self.0.len() { write!(f, "{:02x}", self.0[i])?; } @@ -57,7 +49,7 @@ impl<'a> fmt::Display for PrettySlice<'a> { /// defaults cannot otherwise be avoided. pub trait ToPretty { /// Convert a type into a derivative form in order to make `format!` print it prettily. - fn pretty(&self) -> PrettySlice; + fn pretty(&self) -> PrettySlice<'_>; /// Express the object as a hex string. 
fn to_hex(&self) -> String { format!("{}", self.pretty()) @@ -65,7 +57,7 @@ pub trait ToPretty { } impl> ToPretty for T { - fn pretty(&self) -> PrettySlice { + fn pretty(&self) -> PrettySlice<'_> { PrettySlice(self.as_ref()) } } @@ -75,7 +67,7 @@ pub enum BytesRef<'a> { /// This is a reference to a vector Flexible(&'a mut Bytes), /// This is a reference to a slice - Fixed(&'a mut [u8]) + Fixed(&'a mut [u8]), } impl<'a> BytesRef<'a> { @@ -97,7 +89,7 @@ impl<'a> BytesRef<'a> { data[offset..(max + offset)].copy_from_slice(&input[..max]); max }, - _ => 0 + _ => 0, } } } @@ -127,9 +119,9 @@ pub type Bytes = Vec; #[cfg(test)] mod tests { + use super::BytesRef; #[cfg(not(feature = "std"))] use alloc::vec; - use super::BytesRef; #[test] fn should_write_bytes_to_fixed_bytesref() { diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml deleted file mode 100644 index 5bfda0aca..000000000 --- a/parity-crypto/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "parity-crypto" -version = "0.4.1" -authors = ["Parity Technologies "] -repository = "https://github.com/paritytech/parity-common" -description = "Crypto utils used by ethstore and network." -license = "GPL-3.0" -autobenches = false -edition = "2018" - -[[bench]] -name = "bench" -harness = false - - -[dependencies] -tiny-keccak = "1.4" -scrypt = { version = "0.2", default-features = false } -ripemd160 = "0.8.0" -sha2 = "0.8.0" -digest = "0.8" -hmac = "0.7" -aes = "0.3.2" -aes-ctr = "0.3.0" -block-modes = "0.3.3" -pbkdf2 = "0.3.0" -subtle = "2.1" -zeroize = "0.9.1" - -[dev-dependencies] -criterion = "0.2" -hex-literal = "0.2" diff --git a/parity-crypto/README.md b/parity-crypto/README.md deleted file mode 100644 index 51390fe4f..000000000 --- a/parity-crypto/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# parity-crypto - -General cryptographic utilities for Ethereum. - - -## Changelog - -The 0.4 release removes the dependency on `ring` and replaces it with prue-rust alternatives. As a consequence of this, AES GCM support has been removed. `subtle` is used for constant time equality testing and error handling is pared down to the bare minimum required. diff --git a/parity-crypto/benches/bench.rs b/parity-crypto/benches/bench.rs deleted file mode 100644 index f9f68dfd1..000000000 --- a/parity-crypto/benches/bench.rs +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
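A quick illustration of the `parity-bytes` helpers touched above: `pretty()` wraps any `AsRef<[u8]>` type in a `PrettySlice`, whose `Debug` form separates bytes with a middle dot, while the default `to_hex()` method renders the plain hex string via `Display`. A minimal sketch, assuming the crate's default (std) features.

```rust
use parity_bytes::ToPretty;

fn main() {
    let bytes: [u8; 4] = [0xde, 0xad, 0xbe, 0xef];
    // Display-based hex rendering via the default `to_hex` method.
    assert_eq!(bytes.to_hex(), "deadbeef");
    // Debug formatting separates bytes with a middle dot.
    assert_eq!(format!("{:?}", bytes.pretty()), "de·ad·be·ef");
}
```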
- - -extern crate parity_crypto; - -#[macro_use] -extern crate criterion; - -use criterion::{Criterion, Bencher}; - -criterion_group!(benches, input_len); - -criterion_main!(benches); - -/// general benches for multiple input size -fn input_len(c: &mut Criterion) { - - c.bench_function_over_inputs("ripemd", - |b: &mut Bencher, size: &usize| { - let data = vec![0u8; *size]; - b.iter(|| parity_crypto::digest::ripemd160(&data[..])); - }, - vec![100, 500, 1_000, 10_000, 100_000] - ); - - c.bench_function_over_inputs("aes_ctr", - |b: &mut Bencher, size: &usize| { - let data = vec![0u8; *size]; - let mut dest = vec![0; *size]; - let k = [0; 16]; - let iv = [0; 16]; - - b.iter(||{ - parity_crypto::aes::encrypt_128_ctr(&k[..], &iv[..], &data[..], &mut dest[..]).unwrap(); - // same as encrypt but add it just in case - parity_crypto::aes::decrypt_128_ctr(&k[..], &iv[..], &data[..], &mut dest[..]).unwrap(); - }); - }, - vec![100, 500, 1_000, 10_000, 100_000] - ); - -} diff --git a/parity-crypto/src/aes.rs b/parity-crypto/src/aes.rs deleted file mode 100644 index dc62674d4..000000000 --- a/parity-crypto/src/aes.rs +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use block_modes::{ - BlockMode, - Cbc, - Ecb, - block_padding::{Pkcs7, ZeroPadding} -}; -use aes_ctr::stream_cipher::{ NewStreamCipher, SyncStreamCipher }; -use aes::{ Aes128, Aes256 }; -use aes::block_cipher_trait::generic_array::GenericArray; - -use crate::error::SymmError; - - -/// One time encoder/decoder for Ecb mode Aes256 with zero padding -pub struct AesEcb256(Ecb); - -impl AesEcb256 { - - /// New encoder/decoder, no iv for ecb - pub fn new(key: &[u8]) -> Result { - Ok(AesEcb256(Ecb::new_var(key, &[])?)) - } - - /// Encrypt data in place without padding. The data length must be a multiple - /// of the block size. - pub fn encrypt(self, content: &mut [u8]) -> Result<(), SymmError> { - let len = content.len(); - self.0.encrypt(content, len)?; - Ok(()) - } - - /// Decrypt data in place without padding. The data length must be a multiple - /// of the block size. - pub fn decrypt(self, content: &mut [u8]) -> Result<(), SymmError> { - self.0.decrypt(content)?; - Ok(()) - } -} - - -/// Reusable encoder/decoder for Aes256 in Ctr mode and no padding -pub struct AesCtr256(aes_ctr::Aes256Ctr); - -impl AesCtr256 { - - /// New encoder/decoder - pub fn new(key: &[u8], iv: &[u8]) -> Result { - Ok(AesCtr256( - aes_ctr::Aes256Ctr::new(GenericArray::from_slice(key), GenericArray::from_slice(iv)) - )) - } - - /// In place encrypt a content without padding, the content length must be a multiple - /// of the block size. - pub fn encrypt(&mut self, content: &mut[u8]) -> Result<(), SymmError> { - self.0.try_apply_keystream(content)?; - Ok(()) - } - - /// In place decrypt a content without padding, the content length must be a multiple - /// of the block size. 
- pub fn decrypt(&mut self, content: &mut[u8]) -> Result<(), SymmError> { - self.0.try_apply_keystream(content)?; - Ok(()) - } -} - -/// Encrypt a message (CTR mode). -/// -/// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each. -/// An error is returned if the input lengths are invalid. -/// If possible prefer `inplace_encrypt_128_ctr` to avoid a slice copy. -pub fn encrypt_128_ctr(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) -> Result<(), SymmError> { - let mut encryptor = aes_ctr::Aes128Ctr::new( - GenericArray::from_slice(k), - GenericArray::from_slice(iv), - ); - &mut dest[..plain.len()].copy_from_slice(plain); - encryptor.try_apply_keystream(dest)?; - Ok(()) - -} - -/// Encrypt a message (CTR mode). -/// -/// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each. -/// An error is returned if the input lengths are invalid. -pub fn inplace_encrypt_128_ctr(k: &[u8], iv: &[u8], data: &mut [u8]) -> Result<(), SymmError> { - let mut encryptor = aes_ctr::Aes128Ctr::new( - GenericArray::from_slice(k), - GenericArray::from_slice(iv), - ); - encryptor.try_apply_keystream(data)?; - Ok(()) - -} - -/// Decrypt a message (CTR mode). -/// -/// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each. -/// An error is returned if the input lengths are invalid. -/// If possible prefer `inplace_decrypt_128_ctr` instead. -pub fn decrypt_128_ctr(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) -> Result<(), SymmError> { - let mut encryptor = aes_ctr::Aes128Ctr::new( - GenericArray::from_slice(k), - GenericArray::from_slice(iv), - ); - - &mut dest[..encrypted.len()].copy_from_slice(encrypted); - encryptor.try_apply_keystream(dest)?; - Ok(()) -} - -/// Decrypt a message (CTR mode). -/// -/// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each. -/// An error is returned if the input lengths are invalid. -pub fn inplace_decrypt_128_ctr(k: &[u8], iv: &[u8], data: &mut [u8]) -> Result<(), SymmError> { - let mut encryptor = aes_ctr::Aes128Ctr::new( - GenericArray::from_slice(k), - GenericArray::from_slice(iv), - ); - - encryptor.try_apply_keystream(data)?; - Ok(()) -} - - -/// Decrypt a message (CBC mode). -/// -/// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each. -/// An error is returned if the input lengths are invalid. 
-pub fn decrypt_128_cbc(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) -> Result { - let encryptor = Cbc::::new_var(k, iv)?; - &mut dest[..encrypted.len()].copy_from_slice(encrypted); - let unpad_length = { - encryptor.decrypt(&mut dest[..encrypted.len()])?.len() - }; - Ok(unpad_length) -} - - -#[cfg(test)] -mod tests { - - use super::*; - - // only use for test could be expose in the future - fn encrypt_128_cbc(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) -> Result<(), SymmError> { - let encryptor = Cbc::::new_var(k, iv)?; - &mut dest[..plain.len()].copy_from_slice(plain); - encryptor.encrypt(dest, plain.len())?; - Ok(()) - } - - #[test] - pub fn test_aes_short() -> Result<(),SymmError> { - let key = [97, 110, 121, 99, 111, 110, 116, 101, 110, 116, 116, 111, 114, 101, 97, 99, 104, 49, 50, 56, 98, 105, 116, 115, 105, 122, 101, 10]; - let salt = [109, 121, 115, 97, 108, 116, 115, 104, 111, 117, 108, 100, 102, 105, 108, 108, 115, 111, 109, 109, 101, 98, 121, 116, 101, 108, 101, 110, 103, 116, 104, 10]; - let content = [83, 111, 109, 101, 32, 99, 111, 110, 116, 101, 110, 116, 32, 116, 111, 32, 116, 101, 115, 116, - 32, 97, 101, 115, 44, 10, 110, 111, 116, 32, 116, 111, 32, 109, 117, 99, 104, 32, 44, 32, 111, 110, 108, 121, - 32, 118, 101, 114, 121, 32, 98, 97, 115, 105, 99, 32, 116, 101, 115, 116, 32, 116, 111, 32, 97, 118, 111, 105, - 100, 32, 111, 98, 118, 105, 111, 117, 115, 32, 114, 101, 103, 114, 101, 115, 115, 105, 111, 110, 32, 119, 104, - 101, 110, 32, 115, 119, 105, 116, 99, 104, 105, 110, 103, 32, 108, 105, 98, 115, 46, 10]; - let ctr_enc = [65, 55, 246, 75, 24, 117, 30, 233, 218, 139, 91, 251, 251, 179, 171, 69, 60, 244, 249, 44, 238, 60, - 10, 66, 71, 10, 199, 111, 54, 24, 124, 223, 153, 250, 159, 154, 164, 109, 232, 82, 20, 199, 182, 40, 174, 104, 64, - 203, 236, 94, 222, 184, 117, 54, 234, 189, 253, 122, 135, 121, 100, 44, 227, 241, 123, 120, 110, 188, 109, 148, 112, - 160, 131, 205, 116, 104, 232, 8, 22, 170, 80, 231, 155, 246, 255, 115, 101, 5, 234, 104, 220, 199, 192, 166, 181, 156, - 113, 255, 187, 51, 38, 128, 75, 29, 237, 178, 205, 98, 101, 110]; - let cbc_enc = [167, 248, 5, 90, 11, 140, 215, 138, 165, 125, 137, 76, 47, 243, 191, 48, 183, 247, 109, 86, 24, 45, - 81, 215, 0, 51, 221, 185, 131, 97, 234, 189, 244, 255, 107, 210, 70, 60, 41, 221, 43, 137, 185, 166, 42, 65, 18, 200, - 151, 233, 255, 192, 109, 25, 105, 115, 161, 209, 126, 235, 99, 192, 241, 241, 19, 249, 87, 244, 28, 146, 186, 189, 108, - 9, 243, 132, 4, 105, 53, 162, 8, 235, 84, 107, 213, 59, 158, 113, 227, 120, 162, 50, 237, 123, 70, 187, 83, 73, 146, 13, - 44, 191, 53, 4, 125, 207, 176, 45, 8, 153, 175, 198]; - let mut dest = vec![0;110]; - let mut dest_padded = vec![0;112]; - let mut dest_padded2 = vec![0;128]; // TODO RustLib need an extra 16bytes in dest : looks extra buggy but function is not currently use (keep it private for now) - encrypt_128_cbc(&key[..16], &salt[..16], &content, &mut dest_padded2)?; - assert!(&dest_padded2[..112] == &cbc_enc[..]); - encrypt_128_ctr(&key[..16], &salt[..16], &content, &mut dest)?; - assert!(&dest[..] == &ctr_enc[..]); - let mut content_data = content.to_vec(); - inplace_encrypt_128_ctr(&key[..16], &salt[..16], &mut content_data[..])?; - assert!(&content_data[..] == &ctr_enc[..]); - decrypt_128_ctr(&key[..16], &salt[..16], &ctr_enc[..], &mut dest)?; - assert!(&dest[..] == &content[..]); - let mut content_data = ctr_enc.to_vec(); - inplace_decrypt_128_ctr(&key[..16], &salt[..16], &mut content_data[..])?; - assert!(&content_data[..] 
== &content[..]); - let l = decrypt_128_cbc(&key[..16], &salt[..16], &cbc_enc[..], &mut dest_padded)?; - assert!(&dest_padded[..l] == &content[..]); - Ok(()) - } -} diff --git a/parity-crypto/src/digest.rs b/parity-crypto/src/digest.rs deleted file mode 100644 index 1851ac01d..000000000 --- a/parity-crypto/src/digest.rs +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use std::marker::PhantomData; -use std::ops::Deref; - -use digest::generic_array::{GenericArray, typenum::{U20, U32, U64}}; -use sha2::Digest as RDigest; - -/// The message digest. -pub struct Digest(InnerDigest, PhantomData); - -enum InnerDigest { - Sha256(GenericArray), - Sha512(GenericArray), - Ripemd160(GenericArray), -} - -impl Deref for Digest { - type Target = [u8]; - fn deref(&self) -> &Self::Target { - match self.0 { - InnerDigest::Sha256(ref d) => &d[..], - InnerDigest::Sha512(ref d) => &d[..], - InnerDigest::Ripemd160(ref d) => &d[..], - } - } -} - -/// Single-step sha256 digest computation. -pub fn sha256(data: &[u8]) -> Digest { - let mut hasher = Hasher::sha256(); - hasher.update(data); - hasher.finish() -} - -/// Single-step sha512 digest computation. -pub fn sha512(data: &[u8]) -> Digest { - let mut hasher = Hasher::sha512(); - hasher.update(data); - hasher.finish() -} - -/// Single-step ripemd160 digest computation. -pub fn ripemd160(data: &[u8]) -> Digest { - let mut hasher = Hasher::ripemd160(); - hasher.update(data); - hasher.finish() -} - -#[derive(Debug)] -pub enum Sha256 {} -#[derive(Debug)] -pub enum Sha512 {} -#[derive(Debug)] -pub enum Ripemd160 {} - -/// Stateful digest computation. 
-pub struct Hasher(Inner, PhantomData); - -enum Inner { - Sha256(sha2::Sha256), - Sha512(sha2::Sha512), - Ripemd160(ripemd160::Ripemd160) -} - -impl Hasher { - pub fn sha256() -> Hasher { - Hasher(Inner::Sha256(sha2::Sha256::default()), PhantomData) - } -} - -impl Hasher { - pub fn sha512() -> Hasher { - Hasher(Inner::Sha512(sha2::Sha512::default()), PhantomData) - } -} - -impl Hasher { - pub fn ripemd160() -> Hasher { - Hasher(Inner::Ripemd160(ripemd160::Ripemd160::default()), PhantomData) - } -} - -impl Hasher { - pub fn update(&mut self, data: &[u8]) { - match self.0 { - Inner::Sha256(ref mut ctx) => { - ctx.input(data) - }, - Inner::Sha512(ref mut ctx) => { - ctx.input(data) - }, - Inner::Ripemd160(ref mut ctx) => { - ctx.input(data) - } - } - } - - pub fn finish(self) -> Digest { - match self.0 { - Inner::Sha256(ctx) => { - Digest(InnerDigest::Sha256(ctx.result()), PhantomData) - }, - Inner::Sha512(ctx) => { - Digest(InnerDigest::Sha512(ctx.result()), PhantomData) - }, - Inner::Ripemd160(ctx) => { - Digest(InnerDigest::Ripemd160(ctx.result()), PhantomData) - } - } - } -} diff --git a/parity-crypto/src/error.rs b/parity-crypto/src/error.rs deleted file mode 100644 index 888bebb21..000000000 --- a/parity-crypto/src/error.rs +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- -use std::{fmt, result, error::Error as StdError}; - -#[derive(Debug)] -pub enum Error { - Scrypt(ScryptError), - Symm(SymmError), -} - -#[derive(Debug)] -pub enum ScryptError { - // log(N) < r / 16 - InvalidN, - // p <= (2^31-1 * 32)/(128 * r) - InvalidP, - ScryptParam(scrypt::errors::InvalidParams), - ScryptLength(scrypt::errors::InvalidOutputLen), -} - -#[derive(Debug)] -pub struct SymmError(PrivSymmErr); - -#[derive(Debug)] -enum PrivSymmErr { - BlockMode(block_modes::BlockModeError), - KeyStream(aes_ctr::stream_cipher::LoopError), - InvalidKeyLength(block_modes::InvalidKeyIvLength), -} - -impl StdError for Error { - fn source(&self) -> Option<&(StdError + 'static)> { - match self { - Error::Scrypt(scrypt_err) => Some(scrypt_err), - Error::Symm(symm_err) => Some(symm_err), - } - } -} - -impl StdError for ScryptError { - fn source(&self) -> Option<&(StdError + 'static)> { - match self { - ScryptError::ScryptParam(err) => Some(err), - ScryptError::ScryptLength(err) => Some(err), - _ => None, - } - } -} - -impl StdError for SymmError { - fn source(&self) -> Option<&(StdError + 'static)> { - match &self.0 { - PrivSymmErr::BlockMode(err) => Some(err), - PrivSymmErr::InvalidKeyLength(err) => Some(err), - _ => None, - } - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { - match self { - Error::Scrypt(err)=> write!(f, "scrypt error: {}", err), - Error::Symm(err) => write!(f, "symm error: {}", err), - } - } -} - -impl fmt::Display for ScryptError { - fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { - match self { - ScryptError::InvalidN => write!(f, "invalid n argument"), - ScryptError::InvalidP => write!(f, "invalid p argument"), - ScryptError::ScryptParam(err) => write!(f, "invalid params: {}", err), - ScryptError::ScryptLength(err) => write!(f, "invalid output length: {}", err), - } - } -} - -impl fmt::Display for SymmError { - fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { - match self { - SymmError(PrivSymmErr::BlockMode(err)) => write!(f, "block cipher error: {}", err), - SymmError(PrivSymmErr::KeyStream(err)) => write!(f, "ctr key stream ended: {}", err), - SymmError(PrivSymmErr::InvalidKeyLength(err)) => write!(f, "block cipher key length: {}", err), - } - } -} - -impl Into for Error { - fn into(self) -> std::io::Error { - std::io::Error::new(std::io::ErrorKind::Other, format!("Crypto error: {}",self)) - } -} - -impl From for SymmError { - fn from(e: block_modes::BlockModeError) -> SymmError { - SymmError(PrivSymmErr::BlockMode(e)) - } -} - -impl From for SymmError { - fn from(e: block_modes::InvalidKeyIvLength) -> SymmError { - SymmError(PrivSymmErr::InvalidKeyLength(e)) - } -} - -impl From for SymmError { - fn from(e: aes_ctr::stream_cipher::LoopError) -> SymmError { - SymmError(PrivSymmErr::KeyStream(e)) - } -} - -impl From for ScryptError { - fn from(e: scrypt::errors::InvalidParams) -> ScryptError { - ScryptError::ScryptParam(e) - } -} - -impl From for ScryptError { - fn from(e: scrypt::errors::InvalidOutputLen) -> ScryptError { - ScryptError::ScryptLength(e) - } -} - -impl From for Error { - fn from(e: ScryptError) -> Error { - Error::Scrypt(e) - } -} - -impl From for Error { - fn from(e: SymmError) -> Error { - Error::Symm(e) - } -} - diff --git a/parity-crypto/src/hmac/mod.rs b/parity-crypto/src/hmac/mod.rs deleted file mode 100644 index 43721fb0a..000000000 --- a/parity-crypto/src/hmac/mod.rs +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2015-2018 Parity 
Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use std::marker::PhantomData; -use std::ops::Deref; - -use digest::generic_array::{GenericArray, typenum::{U32, U64}}; -use hmac::{Hmac, Mac as _}; -use zeroize::Zeroize; - -use crate::digest::{Sha256, Sha512}; - -/// HMAC signature. -#[derive(Debug)] -pub struct Signature(HashInner, PhantomData); - -#[derive(Debug)] -enum HashInner { - Sha256(GenericArray), - Sha512(GenericArray), -} - -impl Deref for Signature { - type Target = [u8]; - - fn deref(&self) -> &Self::Target { - match &self.0 { - HashInner::Sha256(a) => a.as_slice(), - HashInner::Sha512(a) => a.as_slice(), - } - } -} - -/// HMAC signing key. -pub struct SigKey(KeyInner, PhantomData); - -#[derive(PartialEq)] -// Using `Box[u8]` guarantees no reallocation can happen -struct DisposableBox(Box<[u8]>); - -impl std::fmt::Debug for DisposableBox { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:?}", &self.0.as_ref()) - } -} - -impl DisposableBox { - fn from_slice(data: &[u8]) -> Self { - Self(data.to_vec().into_boxed_slice()) - } -} - -impl Drop for DisposableBox { - fn drop(&mut self) { - self.0.zeroize() - } -} - -#[derive(Debug, PartialEq)] -enum KeyInner { - Sha256(DisposableBox), - Sha512(DisposableBox), -} - -impl SigKey { - pub fn sha256(key: &[u8]) -> SigKey { - SigKey( - KeyInner::Sha256(DisposableBox::from_slice(key)), - PhantomData - ) - } -} - -impl SigKey { - pub fn sha512(key: &[u8]) -> SigKey { - SigKey( - KeyInner::Sha512(DisposableBox::from_slice(key)), - PhantomData - ) - } -} - -/// Compute HMAC signature of `data`. -pub fn sign(k: &SigKey, data: &[u8]) -> Signature { - let mut signer = Signer::with(k); - signer.update(data); - signer.sign() -} - -/// Stateful HMAC computation. -pub struct Signer(SignerInner, PhantomData); - -enum SignerInner { - Sha256(Hmac), - Sha512(Hmac), -} - -impl Signer { - pub fn with(key: &SigKey) -> Signer { - match &key.0 { - KeyInner::Sha256(key_bytes) => { - Signer( - SignerInner::Sha256( - Hmac::::new_varkey(&key_bytes.0) - .expect("always returns Ok; qed") - ), - PhantomData - ) - }, - KeyInner::Sha512(key_bytes) => { - Signer( - SignerInner::Sha512( - Hmac::::new_varkey(&key_bytes.0) - .expect("always returns Ok; qed") - ), PhantomData - ) - }, - } - } - - pub fn update(&mut self, data: &[u8]) { - match &mut self.0 { - SignerInner::Sha256(hmac) => hmac.input(data), - SignerInner::Sha512(hmac) => hmac.input(data), - } - } - - pub fn sign(self) -> Signature { - match self.0 { - SignerInner::Sha256(hmac) => Signature(HashInner::Sha256(hmac.result().code()), PhantomData), - SignerInner::Sha512(hmac) => Signature(HashInner::Sha512(hmac.result().code()), PhantomData), - } - } -} - -/// HMAC signature verification key. 
-pub struct VerifyKey(KeyInner, PhantomData); - -impl VerifyKey { - pub fn sha256(key: &[u8]) -> VerifyKey { - VerifyKey( - KeyInner::Sha256(DisposableBox::from_slice(key)), - PhantomData - ) - } -} - -impl VerifyKey { - pub fn sha512(key: &[u8]) -> VerifyKey { - VerifyKey( - KeyInner::Sha512(DisposableBox::from_slice(key)), - PhantomData - ) - } -} - -/// Verify HMAC signature of `data`. -pub fn verify(key: &VerifyKey, data: &[u8], sig: &[u8]) -> bool { - match &key.0 { - KeyInner::Sha256(key_bytes) => { - let mut ctx = Hmac::::new_varkey(&key_bytes.0) - .expect("always returns Ok; qed"); - ctx.input(data); - ctx.verify(sig).is_ok() - }, - KeyInner::Sha512(key_bytes) => { - let mut ctx = Hmac::::new_varkey(&key_bytes.0) - .expect("always returns Ok; qed"); - ctx.input(data); - ctx.verify(sig).is_ok() - }, - } -} - -#[cfg(test)] -mod test; diff --git a/parity-crypto/src/hmac/test.rs b/parity-crypto/src/hmac/test.rs deleted file mode 100644 index 9e0c34e12..000000000 --- a/parity-crypto/src/hmac/test.rs +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use super::*; -use hex_literal::hex; - -#[test] -fn simple_mac_and_verify() { - let input = b"Some bytes"; - let big_input = vec![7u8;2000]; - - let key1 = vec![3u8;64]; - let key2 = vec![4u8;128]; - - let sig_key1 = SigKey::sha256(&key1[..]); - let sig_key2 = SigKey::sha512(&key2[..]); - - let mut signer1 = Signer::with(&sig_key1); - let mut signer2 = Signer::with(&sig_key2); - - signer1.update(&input[..]); - for i in 0 .. 
big_input.len() / 33 { - signer2.update(&big_input[i*33..(i+1)*33]); - } - signer2.update(&big_input[(big_input.len() / 33)*33..]); - let sig1 = signer1.sign(); - assert_eq!(&sig1[..], [223, 208, 90, 69, 144, 95, 145, 180, 56, 155, 78, 40, 86, 238, 205, 81, 160, 245, 88, 145, 164, 67, 254, 180, 202, 107, 93, 249, 64, 196, 86, 225]); - let sig2 = signer2.sign(); - assert_eq!(&sig2[..], &[29, 63, 46, 122, 27, 5, 241, 38, 86, 197, 91, 79, 33, 107, 152, 195, 118, 221, 117, 119, 84, 114, 46, 65, 243, 157, 105, 12, 147, 176, 190, 37, 210, 164, 152, 8, 58, 243, 59, 206, 80, 10, 230, 197, 255, 110, 191, 180, 93, 22, 255, 0, 99, 79, 237, 229, 209, 199, 125, 83, 15, 179, 134, 89][..]); - assert_eq!(&sig1[..], &sign(&sig_key1, &input[..])[..]); - assert_eq!(&sig2[..], &sign(&sig_key2, &big_input[..])[..]); - let verif_key1 = VerifyKey::sha256(&key1[..]); - let verif_key2 = VerifyKey::sha512(&key2[..]); - assert!(verify(&verif_key1, &input[..], &sig1[..])); - assert!(verify(&verif_key2, &big_input[..], &sig2[..])); -} - -fn check_test_vector( - key: &[u8], - data: &[u8], - expected_256: &[u8], - expected_512: &[u8], -) { - // Sha-256 - let sig_key = SigKey::sha256(&key); - let mut signer = Signer::with(&sig_key); - signer.update(&data); - let signature = signer.sign(); - assert_eq!(&signature[..], expected_256); - assert_eq!(&signature[..], &sign(&sig_key, data)[..]); - let ver_key = VerifyKey::sha256(&key); - assert!(verify(&ver_key, data,&signature)); - - // Sha-512 - let sig_key = SigKey::sha512(&key); - let mut signer = Signer::with(&sig_key); - signer.update(&data); - let signature = signer.sign(); - assert_eq!(&signature[..], expected_512); - assert_eq!(&signature[..], &sign(&sig_key, data)[..]); - let ver_key = VerifyKey::sha512(&key); - assert!(verify(&ver_key, data,&signature)); -} - -#[test] -fn ietf_test_vectors() { - // Test vectors from https://tools.ietf.org/html/rfc4231.html#section-4 - - // Test Case 1 - check_test_vector( - &hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"), - &hex!("4869205468657265"), - &hex!(" - b0344c61d8db38535ca8afceaf0bf12b - 881dc200c9833da726e9376c2e32cff7"), - &hex!(" - 87aa7cdea5ef619d4ff0b4241a1d6cb0 - 2379f4e2ce4ec2787ad0b30545e17cde - daa833b7d6b8a702038b274eaea3f4e4 - be9d914eeb61f1702e696c203a126854") - ); - - // Test Case 2 - check_test_vector( - &hex!("4a656665"), - &hex!("7768617420646f2079612077616e7420666f72206e6f7468696e673f"), - &hex!(" - 5bdcc146bf60754e6a042426089575c7 - 5a003f089d2739839dec58b964ec3843"), - &hex!(" - 164b7a7bfcf819e2e395fbe73b56e0a3 - 87bd64222e831fd610270cd7ea250554 - 9758bf75c05a994a6d034f65f8f0e6fd - caeab1a34d4a6b4b636e070a38bce737") - ); - // Test Case 3 - check_test_vector( - &hex!("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), - &hex!("dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"), - &hex!(" - 773ea91e36800e46854db8ebd09181a7 - 2959098b3ef8c122d9635514ced565fe"), - &hex!(" - fa73b0089d56a284efb0f0756c890be9 - b1b5dbdd8ee81a3655f83e33b2279d39 - bf3e848279a722c806b485a47e67c807 - b946a337bee8942674278859e13292fb") - ); - - // Test Case 4 - check_test_vector( - &hex!("0102030405060708090a0b0c0d0e0f10111213141516171819"), - &hex!(" - cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd - cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd - cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd - cdcd"), - &hex!(" - 82558a389a443c0ea4cc819899f2083a - 85f0faa3e578f8077a2e3ff46729665b"), - &hex!(" - b0ba465637458c6990e5a8c5f61d4af7 - e576d97ff94b872de76f8050361ee3db - a91ca5c11aa25eb4d679275cc5788063 - 
a5f19741120c4f2de2adebeb10a298dd") - ); - - // Test Case 6 - check_test_vector( - &hex!(" - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaa"), - &hex!(" - 54657374205573696e67204c61726765 - 72205468616e20426c6f636b2d53697a - 65204b6579202d2048617368204b6579 - 204669727374"), - &hex!(" - 60e431591ee0b67f0d8a26aacbf5b77f - 8e0bc6213728c5140546040f0ee37f54"), - &hex!(" - 80b24263c7c1a3ebb71493c1dd7be8b4 - 9b46d1f41b4aeec1121b013783f8f352 - 6b56d037e05f2598bd0fd2215d6a1e52 - 95e64f73f63f0aec8b915a985d786598") - ); - - // Test Case 7 - check_test_vector( - &hex!(" - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaa"), - &hex!(" - 54686973206973206120746573742075 - 73696e672061206c6172676572207468 - 616e20626c6f636b2d73697a65206b65 - 7920616e642061206c61726765722074 - 68616e20626c6f636b2d73697a652064 - 6174612e20546865206b6579206e6565 - 647320746f2062652068617368656420 - 6265666f7265206265696e6720757365 - 642062792074686520484d414320616c - 676f726974686d2e"), - &hex!(" - 9b09ffa71b942fcb27635fbcd5b0e944 - bfdc63644f0713938a7f51535c3a35e2"), - &hex!(" - e37b6a775dc87dbaa4dfa9f96e5e3ffd - debd71f8867289865df5a32d20cdc944 - b6022cac3c4982b10d5eeb55c3e4de15 - 134676fb6de0446065c97440fa8c6a58") - ); -} - -#[test] -fn secrets_are_zeroed_on_drop() { - let ptr: *const KeyInner; - let zeros = KeyInner::Sha256(DisposableBox::from_slice(&[0u8; 6][..])); - let expected = KeyInner::Sha256(DisposableBox::from_slice(b"sikrit")); - { - let secret = b"sikrit"; - let signing_key = SigKey::sha256(secret); - ptr = &signing_key.0; - unsafe { - assert_eq!(*ptr, expected); - } - } - unsafe { - assert_eq!(*ptr, zeros); - } -} diff --git a/parity-crypto/src/lib.rs b/parity-crypto/src/lib.rs deleted file mode 100644 index ce680f929..000000000 --- a/parity-crypto/src/lib.rs +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Crypto utils used by ethstore and network. - -pub mod aes; -pub mod error; -pub mod scrypt; -pub mod digest; -pub mod hmac; -pub mod pbkdf2; - -pub use crate::error::Error; - -use tiny_keccak::Keccak; -use subtle::ConstantTimeEq; - -pub const KEY_LENGTH: usize = 32; -pub const KEY_ITERATIONS: usize = 10240; -pub const KEY_LENGTH_AES: usize = KEY_LENGTH / 2; - -/// Default authenticated data to use (in RPC). 
-pub const DEFAULT_MAC: [u8; 2] = [0, 0]; - -pub trait Keccak256&lt;T&gt; { - fn keccak256(&self) -> T where T: Sized; -} - -impl&lt;T&gt; Keccak256&lt;[u8; 32]&gt; for T where T: AsRef&lt;[u8]&gt; { - fn keccak256(&self) -> [u8; 32] { - let mut keccak = Keccak::new_keccak256(); - let mut result = [0u8; 32]; - keccak.update(self.as_ref()); - keccak.finalize(&mut result); - result - } -} - -pub fn derive_key_iterations(password: &[u8], salt: &[u8], c: u32) -> (Vec&lt;u8&gt;, Vec&lt;u8&gt;) { - let mut derived_key = [0u8; KEY_LENGTH]; - pbkdf2::sha256(c, pbkdf2::Salt(salt), pbkdf2::Secret(password), &mut derived_key); - let derived_right_bits = &derived_key[0..KEY_LENGTH_AES]; - let derived_left_bits = &derived_key[KEY_LENGTH_AES..KEY_LENGTH]; - (derived_right_bits.to_vec(), derived_left_bits.to_vec()) -} - -pub fn derive_mac(derived_left_bits: &[u8], cipher_text: &[u8]) -> Vec&lt;u8&gt; { - let mut mac = vec![0u8; KEY_LENGTH_AES + cipher_text.len()]; - mac[0..KEY_LENGTH_AES].copy_from_slice(derived_left_bits); - mac[KEY_LENGTH_AES..cipher_text.len() + KEY_LENGTH_AES].copy_from_slice(cipher_text); - mac -} - -pub fn is_equal(a: &[u8], b: &[u8]) -> bool { - a.ct_eq(b).into() -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn can_test_for_equality() { - let a = b"abc"; - let b = b"abc"; - let c = b"efg"; - assert!(is_equal(a, b)); - assert!(!is_equal(a, c)); - } -} diff --git a/parity-crypto/src/pbkdf2/mod.rs b/parity-crypto/src/pbkdf2/mod.rs deleted file mode 100644 index 83445e7e7..000000000 --- a/parity-crypto/src/pbkdf2/mod.rs +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see &lt;http://www.gnu.org/licenses/&gt;. - -pub struct Salt<'a>(pub &'a [u8]); -pub struct Secret<'a>(pub &'a [u8]); - -pub fn sha256(iter: u32, salt: Salt, sec: Secret, out: &mut [u8; 32]) { - pbkdf2::pbkdf2::&lt;hmac::Hmac&lt;sha2::Sha256&gt;&gt;(sec.0, salt.0, iter as usize, out) -} - -pub fn sha512(iter: u32, salt: Salt, sec: Secret, out: &mut [u8; 64]) { - pbkdf2::pbkdf2::&lt;hmac::Hmac&lt;sha2::Sha512&gt;&gt;(sec.0, salt.0, iter as usize, out) -} - -#[cfg(test)] -mod test; diff --git a/parity-crypto/src/pbkdf2/test.rs b/parity-crypto/src/pbkdf2/test.rs deleted file mode 100644 index 0aca66967..000000000 --- a/parity-crypto/src/pbkdf2/test.rs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see &lt;http://www.gnu.org/licenses/&gt;.
- -use super::*; - -#[test] -fn basic_test() { - let mut dest = [0;32]; - let salt = [5;32]; - let secret = [7;32]; - sha256(3, Salt(&salt[..]), Secret(&secret[..]), &mut dest); - let res = [242, 33, 31, 124, 36, 223, 179, 185, 206, 175, 190, 253, 85, 33, 23, 126, 141, 29, 23, 97, 66, 63, 51, 196, 27, 255, 135, 206, 74, 137, 172, 87]; - assert_eq!(res, dest); -} diff --git a/parity-crypto/src/scrypt.rs b/parity-crypto/src/scrypt.rs deleted file mode 100644 index 9c8443146..000000000 --- a/parity-crypto/src/scrypt.rs +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see &lt;http://www.gnu.org/licenses/&gt;. - -use crate::error::ScryptError; -use super::{KEY_LENGTH_AES, KEY_LENGTH}; -use scrypt::{scrypt, ScryptParams}; - -#[cfg(test)] -use std::io::Error; - -pub fn derive_key(pass: &[u8], salt: &[u8], n: u32, p: u32, r: u32) -> Result<(Vec&lt;u8&gt;, Vec&lt;u8&gt;), ScryptError> { - // sanity checks - let log_n = (32 - n.leading_zeros() - 1) as u8; - if log_n as u32 >= r * 16 { - return Err(ScryptError::InvalidN); - } - - if p as u64 > ((u32::max_value() as u64 - 1) * 32)/(128 * (r as u64)) { - return Err(ScryptError::InvalidP); - } - - let mut derived_key = vec![0u8; KEY_LENGTH]; - let scrypt_params = ScryptParams::new(log_n, r, p)?; - scrypt(pass, salt, &scrypt_params, &mut derived_key)?; - let derived_right_bits = &derived_key[0..KEY_LENGTH_AES]; - let derived_left_bits = &derived_key[KEY_LENGTH_AES..KEY_LENGTH]; - Ok((derived_right_bits.to_vec(), derived_left_bits.to_vec())) -} - - -// test is built from the previous crypto lib's behaviour; values may be incorrect -// if the previous crypto lib had a bug.
-#[test] -pub fn test_derive() -> Result<(),Error> { - let pass = [109, 121, 112, 97, 115, 115, 10]; - let salt = [109, 121, 115, 97, 108, 116, 115, 104, 111, 117, 108, 100, 102, 105, - 108, 108, 115, 111, 109, 109, 101, 98, 121, 116, 101, 108, 101, 110, 103, 116, 104, 10]; - let r1 = [93, 134, 79, 68, 223, 27, 44, 174, 236, 184, 179, 203, 74, 139, 73, 66]; - let r2 = [2, 24, 239, 131, 172, 164, 18, 171, 132, 207, 22, 217, 150, 20, 203, 37]; - let l1 = [6, 90, 119, 45, 67, 2, 99, 151, 81, 88, 166, 210, 244, 19, 123, 208]; - let l2 = [253, 123, 132, 12, 188, 89, 196, 2, 107, 224, 239, 231, 135, 177, 125, 62]; - - let (l,r) = derive_key(&pass[..],&salt, 262, 1, 8).unwrap(); - assert!(l == r1); - assert!(r == l1); - let (l,r) = derive_key(&pass[..],&salt, 144, 4, 4).unwrap(); - assert!(l == r2); - assert!(r == l2); - Ok(()) -} diff --git a/parity-path/Cargo.toml b/parity-path/Cargo.toml deleted file mode 100644 index e5f2d4f0e..000000000 --- a/parity-path/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "parity-path" -version = "0.1.1" -authors = ["Parity Technologies "] -repository = "https://github.com/paritytech/parity-common" -description = "Path utilities" -license = "GPL-3.0" - -[dependencies] diff --git a/parity-path/src/lib.rs b/parity-path/src/lib.rs deleted file mode 100644 index 38608db66..000000000 --- a/parity-path/src/lib.rs +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Path utilities -use std::path::Path; -use std::path::PathBuf; - -#[cfg(target_os = "macos")] -/// Get the config path for application `name`. -/// `name` should be capitalized, e.g. `"Ethereum"`, `"Parity"`. -pub fn config_path(name: &str) -> PathBuf { - let mut home = ::std::env::home_dir().expect("Failed to get home dir"); - home.push("Library"); - home.push(name); - home -} - -#[cfg(windows)] -/// Get the config path for application `name`. -/// `name` should be capitalized, e.g. `"Ethereum"`, `"Parity"`. -pub fn config_path(name: &str) -> PathBuf { - let mut home = ::std::env::home_dir().expect("Failed to get home dir"); - home.push("AppData"); - home.push("Roaming"); - home.push(name); - home -} - -#[cfg(not(any(target_os = "macos", windows)))] -/// Get the config path for application `name`. -/// `name` should be capitalized, e.g. `"Ethereum"`, `"Parity"`. -pub fn config_path(name: &str) -> PathBuf { - let mut home = ::std::env::home_dir().expect("Failed to get home dir"); - home.push(format!(".{}", name.to_lowercase())); - home -} - -/// Get the specific folder inside a config path. 
-pub fn config_path_with(name: &str, then: &str) -> PathBuf { - let mut path = config_path(name); - path.push(then); - path -} - -/// Default ethereum paths -pub mod ethereum { - use std::path::PathBuf; - - /// Default path for ethereum installation on Mac Os - pub fn default() -> PathBuf { super::config_path("Ethereum") } - - /// Default path for ethereum installation (testnet) - pub fn test() -> PathBuf { - let mut path = default(); - path.push("testnet"); - path - } - - /// Get the specific folder inside default ethereum installation - pub fn with_default(s: &str) -> PathBuf { - let mut path = default(); - path.push(s); - path - } - - /// Get the specific folder inside default ethereum installation configured for testnet - pub fn with_testnet(s: &str) -> PathBuf { - let mut path = default(); - path.push("testnet"); - path.push(s); - path - } -} - -/// Restricts the permissions of given path only to the owner. -#[cfg(unix)] -pub fn restrict_permissions_owner(file_path: &Path, write: bool, executable: bool) -> Result<(), String> { - let perms = ::std::os::unix::fs::PermissionsExt::from_mode(0o400 + write as u32 * 0o200 + executable as u32 * 0o100); - ::std::fs::set_permissions(file_path, perms).map_err(|e| format!("{:?}", e)) -} - -/// Restricts the permissions of given path only to the owner. -#[cfg(not(unix))] -pub fn restrict_permissions_owner(_file_path: &Path, _write: bool, _executable: bool) -> Result<(), String> { - //TODO: implement me - Ok(()) -} diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml deleted file mode 100644 index 8462e65e2..000000000 --- a/parity-util-mem/Cargo.toml +++ /dev/null @@ -1,47 +0,0 @@ -[package] -name = "parity-util-mem" -version = "0.2.0" -authors = ["Parity Technologies "] -repository = "https://github.com/paritytech/parity-common" -description = "Collection of memory related utilities" -license = "GPL-3.0" - -[dependencies] -cfg-if = "0.1.6" -malloc_size_of_derive = "0.1.0" -dlmalloc = { version = "0.1", features = ["global"], optional = true } -wee_alloc = { version = "0.4", optional = true } -# from https://github.com/microsoft/mimalloc: -# mimalloc can be built in secure mode, -# adding guard pages, randomized allocation, encrypted free lists, etc. -# to protect against various heap vulnerabilities. -# The performance penalty is only around 3% on average over our benchmarks. 
-mimallocator = { version = "0.1", features = ["secure"], optional = true } -mimalloc-sys = { version = "0.1", optional = true } - -elastic-array = { version = "0", optional = true } -ethereum-types = { version = "0", optional = true } -parking_lot = { version = "0", optional = true } - -[target.'cfg(target_os = "windows")'.dependencies.winapi] -version = "0.3.4" - -[target.'cfg(not(target_os = "windows"))'.dependencies.jemallocator] -version = "0.1" -optional = true - -[features] -default = ["std", "ethereum-impls"] -std = [] -# use dlmalloc as global allocator -dlmalloc-global = ["dlmalloc", "estimate-heapsize"] -# use wee_alloc as global allocator -weealloc-global = ["wee_alloc", "estimate-heapsize"] -# use jemalloc as global allocator -jemalloc-global = ["jemallocator"] -# use mimalloc as global allocator -mimalloc-global = ["mimallocator", "mimalloc-sys"] -# implement additional types -ethereum-impls = ["ethereum-types", "elastic-array", "parking_lot"] -# Full estimate: no call to allocator -estimate-heapsize = [] diff --git a/parity-util-mem/README.md b/parity-util-mem/README.md deleted file mode 100644 index 14d6e6c19..000000000 --- a/parity-util-mem/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# parity-util-mem - -Collection of memory related utilities. - -## Features - -- estimate-heapsize : Do not use the allocator, but `size_of` or `size_of_val`. - -Other features define the global allocator, see `src/alloc.rs`. - -## Dependency - -This crate groups common dependencies: a patched copy of the unpublished [`malloc_size_of`](https://github.com/servo/servo/tree/master/components/malloc_size_of) crate from the Servo project is included and partially re-exported. - -`Malloc_size_of` code is used internally as a module, with a few modifications to allow implementing the trait for local types. - -For existing code using the deprecated `HeapsizeOf` crate, calls to `heapsize_of_children` should be replaced by calls to `size_of`. diff --git a/parity-util-mem/get_malloc_size_src.sh b/parity-util-mem/get_malloc_size_src.sh deleted file mode 100755 index 6eb52131f..000000000 --- a/parity-util-mem/get_malloc_size_src.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# script/process to update code from servo project (malloc_size_of) -# untested, note that we do not use a submodule due to the size of the git repo -git clone https://github.com/servo/servo.git -cd servo -git checkout 5bdea7dc1c80790a852a3fb03edfb2b8fbd403dc -git apply ../slim_malloc_size_of.patch -#git merge master -#cp components/malloc_size_of/lib.rs ../src/malloc_size.rs -#cd .. -#rm -rf ./servo diff --git a/parity-util-mem/slim_malloc_size_of.patch b/parity-util-mem/slim_malloc_size_of.patch deleted file mode 100644 index 7c31cf606..000000000 --- a/parity-util-mem/slim_malloc_size_of.patch +++ /dev/null @@ -1,746 +0,0 @@ -diff --git a/components/malloc_size_of/lib.rs b/components/malloc_size_of/lib.rs -index 778082b5f0..e13745d6af 100644 ---- a/components/malloc_size_of/lib.rs -+++ b/components/malloc_size_of/lib.rs -@@ -43,55 +43,39 @@ - //! measured as well as the thing it points to. E.g. - //! `&lt;Box&lt;_&gt; as MallocSizeOf&gt;::size_of(field, ops)`.
- --extern crate app_units; --#[cfg(feature = "servo")] --extern crate crossbeam_channel; --extern crate cssparser; --extern crate euclid; --extern crate hashglobe; --#[cfg(feature = "servo")] --extern crate hyper; --#[cfg(feature = "servo")] --extern crate hyper_serde; --#[cfg(feature = "servo")] --extern crate keyboard_types; --#[cfg(feature = "servo")] --extern crate mozjs as js; --extern crate selectors; --#[cfg(feature = "servo")] --extern crate serde; --#[cfg(feature = "servo")] --extern crate serde_bytes; --extern crate servo_arc; --extern crate smallbitvec; --extern crate smallvec; --#[cfg(feature = "servo")] --extern crate string_cache; --extern crate thin_slice; --#[cfg(feature = "servo")] --extern crate time; --#[cfg(feature = "url")] --extern crate url; --extern crate void; --#[cfg(feature = "webrender_api")] --extern crate webrender_api; --#[cfg(feature = "servo")] --extern crate xml5ever; -- --#[cfg(feature = "servo")] --use serde_bytes::ByteBuf; -+ -+// This file is patched at commit 5bdea7dc1c80790a852a3fb03edfb2b8fbd403dc DO NOT EDIT. -+ -+ -+#[cfg(not(feature = "std"))] -+use alloc::vec::Vec; -+#[cfg(not(feature = "std"))] -+use alloc::string::String; -+#[cfg(not(feature = "std"))] -+mod std { -+ pub use core::*; -+ pub use alloc::collections; -+} -+ -+#[cfg(feature = "std")] -+use std::sync::Arc; -+ - use std::hash::{BuildHasher, Hash}; - use std::mem::size_of; - use std::ops::Range; - use std::ops::{Deref, DerefMut}; -+#[cfg(feature = "std")] - use std::os::raw::c_void; --use void::Void; -+#[cfg(not(feature = "std"))] -+use core::ffi::c_void; -+#[cfg(not(feature = "std"))] -+pub use alloc::boxed::Box; - - /// A C function that takes a pointer to a heap allocation and returns its size. --type VoidPtrToSizeFn = unsafe extern "C" fn(ptr: *const c_void) -> usize; -+pub type VoidPtrToSizeFn = unsafe extern "C" fn(ptr: *const c_void) -> usize; - - /// A closure implementing a stateful predicate on pointers. --type VoidPtrToBoolFnMut = FnMut(*const c_void) -> bool; -+pub type VoidPtrToBoolFnMut = FnMut(*const c_void) -> bool; - - /// Operations used when measuring heap usage of data structures. - pub struct MallocSizeOfOps { -@@ -216,44 +200,62 @@ pub trait MallocConditionalShallowSizeOf { - fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; - } - --impl MallocSizeOf for String { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- unsafe { ops.malloc_size_of(self.as_ptr()) } -+#[cfg(not(any( -+ all( -+ target_os = "macos", -+ not(feature = "jemalloc-global"), -+ ), -+ feature = "estimate-heapsize" -+)))] -+pub mod inner_allocator_use { -+ -+use super::*; -+ -+impl MallocShallowSizeOf for Box { -+ fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -+ unsafe { ops.malloc_size_of(&**self) } - } - } - --impl<'a, T: ?Sized> MallocSizeOf for &'a T { -- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -- // Zero makes sense for a non-owning reference. 
-- 0 -+impl MallocShallowSizeOf for Vec { -+ fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -+ unsafe { ops.malloc_size_of(self.as_ptr()) } - } - } - --impl MallocShallowSizeOf for Box { -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- unsafe { ops.malloc_size_of(&**self) } -+// currently this seems only fine with jemalloc -+#[cfg(feature = "std")] -+#[cfg(any(target_os = "macos", target_os = "ios", target_os = "android", feature = "jemalloc-global"))] -+impl MallocUnconditionalShallowSizeOf for Arc { -+ fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -+ unsafe { ops.malloc_size_of(arc_ptr(self)) } - } - } - --impl MallocSizeOf for Box { -+#[cfg(feature = "std")] -+#[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "android", feature = "jemalloc-global")))] -+impl MallocUnconditionalShallowSizeOf for Arc { -+ fn unconditional_shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -+ size_of::() -+ } -+} -+ -+} -+ -+impl MallocSizeOf for String { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.shallow_size_of(ops) + (**self).size_of(ops) -+ unsafe { ops.malloc_size_of(self.as_ptr()) } - } - } - --impl MallocShallowSizeOf for thin_slice::ThinBoxedSlice { -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- let mut n = 0; -- unsafe { -- n += thin_slice::ThinBoxedSlice::spilled_storage(self) -- .map_or(0, |ptr| ops.malloc_size_of(ptr)); -- n += ops.malloc_size_of(&**self); -- } -- n -+impl<'a, T: ?Sized> MallocSizeOf for &'a T { -+ fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -+ // Zero makes sense for a non-owning reference. -+ 0 - } - } - --impl MallocSizeOf for thin_slice::ThinBoxedSlice { -+impl MallocSizeOf for Box { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.shallow_size_of(ops) + (**self).size_of(ops) - } -@@ -329,6 +331,7 @@ impl MallocSizeOf for std::cell::RefCell { - } - } - -+#[cfg(feature = "std")] - impl<'a, B: ?Sized + ToOwned> MallocSizeOf for std::borrow::Cow<'a, B> - where - B::Owned: MallocSizeOf, -@@ -351,30 +354,6 @@ impl MallocSizeOf for [T] { - } - } - --#[cfg(feature = "servo")] --impl MallocShallowSizeOf for ByteBuf { -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- unsafe { ops.malloc_size_of(self.as_ptr()) } -- } --} -- --#[cfg(feature = "servo")] --impl MallocSizeOf for ByteBuf { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- let mut n = self.shallow_size_of(ops); -- for elem in self.iter() { -- n += elem.size_of(ops); -- } -- n -- } --} -- --impl MallocShallowSizeOf for Vec { -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- unsafe { ops.malloc_size_of(self.as_ptr()) } -- } --} -- - impl MallocSizeOf for Vec { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); -@@ -412,30 +391,7 @@ impl MallocSizeOf for std::collections::VecDeque { - } - } - --impl MallocShallowSizeOf for smallvec::SmallVec { -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- if self.spilled() { -- unsafe { ops.malloc_size_of(self.as_ptr()) } -- } else { -- 0 -- } -- } --} -- --impl MallocSizeOf for smallvec::SmallVec --where -- A: smallvec::Array, -- A::Item: MallocSizeOf, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- let mut n = self.shallow_size_of(ops); -- for elem in self.iter() { -- n += elem.size_of(ops); -- } -- n -- } --} -- -+#[cfg(feature = "std")] - impl MallocShallowSizeOf for 
std::collections::HashSet - where - T: Eq + Hash, -@@ -457,6 +413,7 @@ where - } - } - -+#[cfg(feature = "std")] - impl MallocSizeOf for std::collections::HashSet - where - T: Eq + Hash + MallocSizeOf, -@@ -471,59 +428,7 @@ where - } - } - --impl MallocShallowSizeOf for hashglobe::hash_set::HashSet --where -- T: Eq + Hash, -- S: BuildHasher, --{ -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- // See the implementation for std::collections::HashSet for details. -- if ops.has_malloc_enclosing_size_of() { -- self.iter() -- .next() -- .map_or(0, |t| unsafe { ops.malloc_enclosing_size_of(t) }) -- } else { -- self.capacity() * (size_of::() + size_of::()) -- } -- } --} -- --impl MallocSizeOf for hashglobe::hash_set::HashSet --where -- T: Eq + Hash + MallocSizeOf, -- S: BuildHasher, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- let mut n = self.shallow_size_of(ops); -- for t in self.iter() { -- n += t.size_of(ops); -- } -- n -- } --} -- --impl MallocShallowSizeOf for hashglobe::fake::HashSet --where -- T: Eq + Hash, -- S: BuildHasher, --{ -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- use std::ops::Deref; -- self.deref().shallow_size_of(ops) -- } --} -- --impl MallocSizeOf for hashglobe::fake::HashSet --where -- T: Eq + Hash + MallocSizeOf, -- S: BuildHasher, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- use std::ops::Deref; -- self.deref().size_of(ops) -- } --} -- -+#[cfg(feature = "std")] - impl MallocShallowSizeOf for std::collections::HashMap - where - K: Eq + Hash, -@@ -541,6 +446,7 @@ where - } - } - -+#[cfg(feature = "std")] - impl MallocSizeOf for std::collections::HashMap - where - K: Eq + Hash + MallocSizeOf, -@@ -587,62 +493,6 @@ where - } - } - --impl MallocShallowSizeOf for hashglobe::hash_map::HashMap --where -- K: Eq + Hash, -- S: BuildHasher, --{ -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- // See the implementation for std::collections::HashSet for details. -- if ops.has_malloc_enclosing_size_of() { -- self.values() -- .next() -- .map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) -- } else { -- self.capacity() * (size_of::() + size_of::() + size_of::()) -- } -- } --} -- --impl MallocSizeOf for hashglobe::hash_map::HashMap --where -- K: Eq + Hash + MallocSizeOf, -- V: MallocSizeOf, -- S: BuildHasher, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- let mut n = self.shallow_size_of(ops); -- for (k, v) in self.iter() { -- n += k.size_of(ops); -- n += v.size_of(ops); -- } -- n -- } --} -- --impl MallocShallowSizeOf for hashglobe::fake::HashMap --where -- K: Eq + Hash, -- S: BuildHasher, --{ -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- use std::ops::Deref; -- self.deref().shallow_size_of(ops) -- } --} -- --impl MallocSizeOf for hashglobe::fake::HashMap --where -- K: Eq + Hash + MallocSizeOf, -- V: MallocSizeOf, -- S: BuildHasher, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- use std::ops::Deref; -- self.deref().size_of(ops) -- } --} -- - // PhantomData is always 0. 
- impl MallocSizeOf for std::marker::PhantomData { - fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -@@ -657,21 +507,22 @@ impl MallocSizeOf for std::marker::PhantomData { - //impl !MallocSizeOf for Arc { } - //impl !MallocShallowSizeOf for Arc { } - --impl MallocUnconditionalShallowSizeOf for servo_arc::Arc { -- fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- unsafe { ops.malloc_size_of(self.heap_ptr()) } -- } -+#[cfg(feature = "std")] -+fn arc_ptr(s: &Arc) -> * const T { -+ &(**s) as *const T - } - --impl MallocUnconditionalSizeOf for servo_arc::Arc { -+#[cfg(feature = "std")] -+impl MallocUnconditionalSizeOf for Arc { - fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.unconditional_shallow_size_of(ops) + (**self).size_of(ops) - } - } - --impl MallocConditionalShallowSizeOf for servo_arc::Arc { -+#[cfg(feature = "std")] -+impl MallocConditionalShallowSizeOf for Arc { - fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- if ops.have_seen_ptr(self.heap_ptr()) { -+ if ops.have_seen_ptr(arc_ptr(self)) { - 0 - } else { - self.unconditional_shallow_size_of(ops) -@@ -679,9 +530,10 @@ impl MallocConditionalShallowSizeOf for servo_arc::Arc { - } - } - --impl MallocConditionalSizeOf for servo_arc::Arc { -+#[cfg(feature = "std")] -+impl MallocConditionalSizeOf for Arc { - fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- if ops.have_seen_ptr(self.heap_ptr()) { -+ if ops.have_seen_ptr(arc_ptr(self)) { - 0 - } else { - self.unconditional_size_of(ops) -@@ -695,203 +547,13 @@ impl MallocConditionalSizeOf for servo_arc::Arc { - /// If a mutex is stored inside of an Arc value as a member of a data type that is being measured, - /// the Arc will not be automatically measured so there is no risk of overcounting the mutex's - /// contents. 
-+#[cfg(feature = "std")] - impl MallocSizeOf for std::sync::Mutex { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - (*self.lock().unwrap()).size_of(ops) - } - } - --impl MallocSizeOf for smallbitvec::SmallBitVec { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- if let Some(ptr) = self.heap_ptr() { -- unsafe { ops.malloc_size_of(ptr) } -- } else { -- 0 -- } -- } --} -- --impl MallocSizeOf for euclid::Length { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.0.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedScale { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.0.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedPoint2D { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.x.size_of(ops) + self.y.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedRect { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.origin.size_of(ops) + self.size.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedSideOffsets2D { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.top.size_of(ops) + -- self.right.size_of(ops) + -- self.bottom.size_of(ops) + -- self.left.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedSize2D { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.width.size_of(ops) + self.height.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedTransform2D { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.m11.size_of(ops) + -- self.m12.size_of(ops) + -- self.m21.size_of(ops) + -- self.m22.size_of(ops) + -- self.m31.size_of(ops) + -- self.m32.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedTransform3D { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.m11.size_of(ops) + -- self.m12.size_of(ops) + -- self.m13.size_of(ops) + -- self.m14.size_of(ops) + -- self.m21.size_of(ops) + -- self.m22.size_of(ops) + -- self.m23.size_of(ops) + -- self.m24.size_of(ops) + -- self.m31.size_of(ops) + -- self.m32.size_of(ops) + -- self.m33.size_of(ops) + -- self.m34.size_of(ops) + -- self.m41.size_of(ops) + -- self.m42.size_of(ops) + -- self.m43.size_of(ops) + -- self.m44.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedVector2D { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.x.size_of(ops) + self.y.size_of(ops) -- } --} -- --impl MallocSizeOf for selectors::parser::AncestorHashes { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- let selectors::parser::AncestorHashes { ref packed_hashes } = *self; -- packed_hashes.size_of(ops) -- } --} -- --impl MallocSizeOf for selectors::parser::Selector --where -- Impl::NonTSPseudoClass: MallocSizeOf, -- Impl::PseudoElement: MallocSizeOf, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- let mut n = 0; -- -- // It's OK to measure this ThinArc directly because it's the -- // "primary" reference. (The secondary references are on the -- // Stylist.) 
-- n += unsafe { ops.malloc_size_of(self.thin_arc_heap_ptr()) }; -- for component in self.iter_raw_match_order() { -- n += component.size_of(ops); -- } -- -- n -- } --} -- --impl MallocSizeOf for selectors::parser::Component --where -- Impl::NonTSPseudoClass: MallocSizeOf, -- Impl::PseudoElement: MallocSizeOf, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- use selectors::parser::Component; -- -- match self { -- Component::AttributeOther(ref attr_selector) => attr_selector.size_of(ops), -- Component::Negation(ref components) => components.size_of(ops), -- Component::NonTSPseudoClass(ref pseudo) => (*pseudo).size_of(ops), -- Component::Slotted(ref selector) | Component::Host(Some(ref selector)) => { -- selector.size_of(ops) -- }, -- Component::PseudoElement(ref pseudo) => (*pseudo).size_of(ops), -- Component::Combinator(..) | -- Component::ExplicitAnyNamespace | -- Component::ExplicitNoNamespace | -- Component::DefaultNamespace(..) | -- Component::Namespace(..) | -- Component::ExplicitUniversalType | -- Component::LocalName(..) | -- Component::ID(..) | -- Component::Class(..) | -- Component::AttributeInNoNamespaceExists { .. } | -- Component::AttributeInNoNamespace { .. } | -- Component::FirstChild | -- Component::LastChild | -- Component::OnlyChild | -- Component::Root | -- Component::Empty | -- Component::Scope | -- Component::NthChild(..) | -- Component::NthLastChild(..) | -- Component::NthOfType(..) | -- Component::NthLastOfType(..) | -- Component::FirstOfType | -- Component::LastOfType | -- Component::OnlyOfType | -- Component::Host(None) => 0, -- } -- } --} -- --impl MallocSizeOf -- for selectors::attr::AttrSelectorWithOptionalNamespace --{ -- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -- 0 -- } --} -- --impl MallocSizeOf for Void { -- #[inline] -- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -- void::unreachable(*self) -- } --} -- --#[cfg(feature = "servo")] --impl MallocSizeOf for string_cache::Atom { -- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -- 0 -- } --} -- --// This is measured properly by the heap measurement implemented in --// SpiderMonkey. --#[cfg(feature = "servo")] --impl MallocSizeOf for js::jsapi::Heap { -- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -- 0 -- } --} -- --/// For use on types where size_of() returns 0. - #[macro_export] - macro_rules! 
malloc_size_of_is_0( - ($($ty:ty),+) => ( -@@ -929,117 +591,6 @@ malloc_size_of_is_0!(Range, Range, Range, Range, Range - malloc_size_of_is_0!(Range, Range, Range, Range, Range); - malloc_size_of_is_0!(Range, Range); - --malloc_size_of_is_0!(app_units::Au); -- --malloc_size_of_is_0!(cssparser::RGBA, cssparser::TokenSerializationType); -- --#[cfg(feature = "url")] --impl MallocSizeOf for url::Host { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- match *self { -- url::Host::Domain(ref s) => s.size_of(ops), -- _ => 0, -- } -- } --} --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::BorderRadius); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::BorderStyle); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::BoxShadowClipMode); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ClipAndScrollInfo); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ColorF); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ComplexClipRegion); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ExtendMode); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::FilterOp); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ExternalScrollId); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::FontInstanceKey); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::GradientStop); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::GlyphInstance); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::NinePatchBorder); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ImageKey); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ImageRendering); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::LineStyle); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::MixBlendMode); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::NormalBorder); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::RepeatMode); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ScrollSensitivity); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::StickyOffsetBounds); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::TransformStyle); -- --#[cfg(feature = "servo")] --impl MallocSizeOf for keyboard_types::Key { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- match self { -- keyboard_types::Key::Character(ref s) => s.size_of(ops), -- _ => 0, -- } -- } --} -- --#[cfg(feature = "servo")] --malloc_size_of_is_0!(keyboard_types::Modifiers); -- --#[cfg(feature = "servo")] --impl MallocSizeOf for xml5ever::QualName { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.prefix.size_of(ops) + self.ns.size_of(ops) + self.local.size_of(ops) -- } --} -- --#[cfg(feature = "servo")] --malloc_size_of_is_0!(time::Duration); --#[cfg(feature = "servo")] --malloc_size_of_is_0!(time::Tm); -- --#[cfg(feature = "servo")] --impl MallocSizeOf for hyper_serde::Serde --where -- for<'de> hyper_serde::De: serde::Deserialize<'de>, -- for<'a> hyper_serde::Ser<'a, T>: serde::Serialize, -- T: MallocSizeOf, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.0.size_of(ops) -- } --} -- --// Placeholder for unique case where 
internals of Sender cannot be measured. --// malloc size of is 0 macro complains about type supplied! --#[cfg(feature = "servo")] --impl MallocSizeOf for crossbeam_channel::Sender { -- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -- 0 -- } --} -- --#[cfg(feature = "servo")] --impl MallocSizeOf for hyper::StatusCode { -- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -- 0 -- } --} -- - /// Measurable that defers to inner value and used to verify MallocSizeOf implementation in a - /// struct. - #[derive(Clone)] diff --git a/parity-util-mem/src/allocators.rs b/parity-util-mem/src/allocators.rs deleted file mode 100644 index 2379c97d9..000000000 --- a/parity-util-mem/src/allocators.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! default allocator management -//! Features are: -//! - windows: -//! - no features: default implementation from servo `heapsize` crate -//! - weealloc: default to `estimate_size` -//! - dlmalloc: default to `estimate_size` -//! - jemalloc: default windows allocator is used instead -//! - mimalloc: use mimallocator crate -//! - arch x86: -//! - no features: use default alloc -//! - jemalloc: use jemallocator crate -//! - weealloc: default to `estimate_size` -//! - dlmalloc: default to `estimate_size` -//! - mimalloc: use mimallocator crate -//! - arch x86/macos: -//! - no features: use default alloc, requires using `estimate_size` -//! - jemalloc: use jemallocator crate -//! - weealloc: default to `estimate_size` -//! - dlmalloc: default to `estimate_size` -//! - mimalloc: use mimallocator crate -//! - arch wasm32: -//! - no features: default to `estimate_size` -//! - weealloc: default to `estimate_size` -//! - dlmalloc: default to `estimate_size` -//! - jemalloc: compile error -//! - mimalloc: compile error (until https://github.com/microsoft/mimalloc/pull/32 is merged) - - -use malloc_size::{MallocSizeOfOps, VoidPtrToSizeFn, MallocSizeOf}; -#[cfg(feature = "std")] -use malloc_size::MallocUnconditionalSizeOf; -#[cfg(feature = "std")] -use std::os::raw::c_void; -#[cfg(not(feature = "std"))] -use core::ffi::c_void; - -mod usable_size { - - use super::*; - -cfg_if! { - - if #[cfg(any( - target_arch = "wasm32", - feature = "estimate-heapsize", - feature = "weealloc-global", - feature = "dlmalloc-global", - ))] { - - // do not try system allocator - - /// Warning this is for compatibility only. - /// This function does panic: `estimate-heapsize` feature needs to be activated - /// to avoid this function call. - pub unsafe extern "C" fn malloc_usable_size(_ptr: *const c_void) -> usize { - unreachable!("estimate heapsize only") - } - - } else if #[cfg(target_os = "windows")] { - - // default windows allocator - extern crate winapi; - - use self::winapi::um::heapapi::{GetProcessHeap, HeapSize, HeapValidate}; - - /// Get the size of a heap block. 
- /// Call windows allocator through `winapi` crate - pub unsafe extern "C" fn malloc_usable_size(mut ptr: *const c_void) -> usize { - - let heap = GetProcessHeap(); - - if HeapValidate(heap, 0, ptr) == 0 { - ptr = *(ptr as *const *const c_void).offset(-1); - } - - HeapSize(heap, 0, ptr) as usize - } - - } else if #[cfg(feature = "jemalloc-global")] { - - /// Use of jemalloc usable size C function through jemallocator crate call. - pub unsafe extern "C" fn malloc_usable_size(ptr: *const c_void) -> usize { - jemallocator::usable_size(ptr) - } - - } else if #[cfg(feature = "mimalloc-global")] { - - /// Use of mimalloc usable size C function through mimalloc_sys crate call. - pub unsafe extern "C" fn malloc_usable_size(ptr: *const c_void) -> usize { - // mimalloc doesn't actually mutate the value ptr points to, - // but requires a mut pointer in the API - mimalloc_sys::mi_usable_size(ptr as *mut _) - } - - } else if #[cfg(target_os = "linux")] { - - /// Linux call system allocator (currently malloc). - extern "C" { - pub fn malloc_usable_size(ptr: *const c_void) -> usize; - } - - } else { - // default allocator for non linux or windows system use estimate - pub unsafe extern "C" fn malloc_usable_size(_ptr: *const c_void) -> usize { - unreachable!("estimate heapsize or feature allocator needed") - } - - } - -} - - /// No enclosing function defined. - #[inline] - pub fn new_enclosing_size_fn() -> Option { - None - } -} - -/// Get a new instance of a MallocSizeOfOps -pub fn new_malloc_size_ops() -> MallocSizeOfOps { - MallocSizeOfOps::new( - usable_size::malloc_usable_size, - usable_size::new_enclosing_size_fn(), - None, - ) -} - -/// Extension methods for `MallocSizeOf` trait, do not implement -/// directly. -/// It allows getting heapsize without exposing `MallocSizeOfOps` -/// (a single default `MallocSizeOfOps` is used for each call). -pub trait MallocSizeOfExt: MallocSizeOf { - /// Method to launch a heapsize measurement with a - /// fresh state. - fn malloc_size_of(&self) -> usize { - let mut ops = new_malloc_size_ops(); - ::size_of(self, &mut ops) - } -} - -impl MallocSizeOfExt for T { } - -#[cfg(feature = "std")] -impl MallocSizeOf for std::sync::Arc { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.unconditional_size_of(ops) - } -} diff --git a/parity-util-mem/src/impls.rs b/parity-util-mem/src/impls.rs deleted file mode 100644 index 19e787ba3..000000000 --- a/parity-util-mem/src/impls.rs +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Implementation of `MallocSize` for common types : -//! - etheureum types uint and fixed hash. -//! - elastic_array arrays -//! 
- parking_lot mutex structures - -extern crate elastic_array; -extern crate ethereum_types; -extern crate parking_lot; - -use self::ethereum_types::{ - U64, U128, U256, U512, H32, H64, - H128, H160, H256, H264, H512, H520, - Bloom -}; -use self::elastic_array::{ - ElasticArray2, - ElasticArray4, - ElasticArray8, - ElasticArray16, - ElasticArray32, - ElasticArray36, - ElasticArray64, - ElasticArray128, - ElasticArray256, - ElasticArray512, - ElasticArray1024, - ElasticArray2048, -}; -use self::parking_lot::{Mutex, RwLock}; -use super::{MallocSizeOf, MallocSizeOfOps}; - -#[cfg(not(feature = "std"))] -use core as std; - -#[cfg(feature = "std")] -malloc_size_of_is_0!(std::time::Instant); -malloc_size_of_is_0!(std::time::Duration); - -malloc_size_of_is_0!( - U64, U128, U256, U512, H32, H64, - H128, H160, H256, H264, H512, H520, - Bloom -); - -macro_rules! impl_elastic_array { - ($name: ident, $dummy: ident, $size: expr) => ( - impl MallocSizeOf for $name - where T: MallocSizeOf { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self[..].size_of(ops) - } - } - ) -} - -impl_elastic_array!(ElasticArray2, ElasticArray2Dummy, 2); -impl_elastic_array!(ElasticArray4, ElasticArray4Dummy, 4); -impl_elastic_array!(ElasticArray8, ElasticArray8Dummy, 8); -impl_elastic_array!(ElasticArray16, ElasticArray16Dummy, 16); -impl_elastic_array!(ElasticArray32, ElasticArray32Dummy, 32); -impl_elastic_array!(ElasticArray36, ElasticArray36Dummy, 36); -impl_elastic_array!(ElasticArray64, ElasticArray64Dummy, 64); -impl_elastic_array!(ElasticArray128, ElasticArray128Dummy, 128); -impl_elastic_array!(ElasticArray256, ElasticArray256Dummy, 256); -impl_elastic_array!(ElasticArray512, ElasticArray512Dummy, 512); -impl_elastic_array!(ElasticArray1024, ElasticArray1024Dummy, 1024); -impl_elastic_array!(ElasticArray2048, ElasticArray2048Dummy, 2048); - - -impl MallocSizeOf for Mutex { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - (*self.lock()).size_of(ops) - } -} - -impl MallocSizeOf for RwLock { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.read().size_of(ops) - } -} diff --git a/parity-util-mem/src/lib.rs b/parity-util-mem/src/lib.rs deleted file mode 100644 index 2114cddba..000000000 --- a/parity-util-mem/src/lib.rs +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Crate for parity memory management related utilities. -//! It includes global allocator choice, heap measurement and -//! memory erasure. - -#![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(not(feature = "std"), feature(core_intrinsics))] -#![cfg_attr(not(feature = "std"), feature(alloc))] - - -#[macro_use] -extern crate cfg_if; - -#[cfg(not(feature = "std"))] -extern crate alloc; - -extern crate malloc_size_of_derive as malloc_size_derive; - - -cfg_if! 
{ - if #[cfg(all( - feature = "jemalloc-global", - not(target_os = "windows"), - not(target_arch = "wasm32") - ))] { - extern crate jemallocator; - #[global_allocator] - /// Global allocator - pub static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; - } else if #[cfg(feature = "dlmalloc-global")] { - extern crate dlmalloc; - #[global_allocator] - /// Global allocator - pub static ALLOC: dlmalloc::GlobalDlmalloc = dlmalloc::GlobalDlmalloc; - } else if #[cfg(feature = "weealloc-global")] { - extern crate wee_alloc; - #[global_allocator] - /// Global allocator - pub static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; - } else if #[cfg(all( - feature = "mimalloc-global", - not(target_arch = "wasm32") - ))] { - extern crate mimallocator; - extern crate mimalloc_sys; - #[global_allocator] - /// Global allocator - pub static ALLOC: mimallocator::Mimalloc = mimallocator::Mimalloc; - } else { - // default allocator used - } -} - -pub mod allocators; - -#[cfg(any( - all( - target_os = "macos", - not(feature = "jemalloc-global"), - ), - feature = "estimate-heapsize" -))] -pub mod sizeof; - -#[cfg(not(feature = "std"))] -use core as std; - -/// This is a copy of patched crate `malloc_size_of` as a module. -/// We need to have it as an inner module to be able to define our own traits implementation, -/// if at some point the trait become standard enough we could use the right way of doing it -/// by implementing it in our type traits crates. At this time moving this trait to the primitive -/// types level would impact too much of the dependencies to be easily manageable. -#[macro_use] mod malloc_size; - -#[cfg(feature = "ethereum-impls")] -pub mod impls; - -pub use malloc_size_derive::*; -pub use malloc_size::{ - MallocSizeOfOps, - MallocSizeOf, -}; -pub use allocators::MallocSizeOfExt; - -#[cfg(feature = "std")] -#[cfg(test)] -mod test { - use std::sync::Arc; - use super::MallocSizeOfExt; - - #[test] - fn test_arc() { - let val = Arc::new("test".to_string()); - let s = val.malloc_size_of(); - assert!(s > 0); - } -} diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs deleted file mode 100644 index e7599ea85..000000000 --- a/parity-util-mem/src/malloc_size.rs +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2016-2017 The Servo Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A crate for measuring the heap usage of data structures in a way that -//! integrates with Firefox's memory reporting, particularly the use of -//! mozjemalloc and DMD. In particular, it has the following features. -//! - It isn't bound to a particular heap allocator. -//! - It provides traits for both "shallow" and "deep" measurement, which gives -//! flexibility in the cases where the traits can't be used. -//! - It allows for measuring blocks even when only an interior pointer can be -//! obtained for heap allocations, e.g. `HashSet` and `HashMap`. (This relies -//! on the heap allocator having suitable support, which mozjemalloc has.) -//! - It allows handling of types like `Rc` and `Arc` by providing traits that -//! are different to the ones for non-graph structures. -//! -//! Suggested uses are as follows. -//! - When possible, use the `MallocSizeOf` trait. (Deriving support is -//! 
provided by the `malloc_size_of_derive` crate.) -//! - If you need an additional synchronization argument, provide a function -//! that is like the standard trait method, but with the extra argument. -//! - If you need multiple measurements for a type, provide a function named -//! `add_size_of` that takes a mutable reference to a struct that contains -//! the multiple measurement fields. -//! - When deep measurement (via `MallocSizeOf`) cannot be implemented for a -//! type, shallow measurement (via `MallocShallowSizeOf`) in combination with -//! iteration can be a useful substitute. -//! - `Rc` and `Arc` are always tricky, which is why `MallocSizeOf` is not (and -//! should not be) implemented for them. -//! - If an `Rc` or `Arc` is known to be a "primary" reference and can always -//! be measured, it should be measured via the `MallocUnconditionalSizeOf` -//! trait. -//! - If an `Rc` or `Arc` should be measured only if it hasn't been seen -//! before, it should be measured via the `MallocConditionalSizeOf` trait. -//! - Using universal function call syntax is a good idea when measuring boxed -//! fields in structs, because it makes it clear that the Box is being -//! measured as well as the thing it points to. E.g. -//! ` as MallocSizeOf>::size_of(field, ops)`. - - -// This file is patched at commit 5bdea7dc1c80790a852a3fb03edfb2b8fbd403dc DO NOT EDIT. - - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; -#[cfg(not(feature = "std"))] -mod std { - pub use core::*; - pub use alloc::collections; -} - -#[cfg(feature = "std")] -use std::sync::Arc; - -#[cfg(feature = "std")] -use std::hash::BuildHasher; -use std::hash::Hash; -use std::mem::size_of; -use std::ops::Range; -use std::ops::{Deref, DerefMut}; -#[cfg(feature = "std")] -use std::os::raw::c_void; -#[cfg(not(feature = "std"))] -use core::ffi::c_void; -#[cfg(not(feature = "std"))] -pub use alloc::boxed::Box; - -/// A C function that takes a pointer to a heap allocation and returns its size. -pub type VoidPtrToSizeFn = unsafe extern "C" fn(ptr: *const c_void) -> usize; - -/// A closure implementing a stateful predicate on pointers. -pub type VoidPtrToBoolFnMut = dyn FnMut(*const c_void) -> bool; - -/// Operations used when measuring heap usage of data structures. -pub struct MallocSizeOfOps { - /// A function that returns the size of a heap allocation. - size_of_op: VoidPtrToSizeFn, - - /// Like `size_of_op`, but can take an interior pointer. Optional because - /// not all allocators support this operation. If it's not provided, some - /// memory measurements will actually be computed estimates rather than - /// real and accurate measurements. - enclosing_size_of_op: Option, - - /// Check if a pointer has been seen before, and remember it for next time. - /// Useful when measuring `Rc`s and `Arc`s. Optional, because many places - /// don't need it. - have_seen_ptr_op: Option>, -} - -impl MallocSizeOfOps { - pub fn new( - size_of: VoidPtrToSizeFn, - malloc_enclosing_size_of: Option, - have_seen_ptr: Option>, - ) -> Self { - MallocSizeOfOps { - size_of_op: size_of, - enclosing_size_of_op: malloc_enclosing_size_of, - have_seen_ptr_op: have_seen_ptr, - } - } - - /// Check if an allocation is empty. This relies on knowledge of how Rust - /// handles empty allocations, which may change in the future. - fn is_empty(ptr: *const T) -> bool { - // The correct condition is this: - // `ptr as usize <= ::std::mem::align_of::()` - // But we can't call align_of() on a ?Sized T. So we approximate it - // with the following. 
256 is large enough that it should always be - // larger than the required alignment, but small enough that it is - // always in the first page of memory and therefore not a legitimate - // address. - return ptr as *const usize as usize <= 256; - } - - /// Call `size_of_op` on `ptr`, first checking that the allocation isn't - /// empty, because some types (such as `Vec`) utilize empty allocations. - pub unsafe fn malloc_size_of(&self, ptr: *const T) -> usize { - if MallocSizeOfOps::is_empty(ptr) { - 0 - } else { - (self.size_of_op)(ptr as *const c_void) - } - } - - /// Is an `enclosing_size_of_op` available? - pub fn has_malloc_enclosing_size_of(&self) -> bool { - self.enclosing_size_of_op.is_some() - } - - /// Call `enclosing_size_of_op`, which must be available, on `ptr`, which - /// must not be empty. - pub unsafe fn malloc_enclosing_size_of(&self, ptr: *const T) -> usize { - assert!(!MallocSizeOfOps::is_empty(ptr)); - (self.enclosing_size_of_op.unwrap())(ptr as *const c_void) - } - - /// Call `have_seen_ptr_op` on `ptr`. - pub fn have_seen_ptr(&mut self, ptr: *const T) -> bool { - let have_seen_ptr_op = self - .have_seen_ptr_op - .as_mut() - .expect("missing have_seen_ptr_op"); - have_seen_ptr_op(ptr as *const c_void) - } -} - -/// Trait for measuring the "deep" heap usage of a data structure. This is the -/// most commonly-used of the traits. -pub trait MallocSizeOf { - /// Measure the heap usage of all descendant heap-allocated structures, but - /// not the space taken up by the value itself. - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize; -} - -/// Trait for measuring the "shallow" heap usage of a container. -pub trait MallocShallowSizeOf { - /// Measure the heap usage of immediate heap-allocated descendant - /// structures, but not the space taken up by the value itself. Anything - /// beyond the immediate descendants must be measured separately, using - /// iteration. - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; -} - -/// Like `MallocSizeOf`, but with a different name so it cannot be used -/// accidentally with derive(MallocSizeOf). For use with types like `Rc` and -/// `Arc` when appropriate (e.g. when measuring a "primary" reference). -pub trait MallocUnconditionalSizeOf { - /// Measure the heap usage of all heap-allocated descendant structures, but - /// not the space taken up by the value itself. - fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; -} - -/// `MallocUnconditionalSizeOf` combined with `MallocShallowSizeOf`. -pub trait MallocUnconditionalShallowSizeOf { - /// `unconditional_size_of` combined with `shallow_size_of`. - fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; -} - -/// Like `MallocSizeOf`, but only measures if the value hasn't already been -/// measured. For use with types like `Rc` and `Arc` when appropriate (e.g. -/// when there is no "primary" reference). -pub trait MallocConditionalSizeOf { - /// Measure the heap usage of all heap-allocated descendant structures, but - /// not the space taken up by the value itself, and only if that heap usage - /// hasn't already been measured. - fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; -} - -/// `MallocConditionalSizeOf` combined with `MallocShallowSizeOf`. -pub trait MallocConditionalShallowSizeOf { - /// `conditional_size_of` combined with `shallow_size_of`. 
- fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; -} - -#[cfg(not(any( - all( - target_os = "macos", - not(feature = "jemalloc-global"), - ), - feature = "estimate-heapsize" -)))] -pub mod inner_allocator_use { - -use super::*; - -#[cfg(not(feature = "std"))] -use alloc::string::String; - -impl MallocShallowSizeOf for Box { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - unsafe { ops.malloc_size_of(&**self) } - } -} - -impl MallocShallowSizeOf for Vec { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - unsafe { ops.malloc_size_of(self.as_ptr()) } - } -} - -// currently this seems only fine with jemalloc -#[cfg(feature = "std")] -#[cfg(all(feature = "jemalloc-global", not(target_os = "windows")))] -impl MallocUnconditionalShallowSizeOf for Arc { - fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - unsafe { ops.malloc_size_of(arc_ptr(self)) } - } -} - -#[cfg(feature = "std")] -#[cfg(not(all(feature = "jemalloc-global", not(target_os = "windows"))))] -impl MallocUnconditionalShallowSizeOf for Arc { - fn unconditional_shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - size_of::() - } -} - -impl MallocSizeOf for String { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - unsafe { ops.malloc_size_of(self.as_ptr()) } - } -} - -} - -impl<'a, T: ?Sized> MallocSizeOf for &'a T { - fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - // Zero makes sense for a non-owning reference. - 0 - } -} - -impl MallocSizeOf for Box { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.shallow_size_of(ops) + (**self).size_of(ops) - } -} - -impl MallocSizeOf for () { - fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - 0 - } -} - -impl MallocSizeOf for (T1, T2) -where - T1: MallocSizeOf, - T2: MallocSizeOf, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.0.size_of(ops) + self.1.size_of(ops) - } -} - -impl MallocSizeOf for (T1, T2, T3) -where - T1: MallocSizeOf, - T2: MallocSizeOf, - T3: MallocSizeOf, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.0.size_of(ops) + self.1.size_of(ops) + self.2.size_of(ops) - } -} - -impl MallocSizeOf for (T1, T2, T3, T4) -where - T1: MallocSizeOf, - T2: MallocSizeOf, - T3: MallocSizeOf, - T4: MallocSizeOf, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.0.size_of(ops) + self.1.size_of(ops) + self.2.size_of(ops) + self.3.size_of(ops) - } -} - -impl MallocSizeOf for Option { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if let Some(val) = self.as_ref() { - val.size_of(ops) - } else { - 0 - } - } -} - -impl MallocSizeOf for Result { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - match *self { - Ok(ref x) => x.size_of(ops), - Err(ref e) => e.size_of(ops), - } - } -} - -impl MallocSizeOf for std::cell::Cell { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.get().size_of(ops) - } -} - -impl MallocSizeOf for std::cell::RefCell { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.borrow().size_of(ops) - } -} - -#[cfg(feature = "std")] -impl<'a, B: ?Sized + ToOwned> MallocSizeOf for std::borrow::Cow<'a, B> -where - B::Owned: MallocSizeOf, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - match *self { - std::borrow::Cow::Borrowed(_) => 0, - std::borrow::Cow::Owned(ref b) => b.size_of(ops), - } - } -} - -impl MallocSizeOf for [T] { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = 0; - for elem in 
self.iter() { - n += elem.size_of(ops); - } - n - } -} - -impl MallocSizeOf for Vec { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - for elem in self.iter() { - n += elem.size_of(ops); - } - n - } -} - -impl MallocShallowSizeOf for std::collections::VecDeque { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.has_malloc_enclosing_size_of() { - if let Some(front) = self.front() { - // The front element is an interior pointer. - unsafe { ops.malloc_enclosing_size_of(&*front) } - } else { - // This assumes that no memory is allocated when the VecDeque is empty. - 0 - } - } else { - // An estimate. - self.capacity() * size_of::() - } - } -} - -impl MallocSizeOf for std::collections::VecDeque { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - for elem in self.iter() { - n += elem.size_of(ops); - } - n - } -} - -#[cfg(feature = "std")] -impl MallocShallowSizeOf for std::collections::HashSet -where - T: Eq + Hash, - S: BuildHasher, -{ - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.has_malloc_enclosing_size_of() { - // The first value from the iterator gives us an interior pointer. - // `ops.malloc_enclosing_size_of()` then gives us the storage size. - // This assumes that the `HashSet`'s contents (values and hashes) - // are all stored in a single contiguous heap allocation. - self.iter() - .next() - .map_or(0, |t| unsafe { ops.malloc_enclosing_size_of(t) }) - } else { - // An estimate. - self.capacity() * (size_of::() + size_of::()) - } - } -} - -#[cfg(feature = "std")] -impl MallocSizeOf for std::collections::HashSet -where - T: Eq + Hash + MallocSizeOf, - S: BuildHasher, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - for t in self.iter() { - n += t.size_of(ops); - } - n - } -} - -#[cfg(feature = "std")] -impl MallocShallowSizeOf for std::collections::HashMap -where - K: Eq + Hash, - S: BuildHasher, -{ - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - // See the implementation for std::collections::HashSet for details. - if ops.has_malloc_enclosing_size_of() { - self.values() - .next() - .map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) - } else { - self.capacity() * (size_of::() + size_of::() + size_of::()) - } - } -} - -#[cfg(feature = "std")] -impl MallocSizeOf for std::collections::HashMap -where - K: Eq + Hash + MallocSizeOf, - V: MallocSizeOf, - S: BuildHasher, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - for (k, v) in self.iter() { - n += k.size_of(ops); - n += v.size_of(ops); - } - n - } -} - -impl MallocShallowSizeOf for std::collections::BTreeMap -where - K: Eq + Hash, -{ - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.has_malloc_enclosing_size_of() { - self.values() - .next() - .map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) - } else { - self.len() * (size_of::() + size_of::() + size_of::()) - } - } -} - -impl MallocSizeOf for std::collections::BTreeMap -where - K: Eq + Hash + MallocSizeOf, - V: MallocSizeOf, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - for (k, v) in self.iter() { - n += k.size_of(ops); - n += v.size_of(ops); - } - n - } -} - -// PhantomData is always 0. 
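The hash-map and b-tree impls above fall back to a capacity- or length-based estimate whenever no `enclosing_size_of_op` is available. A small, hypothetical worked example of that arithmetic (64-bit target assumed; the trailing `usize` stands in for per-entry bookkeeping such as the cached hash):

```rust
use std::mem::size_of;

// Illustrative only: the fallback estimate for a HashMap<u64, [u8; 32]> with capacity 128,
// written out by hand. Real measurements would come from the allocator callback instead.
fn main() {
    let capacity = 128usize; // made-up capacity, not a measured value
    let estimate = capacity * (size_of::<u64>() + size_of::<[u8; 32]>() + size_of::<usize>());
    assert_eq!(estimate, 128 * (8 + 32 + 8)); // 6144 bytes, allocator overhead ignored
}
```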
-impl MallocSizeOf for std::marker::PhantomData { - fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - 0 - } -} - -// XXX: we don't want MallocSizeOf to be defined for Rc and Arc. If negative -// trait bounds are ever allowed, this code should be uncommented. -// (We do have a compile-fail test for this: -// rc_arc_must_not_derive_malloc_size_of.rs) -//impl !MallocSizeOf for Arc { } -//impl !MallocShallowSizeOf for Arc { } - -#[cfg(feature = "std")] -fn arc_ptr(s: &Arc) -> * const T { - &(**s) as *const T -} - -#[cfg(feature = "std")] -impl MallocUnconditionalSizeOf for Arc { - fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.unconditional_shallow_size_of(ops) + (**self).size_of(ops) - } -} - -#[cfg(feature = "std")] -impl MallocConditionalShallowSizeOf for Arc { - fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.have_seen_ptr(arc_ptr(self)) { - 0 - } else { - self.unconditional_shallow_size_of(ops) - } - } -} - -#[cfg(feature = "std")] -impl MallocConditionalSizeOf for Arc { - fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.have_seen_ptr(arc_ptr(self)) { - 0 - } else { - self.unconditional_size_of(ops) - } - } -} - -/// If a mutex is stored directly as a member of a data type that is being measured, -/// it is the unique owner of its contents and deserves to be measured. -/// -/// If a mutex is stored inside of an Arc value as a member of a data type that is being measured, -/// the Arc will not be automatically measured so there is no risk of overcounting the mutex's -/// contents. -#[cfg(feature = "std")] -impl MallocSizeOf for std::sync::Mutex { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - (*self.lock().unwrap()).size_of(ops) - } -} - -#[macro_export] -macro_rules! malloc_size_of_is_0( - ($($ty:ty),+) => ( - $( - impl $crate::MallocSizeOf for $ty { - #[inline(always)] - fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { - 0 - } - } - )+ - ); - ($($ty:ident<$($gen:ident),+>),+) => ( - $( - impl<$($gen: $crate::MallocSizeOf),+> $crate::MallocSizeOf for $ty<$($gen),+> { - #[inline(always)] - fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { - 0 - } - } - )+ - ); -); - -malloc_size_of_is_0!(bool, char, str); -malloc_size_of_is_0!(u8, u16, u32, u64, u128, usize); -malloc_size_of_is_0!(i8, i16, i32, i64, i128, isize); -malloc_size_of_is_0!(f32, f64); - -malloc_size_of_is_0!(std::sync::atomic::AtomicBool); -malloc_size_of_is_0!(std::sync::atomic::AtomicIsize); -malloc_size_of_is_0!(std::sync::atomic::AtomicUsize); - -malloc_size_of_is_0!(Range, Range, Range, Range, Range); -malloc_size_of_is_0!(Range, Range, Range, Range, Range); -malloc_size_of_is_0!(Range, Range); - -/// Measurable that defers to inner value and used to verify MallocSizeOf implementation in a -/// struct. -#[derive(Clone)] -pub struct Measurable(pub T); - -impl Deref for Measurable { - type Target = T; - - fn deref(&self) -> &T { - &self.0 - } -} - -impl DerefMut for Measurable { - fn deref_mut(&mut self) -> &mut T { - &mut self.0 - } -} diff --git a/parity-util-mem/src/sizeof.rs b/parity-util-mem/src/sizeof.rs deleted file mode 100644 index fb917b1b6..000000000 --- a/parity-util-mem/src/sizeof.rs +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. 
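That closes the removed `malloc_size` module. For reference, a minimal sketch of how it was driven: construct a `MallocSizeOfOps` from an allocator callback, then ask a value for its deep size. The callback is a stub, `Session` is a made-up type, and the `parity_util_mem` re-exports are assumed rather than taken from this diff:

```rust
// Assumption: the crate re-exports the traits defined in the module above.
use parity_util_mem::{MallocSizeOf, MallocSizeOfOps};
use std::os::raw::c_void;

// Stub size callback: a real integration would return the allocator's usable size for the
// allocation behind `ptr` (a malloc_usable_size-style hook); 0 keeps the sketch self-contained.
unsafe extern "C" fn stub_size_of(_ptr: *const c_void) -> usize {
    0
}

// Hypothetical type owning heap data, measured by hand instead of #[derive(MallocSizeOf)].
struct Session {
    peers: Vec<u64>,
    label: String,
}

impl MallocSizeOf for Session {
    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
        // Vec and String sizes come from the impls defined earlier in this module.
        self.peers.size_of(ops) + self.label.size_of(ops)
    }
}

fn main() {
    let mut ops = MallocSizeOfOps::new(stub_size_of, None, None);
    let session = Session { peers: vec![1, 2, 3], label: "example".into() };
    let _deep_bytes = session.size_of(&mut ops);
}
```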
- -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Estimation for heapsize calculation. Usable to replace call to allocator method (for some -//! allocators or simply because we just need a deterministic cunsumption measurement). - - -use crate::malloc_size::{ - MallocSizeOf, - MallocShallowSizeOf, - MallocUnconditionalShallowSizeOf, - MallocSizeOfOps -}; -#[cfg(not(feature = "std"))] -use alloc::boxed::Box; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; -#[cfg(not(feature = "std"))] -use alloc::string::String; -#[cfg(not(feature = "std"))] -use core::mem::{size_of, size_of_val}; -#[cfg(not(feature = "std"))] -use alloc::sync::Arc; - -#[cfg(feature = "std")] -use std::mem::{size_of, size_of_val}; -#[cfg(feature = "std")] -use std::sync::Arc; - -impl MallocShallowSizeOf for Box { - fn shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - size_of_val(&**self) - } -} - -impl MallocSizeOf for String { - fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - self.capacity() * size_of::() - } -} - -impl MallocShallowSizeOf for Vec { - fn shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - self.capacity() * size_of::() - } -} - -impl MallocUnconditionalShallowSizeOf for Arc { - fn unconditional_shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - size_of::() - } -} diff --git a/plain_hasher/Cargo.toml b/plain_hasher/Cargo.toml deleted file mode 100644 index 2228b4a91..000000000 --- a/plain_hasher/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "plain_hasher" -description = "Hasher for 32-byte keys." -version = "0.2.1" -authors = ["Parity Technologies "] -license = "MIT" -keywords = ["hash", "hasher"] -homepage = "https://github.com/paritytech/parity-common" -categories = ["no-std"] -edition = "2018" - -[dependencies] -crunchy = { version = "0.2", default-features = false } - -[dev-dependencies] -criterion = "0.3" - -[features] -default = ["std"] -std = ["crunchy/std"] - -[[bench]] -name = "bench" -harness = false diff --git a/plain_hasher/README.md b/plain_hasher/README.md deleted file mode 100644 index ec5082999..000000000 --- a/plain_hasher/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Specialized Hasher for 32-byte keys - -Provides `PlainHasher`, a specialized `core::hash::Hasher` that takes just 8 bytes of the provided value and may only be used for keys which are 32 bytes. - -The crate is `no_std`-compatible. diff --git a/plain_hasher/benches/bench.rs b/plain_hasher/benches/bench.rs deleted file mode 100644 index d5701ef87..000000000 --- a/plain_hasher/benches/bench.rs +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use std::collections::hash_map::DefaultHasher; -use std::hash::Hasher; - -use criterion::{criterion_group, criterion_main, Criterion}; -use plain_hasher::PlainHasher; - -fn bench_write_hasher(c: &mut Criterion) { - c.bench_function("write_plain_hasher", |b| b.iter(|| { - (0..100u8).fold(PlainHasher::default(), |mut old, new| { - let bb = [new; 32]; - old.write(&bb); - old - }); - })); - c.bench_function("write_default_hasher", |b| b.iter(|| { - (0..100u8).fold(DefaultHasher::default(), |mut old, new| { - let bb = [new; 32]; - old.write(&bb); - old - }); - })); -} - -criterion_group!(benches, bench_write_hasher); -criterion_main!(benches); diff --git a/plain_hasher/src/lib.rs b/plain_hasher/src/lib.rs deleted file mode 100644 index 3665995d4..000000000 --- a/plain_hasher/src/lib.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] - -use core::hash::Hasher; - -use crunchy::unroll; - -/// Hasher that just takes 8 bytes of the provided value. -/// May only be used for keys which are 32 bytes. -#[derive(Default)] -pub struct PlainHasher { - prefix: u64, -} - -impl Hasher for PlainHasher { - #[inline] - fn finish(&self) -> u64 { - self.prefix - } - - #[inline] - #[allow(unused_assignments)] - fn write(&mut self, bytes: &[u8]) { - debug_assert!(bytes.len() == 32); - let mut bytes_ptr = bytes.as_ptr(); - let mut prefix_ptr = &mut self.prefix as *mut u64 as *mut u8; - - unroll! { - for _i in 0..8 { - unsafe { - *prefix_ptr ^= (*bytes_ptr ^ *bytes_ptr.offset(8)) ^ (*bytes_ptr.offset(16) ^ *bytes_ptr.offset(24)); - bytes_ptr = bytes_ptr.offset(1); - prefix_ptr = prefix_ptr.offset(1); - } - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_works() { - let mut bytes = [32u8; 32]; - bytes[0] = 15; - let mut hasher = PlainHasher::default(); - hasher.write(&bytes); - assert_eq!(hasher.prefix, 47); - } -} diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md new file mode 100644 index 000000000..7bcb454af --- /dev/null +++ b/primitive-types/CHANGELOG.md @@ -0,0 +1,73 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.13.1] - 2024-09-12 +- Updated `uint` to 0.10. [#859](https://github.com/paritytech/parity-common/pull/859) + +## [0.12.2] - 2023-10-10 +- Added `schemars` support via `json-schema` feature. 
[#785](https://github.com/paritytech/parity-common/pull/785) + +## [0.12.1] - 2022-10-27 +- Added `H384` and `H768` types. [#684](https://github.com/paritytech/parity-common/pull/684) + +## [0.12.0] - 2022-09-20 +### Breaking +- Updated `fixed-hash` to 0.8. [#680](https://github.com/paritytech/parity-common/pull/680) +- Uses weak-dependency feature of cargo. [#664](https://github.com/paritytech/parity-common/pull/664) + +## [0.11.1] - 2022-02-07 +- Updated `scale-info` to ">=0.9, <3". [#627](https://github.com/paritytech/parity-common/pull/627) + +## [0.11.0] - 2022-02-04 +### Breaking +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `impl-codec` to 0.6. [#623](https://github.com/paritytech/parity-common/pull/623) + +## [0.10.1] - 2021-07-02 +### Added +- Implemented `parity_scale_codec::MaxEncodedLen` trait for `{U128, U256, U512}` and `{H128, H160, H256, H512}` types. + +## [0.10.0] - 2021-07-02 +### Added +- Added `U128::full_mul` method. [#546](https://github.com/paritytech/parity-common/pull/546) +### Breaking +- Updated `scale-info` to 0.9. [#556](https://github.com/paritytech/parity-common/pull/556) +### Removed +- Removed `parity-scale-codec` direct dependency. [#556](https://github.com/paritytech/parity-common/pull/556) + +## [0.9.0] - 2021-01-27 +### Breaking +- Updated `impl-codec` to 0.5. [#510](https://github.com/paritytech/parity-common/pull/510) +- Updated `scale-info` to 0.5. [#510](https://github.com/paritytech/parity-common/pull/510) + +## [0.8.0] - 2021-01-05 +- Added `num-traits` feature. [#480](https://github.com/paritytech/parity-common/pull/480) +### Breaking +- Updated `impl-rlp` to `rlp` 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) +- Updated `uint` to 0.9. [#486](https://github.com/paritytech/parity-common/pull/486) + +## [0.7.3] - 2020-11-12 +- Added `scale_info` support. [#312](https://github.com/paritytech/parity-common/pull/312) +- Added `H128` type. [#434](https://github.com/paritytech/parity-common/pull/434) +- Added `fp-conversion` feature: `U256` <-> `f64`. [#436](https://github.com/paritytech/parity-common/pull/436) + +## [0.7.2] - 2020-05-05 +- Added `serde_no_std` feature. [#385](https://github.com/paritytech/parity-common/pull/385) + +## [0.7.1] - 2020-04-27 +- Added `arbitrary` feature. [#378](https://github.com/paritytech/parity-common/pull/378) + +## [0.7.0] - 2020-03-16 +- Removed `libc` feature. [#317](https://github.com/paritytech/parity-common/pull/317) + +## [0.6.2] - 2019-01-03 +- Expose to_hex and from_hex from impl-serde. [#302](https://github.com/paritytech/parity-common/pull/302) + +## [0.6.1] - 2019-10-24 +### Dependencies +- Updated dependencies. 
[#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 178826eef..20292d014 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,24 +1,52 @@ [package] name = "primitive-types" -version = "0.5.1" +version = "0.13.1" authors = ["Parity Technologies "] -license = "Apache-2.0/MIT" +license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" +repository = "https://github.com/paritytech/parity-common" description = "Primitive types shared by Ethereum and Substrate" +edition = "2021" +rust-version = "1.60.0" [dependencies] -fixed-hash = { version = "0.4", path = "../fixed-hash", default-features = false } -uint = { version = "0.8", path = "../uint", default-features = false } -impl-serde = { version = "0.2.1", path = "impls/serde", default-features = false, optional = true } -impl-codec = { version = "0.4.1", path = "impls/codec", default-features = false, optional = true } -impl-rlp = { version = "0.2", path = "impls/rlp", default-features = false, optional = true } +fixed-hash = { version = "0.8", path = "../fixed-hash", default-features = false } +uint = { version = "0.10.0", path = "../uint", default-features = false } +impl-serde = { version = "0.5.0", path = "impls/serde", default-features = false, optional = true } +impl-codec = { version = "0.7.0", path = "impls/codec", default-features = false, optional = true } +impl-num-traits = { version = "0.2.0", path = "impls/num-traits", default-features = false, optional = true } +impl-rlp = { version = "0.4", path = "impls/rlp", default-features = false, optional = true } +scale-info-crate = { package = "scale-info", version = ">=0.9, <3", features = ["derive"], default-features = false, optional = true } +schemars = { version = ">=0.8.12", default-features = true, optional = true } + +[dev-dependencies] +num-traits = "0.2" +serde_json = { version = "1.0", default-features = false } +jsonschema = { version = "0.23", default-features = false } [features] -default = ["std"] -std = ["uint/std", "fixed-hash/std", "impl-codec/std"] -byteorder = ["fixed-hash/byteorder"] -libc = ["fixed-hash/libc"] +default = ["std", "rand"] +std = ["uint/std", "fixed-hash/std", "impl-codec?/std"] +rand = ["fixed-hash/rand"] rustc-hex = ["fixed-hash/rustc-hex"] -serde = ["std", "impl-serde"] +serde = ["std", "impl-serde", "impl-serde/std"] +json-schema = ["dep:schemars"] +serde_no_std = ["impl-serde"] codec = ["impl-codec"] +scale-info = ["codec", "scale-info-crate"] rlp = ["impl-rlp"] +arbitrary = ["fixed-hash/arbitrary", "uint/arbitrary"] +fp-conversion = ["std"] +num-traits = ["impl-num-traits"] + +[[test]] +name = "scale_info" +required-features = ["scale-info"] + +[[test]] +name = "fp_conversion" +required-features = ["fp-conversion"] + +[[test]] +name = "num_traits" +required-features = ["num-traits"] diff --git a/primitive-types/impls/codec/CHANGELOG.md b/primitive-types/impls/codec/CHANGELOG.md new file mode 100644 index 000000000..aac607ab4 --- /dev/null +++ b/primitive-types/impls/codec/CHANGELOG.md @@ -0,0 +1,24 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.7.0] - 2024-09-12 +### Breaking +- Updated to `uint` 0.10. [#860](https://github.com/paritytech/parity-common/pull/860) + +## [0.6.0] - 2022-02-04 +### Breaking +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. 
[#601](https://github.com/paritytech/parity-common/pull/601)
+- Updated `parity-scale-codec` to 3.0. [#622](https://github.com/paritytech/parity-common/pull/622)
+
+## [0.5.1] - 2021-07-02
+### Dependencies
+- Updated `parity-scale-codec` to 2.2. [#552](https://github.com/paritytech/parity-common/pull/552)
+
+## [0.5.0] - 2021-01-27
+### Breaking
+- Updated `parity-scale-codec` to 2.0. [#510](https://github.com/paritytech/parity-common/pull/510)
diff --git a/primitive-types/impls/codec/Cargo.toml b/primitive-types/impls/codec/Cargo.toml
index cc35994f1..d0bd6b602 100644
--- a/primitive-types/impls/codec/Cargo.toml
+++ b/primitive-types/impls/codec/Cargo.toml
@@ -1,13 +1,15 @@
 [package]
 name = "impl-codec"
-version = "0.4.1"
+version = "0.7.0"
 authors = ["Parity Technologies <admin@parity.io>"]
-license = "Apache-2.0/MIT"
+license = "MIT OR Apache-2.0"
 homepage = "https://github.com/paritytech/parity-common"
 description = "Parity Codec serialization support for uint and fixed hash."
+edition = "2021"
+rust-version = "1.56.1"
 
 [dependencies]
-parity-scale-codec = { version = "1.0.3", default-features = false }
+parity-scale-codec = { version = "3.3.0", default-features = false, features = ["max-encoded-len"] }
 
 [features]
 default = ["std"]
diff --git a/primitive-types/impls/codec/src/lib.rs b/primitive-types/impls/codec/src/lib.rs
index 961081793..905d17d12 100644
--- a/primitive-types/impls/codec/src/lib.rs
+++ b/primitive-types/impls/codec/src/lib.rs
@@ -1,4 +1,4 @@
-// Copyright 2015-2018 Parity Technologies
+// Copyright 2020 Parity Technologies
 //
 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
@@ -11,7 +11,7 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
 #[doc(hidden)]
-pub extern crate parity_scale_codec as codec;
+pub use parity_scale_codec as codec;
 
 /// Add Parity Codec serialization support to an integer created by `construct_uint!`.
 #[macro_export]
@@ -19,8 +19,7 @@ macro_rules! impl_uint_codec {
 	($name: ident, $len: expr) => {
 		impl $crate::codec::Encode for $name {
 			fn using_encoded<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R {
-				let mut bytes = [0u8; $len * 8];
-				self.to_little_endian(&mut bytes);
+				let bytes = self.to_little_endian();
 				bytes.using_encoded(f)
 			}
 		}
@@ -28,14 +27,17 @@ macro_rules! impl_uint_codec {
 		impl $crate::codec::EncodeLike for $name {}
 
 		impl $crate::codec::Decode for $name {
-			fn decode<I: $crate::codec::Input>(input: &mut I)
-				-> core::result::Result<Self, $crate::codec::Error>
-			{
-				<[u8; $len * 8] as $crate::codec::Decode>::decode(input)
-					.map(|b| $name::from_little_endian(&b))
+			fn decode<I: $crate::codec::Input>(input: &mut I) -> core::result::Result<Self, $crate::codec::Error> {
+				<[u8; $len * 8] as $crate::codec::Decode>::decode(input).map(|b| $name::from_little_endian(&b))
 			}
 		}
-	}
+
+		impl $crate::codec::MaxEncodedLen for $name {
+			fn max_encoded_len() -> usize {
+				::core::mem::size_of::<$name>()
+			}
+		}
+	};
 }
 
 /// Add Parity Codec serialization support to a fixed-sized hash type created by `construct_fixed_hash!`.
@@ -51,11 +53,15 @@ macro_rules! impl_fixed_hash_codec {
 		impl $crate::codec::EncodeLike for $name {}
 
 		impl $crate::codec::Decode for $name {
-			fn decode<I: $crate::codec::Input>(input: &mut I)
-				-> core::result::Result<Self, $crate::codec::Error>
-			{
+			fn decode<I: $crate::codec::Input>(input: &mut I) -> core::result::Result<Self, $crate::codec::Error> {
 				<[u8; $len] as $crate::codec::Decode>::decode(input).map($name)
 			}
 		}
-	}
+
+		impl $crate::codec::MaxEncodedLen for $name {
+			fn max_encoded_len() -> usize {
+				::core::mem::size_of::<$name>()
+			}
+		}
+	};
 }
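Both macros above now also emit a `MaxEncodedLen` impl, so SCALE-encoded uints and hashes advertise their fixed size. A hedged round-trip sketch using a locally constructed uint (the `impl-codec`, `uint` and `parity-scale-codec` dependency wiring is assumed, not shown in this diff):

```rust
use impl_codec::impl_uint_codec;
use uint::construct_uint;

construct_uint! {
    // Local 4-limb (256-bit) integer for illustration; not the primitive-types alias.
    pub struct U256(4);
}
impl_uint_codec!(U256, 4);

fn main() {
    use parity_scale_codec::{Decode, Encode, MaxEncodedLen};

    let x = U256::from(42u64);
    let encoded = x.encode(); // 32 little-endian bytes, no length prefix
    assert_eq!(encoded.len(), U256::max_encoded_len());
    assert_eq!(U256::decode(&mut &encoded[..]).unwrap(), x);
}
```

The `max_encoded_len` here is simply `size_of::<$name>()`, which matches the fixed 32-byte little-endian array produced by `using_encoded`.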
diff --git a/primitive-types/impls/num-traits/CHANGELOG.md b/primitive-types/impls/num-traits/CHANGELOG.md
new file mode 100644
index 000000000..1f811b9cc
--- /dev/null
+++ b/primitive-types/impls/num-traits/CHANGELOG.md
@@ -0,0 +1,17 @@
+# Changelog
+
+The format is based on [Keep a Changelog].
+
+[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/
+
+## [Unreleased]
+
+## [0.2.0] - 2024-09-11
+- Updated `uint` to 0.10. [#859](https://github.com/paritytech/parity-common/pull/859)
+
+## [0.1.2] - 2023-02-01
+- Added `checked_*` trait impls. [#716](https://github.com/paritytech/parity-common/pull/716)
+- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601)
+
+## [0.1.1] - 2021-06-30
+- Added `integer-sqrt` trait support. [#554](https://github.com/paritytech/parity-common/pull/554)
diff --git a/primitive-types/impls/num-traits/Cargo.toml b/primitive-types/impls/num-traits/Cargo.toml
new file mode 100644
index 000000000..64df11f7e
--- /dev/null
+++ b/primitive-types/impls/num-traits/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "impl-num-traits"
+version = "0.2.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+license = "MIT OR Apache-2.0"
+homepage = "https://github.com/paritytech/parity-common"
+description = "num-traits implementation for uint."
+edition = "2021"
+rust-version = "1.56.1"
+
+[dependencies]
+num-traits = { version = "0.2", default-features = false }
+integer-sqrt = "0.1"
+uint = { version = "0.10.0", path = "../../../uint", default-features = false }
diff --git a/primitive-types/impls/num-traits/src/lib.rs b/primitive-types/impls/num-traits/src/lib.rs
new file mode 100644
index 000000000..5c0973eaa
--- /dev/null
+++ b/primitive-types/impls/num-traits/src/lib.rs
@@ -0,0 +1,89 @@
+// Copyright 2020 Parity Technologies
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! num-traits support for uint.
+
+#![no_std]
+
+#[doc(hidden)]
+pub use num_traits;
+
+#[doc(hidden)]
+pub use integer_sqrt;
+
+#[doc(hidden)]
+pub use uint;
+
+/// Add num-traits support to an integer created by `construct_uint!`.
+#[macro_export]
+macro_rules! impl_uint_num_traits {
+	($name: ident, $len: expr) => {
+		impl $crate::num_traits::sign::Unsigned for $name {}
+
+		impl $crate::num_traits::identities::Zero for $name {
+			#[inline]
+			fn zero() -> Self {
+				Self::zero()
+			}
+
+			#[inline]
+			fn is_zero(&self) -> bool {
+				self.is_zero()
+			}
+		}
+
+		impl $crate::num_traits::identities::One for $name {
+			#[inline]
+			fn one() -> Self {
+				Self::one()
+			}
+		}
+
+		impl $crate::num_traits::Num for $name {
+			type FromStrRadixErr = $crate::uint::FromStrRadixErr;
+
+			fn from_str_radix(txt: &str, radix: u32) -> Result<Self, Self::FromStrRadixErr> {
+				Self::from_str_radix(txt, radix)
+			}
+		}
+
+		impl $crate::integer_sqrt::IntegerSquareRoot for $name {
+			fn integer_sqrt_checked(&self) -> Option<Self> {
+				Some(self.integer_sqrt())
+			}
+		}
+
+		impl $crate::num_traits::ops::checked::CheckedAdd for $name {
+			#[inline]
+			fn checked_add(&self, v: &Self) -> Option<Self> {
+				$name::checked_add(*self, *v)
+			}
+		}
+
+		impl $crate::num_traits::ops::checked::CheckedSub for $name {
+			#[inline]
+			fn checked_sub(&self, v: &Self) -> Option<Self> {
+				$name::checked_sub(*self, *v)
+			}
+		}
+
+		impl $crate::num_traits::ops::checked::CheckedDiv for $name {
+			#[inline]
+			fn checked_div(&self, v: &Self) -> Option<Self> {
+				$name::checked_div(*self, *v)
+			}
+		}
+
+		impl $crate::num_traits::ops::checked::CheckedMul for $name {
+			#[inline]
+			fn checked_mul(&self, v: &Self) -> Option<Self> {
+				$name::checked_mul(*self, *v)
+			}
+		}
+	};
+}
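Once `impl_uint_num_traits!` has been applied, a `construct_uint!` type satisfies the usual `num-traits` bounds as well as the `integer-sqrt` trait. An illustrative sketch (the generic `checked_average` helper is hypothetical, not part of this crate; the `num-traits` and `integer-sqrt` dependencies are assumed):

```rust
use impl_num_traits::impl_uint_num_traits;
use uint::construct_uint;

construct_uint! {
    // Local 4-limb (256-bit) integer for illustration.
    pub struct U256(4);
}
impl_uint_num_traits!(U256, 4);

// Hypothetical generic helper written purely against num-traits bounds.
fn checked_average<T>(a: T, b: T) -> Option<T>
where
    T: num_traits::CheckedAdd + num_traits::CheckedDiv + From<u32>,
{
    a.checked_add(&b)?.checked_div(&T::from(2u32))
}

fn main() {
    assert_eq!(checked_average(U256::from(6u32), U256::from(10u32)), Some(U256::from(8u32)));
    assert_eq!(U256::from(144u32).integer_sqrt(), U256::from(12u32));
}
```

The `Checked*` impls simply forward to the inherent `checked_*` methods generated by `construct_uint!`, so overflow surfaces as `None` rather than a panic.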
diff --git a/primitive-types/impls/rlp/CHANGELOG.md b/primitive-types/impls/rlp/CHANGELOG.md
new file mode 100644
index 000000000..fb5aaed4c
--- /dev/null
+++ b/primitive-types/impls/rlp/CHANGELOG.md
@@ -0,0 +1,13 @@
+# Changelog
+
+The format is based on [Keep a Changelog].
+
+[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/
+
+## [0.4.0] - 2024-09-11
+- Updated `rlp` to 0.6. [#859](https://github.com/paritytech/parity-common/pull/859)
+- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601)
+
+## [0.3.0] - 2021-01-05
+### Breaking
+- Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463)
diff --git a/primitive-types/impls/rlp/Cargo.toml b/primitive-types/impls/rlp/Cargo.toml
index f2de4e8ba..839a5842e 100644
--- a/primitive-types/impls/rlp/Cargo.toml
+++ b/primitive-types/impls/rlp/Cargo.toml
@@ -1,13 +1,15 @@
 [package]
 name = "impl-rlp"
-version = "0.2.0"
+version = "0.4.0"
 authors = ["Parity Technologies <admin@parity.io>"]
-license = "Apache-2.0/MIT"
+license = "MIT OR Apache-2.0"
 homepage = "https://github.com/paritytech/parity-common"
 description = "RLP serialization support for uint and fixed hash."
+edition = "2021"
+rust-version = "1.56.1"
 
 [dependencies]
-rlp = { version = "0.4", path = "../../../rlp", default-features = false }
+rlp = { version = "0.6", path = "../../../rlp", default-features = false }
 
 [features]
 default = ["std"]
diff --git a/primitive-types/impls/rlp/src/lib.rs b/primitive-types/impls/rlp/src/lib.rs
index dffbfe96b..9b17ea4f7 100644
--- a/primitive-types/impls/rlp/src/lib.rs
+++ b/primitive-types/impls/rlp/src/lib.rs
@@ -1,4 +1,4 @@
-// Copyright 2015-2018 Parity Technologies
+// Copyright 2020 Parity Technologies
 //
 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
@@ -11,10 +11,10 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
 #[doc(hidden)]
-pub extern crate rlp;
+pub use rlp;
 
 #[doc(hidden)]
-pub extern crate core as core_;
+pub use core as core_;
 
 /// Add RLP serialization support to an integer created by `construct_uint!`.
 #[macro_export]
@@ -23,8 +23,7 @@ macro_rules!
impl_uint_rlp { impl $crate::rlp::Encodable for $name { fn rlp_append(&self, s: &mut $crate::rlp::RlpStream) { let leading_empty_bytes = $size * 8 - (self.bits() + 7) / 8; - let mut buffer = [0u8; $size * 8]; - self.to_big_endian(&mut buffer); + let buffer = self.to_big_endian(); s.encoder().encode_value(&buffer[leading_empty_bytes..]); } } @@ -35,14 +34,14 @@ macro_rules! impl_uint_rlp { if !bytes.is_empty() && bytes[0] == 0 { Err($crate::rlp::DecoderError::RlpInvalidIndirection) } else if bytes.len() <= $size * 8 { - Ok($name::from(bytes)) + Ok($name::from_big_endian(bytes)) } else { Err($crate::rlp::DecoderError::RlpIsTooBig) } }) } } - } + }; } /// Add RLP serialization support to a fixed-sized hash type created by `construct_fixed_hash!`. @@ -64,9 +63,9 @@ macro_rules! impl_fixed_hash_rlp { let mut t = [0u8; $size]; t.copy_from_slice(bytes); Ok($name(t)) - } + }, }) } } - } + }; } diff --git a/primitive-types/impls/serde/CHANGELOG.md b/primitive-types/impls/serde/CHANGELOG.md new file mode 100644 index 000000000..700067d1a --- /dev/null +++ b/primitive-types/impls/serde/CHANGELOG.md @@ -0,0 +1,25 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.5.0] - 2024-09-11 +- Updated `uint` to 0.10. [#859](https://github.com/paritytech/parity-common/pull/859) + +## [0.4.0] - 2022-09-02 +- Support deserializing H256 et al from bytes or sequences of bytes, too. [#668](https://github.com/paritytech/parity-common/pull/668) +- Support deserializing H256 et al from newtype structs containing anything compatible, too. [#672](https://github.com/paritytech/parity-common/pull/672) +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) + +## [0.3.2] - 2021-11-10 +- Supported decoding of hex strings without `0x` prefix. [#598](https://github.com/paritytech/parity-common/pull/598) + +## [0.3.1] - 2020-05-05 +- Added `no_std` support. [#385](https://github.com/paritytech/parity-common/pull/385) + +## [0.2.3] - 2019-10-29 +### Fixed +- Fixed a bug in empty slice serialization. [#253](https://github.com/paritytech/parity-common/pull/253) diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index 77a70be6c..929a60126 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -1,18 +1,25 @@ [package] name = "impl-serde" -version = "0.2.1" +version = "0.5.0" authors = ["Parity Technologies "] -license = "Apache-2.0/MIT" +license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "Serde serialization support for uint and fixed hash." 
+edition = "2021" +rust-version = "1.56.1" + +[features] +default = ["std"] +std = ["serde/std"] [dependencies] -serde = "1.0" +serde = { version = "1.0.101", default-features = false, features = ["alloc"] } [dev-dependencies] -criterion = "0.3.0" -uint = "0.8.1" -serde_json = "1.0.40" +criterion = "0.5.1" +serde_derive = "1.0.101" +serde_json = "1.0.41" +uint = { version = "0.10.0", path = "../../../uint" } [[bench]] name = "impl_serde" diff --git a/primitive-types/impls/serde/benches/impl_serde.rs b/primitive-types/impls/serde/benches/impl_serde.rs index 6aeb1eddf..fee13c550 100644 --- a/primitive-types/impls/serde/benches/impl_serde.rs +++ b/primitive-types/impls/serde/benches/impl_serde.rs @@ -1,4 +1,4 @@ -// Copyright 2019 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -12,13 +12,12 @@ //! cargo bench //! ``` -#[macro_use] -extern crate criterion; -#[macro_use] -extern crate uint; -#[macro_use] -extern crate impl_serde; -extern crate serde_json; +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; +use impl_serde::impl_uint_serde; +use serde_derive::{Deserialize, Serialize}; +use uint::*; + +mod input; construct_uint! { pub struct U256(4); @@ -26,32 +25,77 @@ construct_uint! { impl_uint_serde!(U256, 4); -use criterion::{black_box, Criterion, ParameterizedBenchmark}; +#[derive(Debug, Deserialize, Serialize)] +struct Bytes(#[serde(with = "impl_serde::serialize")] Vec); -criterion_group!( - impl_serde, - u256_to_hex, -); +criterion_group!(impl_serde, u256_to_hex, hex_to_u256, bytes_to_hex, hex_to_bytes,); criterion_main!(impl_serde); fn u256_to_hex(c: &mut Criterion) { - c.bench( - "u256_to_hex", - ParameterizedBenchmark::new( - "", - |b, x| { - b.iter(|| { - black_box(serde_json::to_string(&x)) - }) - }, - vec![ - U256::from(0), - U256::from(100), - U256::from(u32::max_value()), - U256::from(u64::max_value()), - U256::from(u128::max_value()), - U256([1, 2, 3, 4]), - ], - ), - ); + let mut group = c.benchmark_group("u256_to_hex"); + for input in [ + U256::from(0), + U256::from(100), + U256::from(u32::max_value()), + U256::from(u64::max_value()), + U256::from(u128::max_value()), + U256([1, 2, 3, 4]), + ] { + group.bench_with_input(BenchmarkId::from_parameter(input), &input, |b, x| { + b.iter(|| black_box(serde_json::to_string(&x))) + }); + } + group.finish(); +} + +fn hex_to_u256(c: &mut Criterion) { + let mut group = c.benchmark_group("hex_to_u256"); + for input in [ + "\"0x0\"", + "\"0x1\"", + "\"0x10\"", + "\"0x100\"", + "\"0x1000000000000000000000000000000000000000000000000000000000000100\"", + ] { + group.bench_with_input(BenchmarkId::from_parameter(input), &input, |b, x| { + b.iter(|| black_box(serde_json::from_str::(&x))) + }); + } + group.finish(); +} + +fn bytes_to_hex(c: &mut Criterion) { + let mut group = c.benchmark_group("bytes_to_hex"); + let params = [ + input::HEX_64_CHARS, + input::HEX_256_CHARS, + input::HEX_1024_CHARS, + input::HEX_4096_CHARS, + input::HEX_16384_CHARS, + input::HEX_65536_CHARS, + ]; + for param in params { + let input = serde_json::from_str::(¶m).unwrap(); + group.bench_with_input(BenchmarkId::from_parameter(param.len()), &input, |b, x| { + b.iter(|| black_box(serde_json::to_string(&x))) + }); + } + group.finish(); +} + +fn hex_to_bytes(c: &mut Criterion) { + let mut group = c.benchmark_group("hex_to_bytes"); + for input in [ + input::HEX_64_CHARS, + input::HEX_256_CHARS, + input::HEX_1024_CHARS, + input::HEX_4096_CHARS, + 
input::HEX_16384_CHARS, + input::HEX_65536_CHARS, + ] { + group.bench_with_input(BenchmarkId::from_parameter(input.len()), &input, |b, x| { + b.iter(|| black_box(serde_json::from_str::(&x))) + }); + } + group.finish(); } diff --git a/primitive-types/impls/serde/benches/input.rs b/primitive-types/impls/serde/benches/input.rs new file mode 100644 index 000000000..5673f1f52 --- /dev/null +++ b/primitive-types/impls/serde/benches/input.rs @@ -0,0 +1,25 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// Hexdecimal string 64 chars (32 bytes) +pub const HEX_64_CHARS: &str = "\"0x6402541b4e3c2ab65306aec48fce5adedc60e3ac465c3d7036c731e0b2e49209\""; + +/// Hexdecimal string 256 chars (128 bytes) +pub const HEX_256_CHARS: &str = "\"0x2568f30caf43f5f11ec121695200e166f89b17149743da42fdff7dba5504c527b34c898ef20a61a45109b7a6e6a516994567932b31478cfa3a3bd4b058e36e88a51400403be492afb01039910c45376951d2bbec9a838666404b14850c6a8efe07b30b7cedc3e84f59b678b5d812feb4adc7e2c39d681366563ef45669a33a7604415e46a3df671968e38df6115c80eedb96dd326a273404a4b9b9957055d22e7091d3e663faa6c54a48888f29778db6f7c5199a2ccd2237c265d30ea67aed475cf459ca8039831971c2e04eb6b89951d4ba472ef8b196e60e2875e3ec2955a07956936d56162d6758eb0fe09d2b2b12e27217f26aa38f90f5def14ab7c215d3\""; + +/// Hexadecimal string 1024 chars (512 bytes) +pub const HEX_1024_CHARS: &str = "\"0x102450f41fa547cdd5cba0f648912b2890fd9081889150edf41c97e3d23ab44334bba15ec46b1052ab86885328c3935bdd2229b0e403fee8ca4a70d6f47363d7e26c9253a97d625b28aa40a67133c6e3c9e6c0ea70bda6065755262a4f40b10d01a234adf14e997278afea3c869ec300066ddd474212d3fe5bfe82696aa13fd69ebc7a59877f23eee7245e96ad48feb897fd2148ab536a4e1220218499ba7b87dc9c6c48ee5186acc6699dd335ede2dd90b5d5c8292aa8b04e11656a053f0bb3d781283b6b1a2c863cb0ac60e24d0e194dee2dfe8cffcf96ae5e06d96026d36b1f3540e17db1d18e1e20d4f00d6ed5525bb826cd9a9c54b6c906b8cadddee844887287188a96c7eac7b86348814cc16c1348872364a5910f4739f914133d30fc6bfc99b398c526926c214f4c44792061f3f14e7daf2a0133f1eb070b9e5b8153c4d679bec659a76c81fd15326688826ea49c4b587d81b4fc483783683d0ca36331d8aaa26e8f162ce787cc191514309f6c3022d978f8d61a4db338baab043d697e2bc1c91aad78cb8c6a285eb1a2e000546b43b485985dc142d68f4ff7ddf45022ef2d059aa3871284de11ebce33ac61bc7419611b0d3c2836def6b980e13963606b06f34b2c4fa91b3a867ef9e0abd98217e56869fe1f9ebb720ebf34752d9ce049e800162c9c48eef2530653f9e049db9cdfb691124ea8dd6d296785b44bf8\""; + +/// Hexadecimal string 4096 chars (2048 bytes) +pub const HEX_4096_CHARS: &str = 
"\"0x40966ce6355f1dbbfef6ea6f1e131dbc6425bccdf60440fa53df8cbc55554e38a5e0f6899004b4ee7e5582349285b284849cc2a5f9a10b977eabff784d6be9fbb3361c3a0f188656a7dfa020e07e653fbd0ea76816422d76f6160d38f477ee5b16ed3a4cb7b6e28bc62c15f944ddce23439493659501557a4dbb8404bb0db9080c879c8d109127174e9b667a6f84e3f00ac7915a7ab971ad2aacb69074d5b804520b10842f253de4843ca0d594b9bc4e1888c4a291ab79b0e08d0100996d988f14e15bd63c97081e36ac47f546ab1c51213c8ca1de5a9ec261f7a4392c4e0e167a82142e1bf474e752889ee17a910ce0936d05e99cc8dfed0f6ebc63d723f29e5103fc74fa5168023459f9b4d73c98b2c4c054f2b5eafbd8460b922b7dcac8d30fcd15b585606191a5c69d8c0a4e47f693d1f3e5f278962715c5106bbc6b6b05313f83950997d002f01a81b6e3bc999049b846e1a19cbf7c4207a45548356c007db4030b13e4f4d81b24496ded0f54aeacc303b2f470474934057bfa0e27b95fa99bc946d38afa6b1ef6a3675981b2d81c3e58e12092d0cd9f1dad844cb6145f5e55d0492b21bf32b282831536232ee0906abd2dd0ef33d3206a3f70f97af1fbddf1868e09d82b1c9825cfedcebb05f5e5c37688f735732284b8608c9db57cc3a89914044d1dfdab36de3e876673611468650423127f28ec92a78d8e9de7065357be7bd5e052d009a445682f99a4523a77997af93c3080d56737d2c258637e6d245c28515e3a15172afa89607ef1cecbaf9241ac7b29f81010d20333dde8c8b2e0df7421336ad623a154fc2ed58c3505c8db0d1ed00a416a75136a98c901aee8ec97f6d7b5949df429e411f98f165455f000234441825a5456f60582a3f0dab0f831ac3f68934cf5b35b3255761c53f6fe9b1044d164a31a344b966d1f62403c6fb615b8c68fd5af2afa771ab3fe9aed6ac8a6818bd8bcf4d11512bf36f6f01d01c78d137496f1f42265430678cb329a4adbf21ca8d6a646f7543c2ab826f4251e196f21f4c6cafc1b910a8a83053b00b092a2effdb45935218939248a54a0d69f3d269b6e47039a5e56edaf0985b37687cb6301c57be754ed2e08dfb6da9275cb4dfcbc9f164f90762c41e1d618db827021db646041ea9de903bb9524c25d89c519e631ec1048694fa2faa77fad75af23c8ea7e5445c064aa157ec2c286b4ee50463b6a5b57b34fa52720293c18f059787a5a386599a58d7e5fee4aa301a6997842bcbfbf7e8241509c565b1ddfc20a08edbe6f80417b879a10b0ee75f34c4469187166c39834a98d40b40d090a4978dc8acd94eb3c91a8b9391850dd1520935242d358a5320e472dc788c779026258f014daa224ad2c0f49ba2f72a95ad4712305c9d9383106b72ee31ddc62d9f41a2dea577fa1aabbd003b620b6959df8b939cf22357cb3d4785b2821d0d4d756228b32cbec7d2fa7d09c3f8910d97a90f4e919c3dc1fe8be1157a574dad0cba47edf41c13fb96cbd0dbb79dade0d25b1dd08e2bc2467bc60abeb215bbdf4f590311d6549168ee5f1a66ea32ec81ecf76a5d16eb7fcd66d706fba91f2bc1745e7609b291ff4960de1f7589baa9cc841e58370a94a898c59fed78c19d7e4277e2039a0206c36cf57e467b4ed699cd04de8ee1ae5f537b6f9dde0ea230f9f23aa7969ea9dea7d5d1252ec048084814d1f3c34d058b559cb4dca6f5cbd4da065b161a7f006378409a14b8c9f7a4b8b52bd6cd78dd8c9d429a3fea8989e7ea1e3f09a542b5d5fff5b215ca092162203a05bdbf86fd600f5f93ae0aa60ba9515f38c82b8be4dabdc768131f4b1543c9575e8ade19687446298f1948702ae45bb460fdbf6c007d080396a990fe77054418495e7b8dfa184bab4e7fbd370f5235bf8942411300e9c885a3357d8db0b6f7f4d7b4092918d6e17577946e46879a31d10d5503fbc391d6c96aa4f2f8a5f676afd8f11dee8190fef1a67fc144821175eb5231e4a34051af080d48af8950e17e3de652359158d79b4be46f30bf8959cca21dd093bc1aee5bb95a6bd3430ffb5cf13d7ffe0eaa0e1dd644604f01612f4947ff04e6a2361a7c28ef267ba521d4d4dcaea282dd477191225811d5572cbd1d2379455ffa7f47009b4ce867f8592fcc3cdc8ae71c391e5c5c66f5c0af7a8cbfb55428129db96ab0307ca9efd4ea3fac8f0bb37da6c53def450ef646f930632f3d2e8c05d787171ae41310529a94a3df34051a0b5bbfd373bab4a3ad222a283a877ef116cc4b86427f06fb9ffd31839274280b6a7beacbde9d29db7ea9b95eefb633dd3af49c6eb48c86b76e75ba688112eb313396d5523418c6503eb3613e8ad3904a493c6f40d5fcefb700b123a494ca5d5642ba178aa834995b46326a1bbc6648eadfe0c486b900e173aad2391e6d94ebda7c3d21c14042115c050eb5bcf156e181d086c0acfed4881b781a2baa6dd9073d4554fb3e123d2fa260de74df8917d86f0
0ddc4991efabc78ce583b5f5f569069409392dc457bb9cc998a0e8e057cb11aadcd14fd9ab65aff62af1641b6ba50a93ca1f6168f20b0f3b77f0674d5ee5b9b9d1e265a424f20721b627b053a38b4a435be65bffb7f2d01200aa41bae45df0039c3f767b165cbd78b16f537e35ec0d49c2e1714d04bf92421a5a29ba996a0f5ee76b9f22f065a07077ed549aa6be7267a0abd683e95092e706d182a3557393eaa745fb216504c23c6e1804355c29caedef6648726e55c192c227b70b7dbf067aed51ce685301205d9701c3fc9c8478ae9391934ead83e91f897983f7dd0a73cc15e4b0c6a6a321b6380e157b61a888d3cd35eda3829a563569082f24ed1269d323cbe9dd907c494b85e3c4d5b708097f4f\""; + +/// Hexadecimal string 16384 chars (8192 bytes) +pub const HEX_16384_CHARS: &str = "\"0x163845494c2b0c18961062bab4aaca8139fc0d25421880bb44321d6632cf7f4c4504f62524f7a86ae4475cf99b5b4cda8737a27f14ddae73a8c7815a60603e6f732dad7b42a94ef4cc9674b7c73c93b6baace28b61879755ad8261917798e9b158219efacd1c25689e1d1c085cf314808e5c3c1e92ea386a1e0c8a5fb1ae88e2156052ecde6aad91a78cba4cda9632fa7c41d64845d48a51a1ec922567e530184cd9fab6d8c1b19612b350ebacd2daf44dc32a969496f0e0cfea47362fbc387bc8cfa7f8a9efd797bed8d1a317ca217226aaecb0f58cc29cc4c2f4d3c91f6367b2db2c4ffc44243486bd6e5ceae007bfd6d1bc44f784af00df58f88b1a999911c256038e04a56360655b2ba92841fac013ef8cf902720c23991292b6e540f95a7ac9b7d0dfd92dd7ab7e757a80cb170acf0466812f141bad1b8047b10ed98f51fe1a28851b8019a73721c0b29815b6642587a77d2711fb93238a6005ca3613a049546db4b6626ffcca2c8352b02a4bcd514e76bd2c9e4f0a6ac557deca69ccfa41caf16d276e06b272d6bf2c680828dead23c3b6d3111201f2ca98afd36ae03da4b8a620e48cb8ae05ecf8d37d21840052ff426543a141b7f25e27a7cf94240945b0fa69e9e4e566e2255f677789cb26fc129b7021221af3588711cf9402a750d89e4f9288eacadc2bb64a6782b9c3cefd3faa57c3796ab150a9126d9959853bf93039dbcf8a0965dec9e295d5fa7e7f8d63f115e14d9e9eb531eacbcdd392cfa8c6eb499f55a6660b1bd9c2d40ebcf31581a720b0b2576ef202f739ac809a7ba7c7fe87817b6e76f02f8613111621d615aa57c9fb914cc8587a6e7f1e91a82381174e8ab011d50e86b826cd945c41cfe3608b5785c6c496b2f6e4ec9f25059bda78a3af9899426076b07039e5072b464b94ec5b5544ab15151ee583a3645819caf2df574d81079cd7ada1baf296db81e5c046f4385aad4554903cd107c0f78759c5dfd327a022a3a986de84a96e03858c3cbd476e4fe25645c437dd4986c31cb7fe6fe82d80f83887de253a6601160ccf4ef82d5e556c5fccb983bd8da0556983520b866768505e73a5eb9ffefea262dfa6218ddb143eff7f9a9b2715c526d3bcc4f1d1dfdfcea8ab68a005d0192ccdf68d2da67cbb156ada92df641a5d9f18af4b76485aa84565e9deb41c6b29ddef705c53801dcf6540dd2c2ceaa7da6bade03ce40e1e44fafe7edab9fe2a89ef77c25a4efcfc456f472a6502020a196beb2c17c25977ef7c328a19c3636ef52ee8afa78f5d31002bda28f33156a3520b4e40a6740fca6bcc003b2fddfc33e7bff9c443350d4e2e571a0f7b465142c905c1d98c5ecdf887b494f1956a1f83d5952b4618748852937b900edaae2e9b8445eec1346a7b95b5ca36bafb9c45bdb7a895f1bf4852ec217886b7d67bfdf2711f73b56397c3b5f7138fbc91434d3ec3a1d8ca584d32691b82fc0e59f8ccc0ea9aef5ab92eb2f93fec2122142ba912832bb22a1fa594fe4b14de7f3a429bb6325fa6db1d1744f878fa2ccc630c02bf4a4a6c8a8fa42e22771dc2cd252f14c2cc6f0fc3a6fdd8c7ffd62adc26efee754b0fdfff4678eaf6dc2d8e59778b9cf1b691bedc4395b0680353be17acdc18d33101a8a2e6bcfe8878151b4480a677d47a536b41e974362d5d7e9dde2e6122b22d6f056b4e1aadbc6c776ce7c2d6248c66a3cecd2587e9595548661b86dd6bf356b2d28d1cee23813580b25cc863ef8da98cb8c59b120fb6c371474cde9bf971ed5366d51419da90edfec0b56127cb7d7d51d836cae2b1f83a3563f216bdc5bc69161d91af42441b22e76e80f91a51e8635296ed4e9ae26b627f298f973066eed623374d062ad3a67be902ac50a96634437b56db882a0f24d058213187c740260eff87ef41b5e987fd18c96acde50908eec87efd3e0ae75da18113f86f04b4dbbc197e6faee36121f2f03ddc995e07eab68a099931aa288ebc1c0889ffcd793a9d780b736b85d036bee14b5874c669e425540c7d2702e0c833a5ba8ff9b17b7f44b0612
dcdd1321d8dc674a56c66abed9cf920c2b5c8eed06fadd784ebd0335e89c7916148b0f28e8a54e6881cdfc2a655246179a23ee328e436bc1890c821beefc4af0ca86923404ee9ba01cf42ceda8801e9a6507e4bca2528953dd9ddd6b4564ab7d18b9d40bb2f29e5f9a38bec25aeaf89f432ff295e496e2cf76ade09b5c9f65dc5807e1c1ac104d6f8593e9957504fea1cddd5add90506c277f0a2939955306d9fd9f5b83ca7c0a7e538de323aac21fddc9f181024c60c7ac17e593704345fd1d51bf336e96afc23fb7704f0fd78c17e5ea09633a1a4e81eb09f2c7cd8366181f8437aac37dcdd23136d38e311da09f3132a6caf29b6a2428c103d6a0de54e4f74deeb4da8142aa405653c634bc5fb288a62a310949aa03f8474b79cc4d578b14fa9ef9f970e28e747cd89d0f3dd0850f1a34056e70596e1ba5e5a3525d0fda0c1e1e7a40b0e65df62532f2c43c067126c050037c192d31808ca7deaa42eaeff832c9301751a949906d7ff030fcccc510fc3790c6984976bb8580169f0de5db4b21ca214916059fba0ce6e255d6624eda87adcd5424d24e5506d0100ee0aa8d0baf89c2535358a7c244496de98f6533f8c6a4c35890217876c4254dd3b98b707694b8b42c6705bedea7b125a5cee9eb280826b6a722eaf18e4180c5f7b30211d1e983a5055714defbbd2f225b1cc2219201dc1783aa8ef36d8bf5ee33ec0326a127d9a7fb7b50fda765e3a11092104d45e64e07ae4ce215bf5cb27ea333d1a37db8a105041805e1e2eba47135aff7abc33175aa373a1d3621756c82c186d62df1586d62281b93795adf3c3ae233ee701bae359589502b6b4b2fa6672cd2b4f3d064118a53e2beac5379d05a623cc95537fb14fa583bf0f03366a9651cd91e81f20aaf36098cbc0bc8c52c5073196ae40828e01a266e106c3da91ae60bb9571a610622715b1e5be5e0954bd948e441b1bca32823900324c4b8ad21ee701c4c0c6e4b3e9fb71683b9bf1a6e4cc2cb3a21bcfc5153f5835a309722e4567d7e66eebad6abd0d2e721e8513fc7f266c9d8230cced279e5f6615b25648e6ee5d71b9836fdb0cd27e99d9884f288cb3d64d7f6aba59ed5ca44b645c489a497363fe48e965a03c738771e2f3f180278ceeecb60d69c3391d445c0c2c2af8d9e72fe2973961a5c75cbcd1dd75ee3417900e70ebb4c6f625e5cce11e4a1e64644aa524156f5cc0966a1a07e0c82cca92a54f7549136f2858c66f5add0c90735ec9e8e8b89d99730242437cb565a79d3c850a43d50f4af8791bb68284e395e4d97ed183ee555fb81f871f6735dc5da96f8e9954335695f2f637fcb939250bd869374d5929ba298d8873c8f9687f22d4bd45184d96a0e26d178a1e413364d62e49f8381db668959f3b8676b8e5e08cb363f3a51b91d27738efd10fb897b1918895b75585174a272934ce2a8eca9d13019ea4b9abd86573fbebb45443ec9302a65d3594f6da39ded12ba4aad681cdc4e504ed7dc69d37a7f27836ee24bd1471884acc6e8194cb7d66e099e455ea72c211620f723ba7089be12fbcb93bdb9751d7718d2c63972afb3be135d83f413c1e1697fa3ee4a20177933973870b40e5bdb73ea8401e4846238306b904fd028dd7706c26909f96f1087fcbbe87aa053befab4b6ec8e07486dea54a4bc8c3c139b3d010fb6aed3563f2174f6eb46f8eea68109738ad14f381f519b39b45a50cfbc24fe84eef9ef99241ef53474561831faa89499dca951f662dadef973837ff6f72cd442563b801df3a5114aac4dd85403b0600698b09096d5c90daef2b1b84d7a1e9ccba1265d8893de06b1e8c642f95eb8dd26877fc21fb1c828aa8eff954553addbaf88f266b25b5ce2472c46a38eee97a52296bb6b66dde1823960da350e69f7672f8689b281266413f0932042b4fcf0b8a546292e492e45e2d94f8e4f0115130a9a464e2236b954875aecf6fe0925ecc1a010f490292bacb1f2bcb1790845c7dad561bed04a403340d1efe1cdb4d83e00bc688d5877b7b2ab52bb16d11d03bb1e1190e9531f8a7380aaf2447f1f9f6bf1d389b05c897705f52bf215d1e421983d2bcaadd2064908f84d344d66ea470580248c704417aa2488736cbd72a7c0a436fb7b9e6f8aa76f8c9dd780d222a4652674f624fdf9a4b375c9568db0369f37d0a636568980ef3a411b49720096ba31b3b5604fb8437e007b092faae0eeaea1549745c629a92fda7195c5a28ed014d3452b472851638a0d042ad5eeb447ceff57204ba469dc870c2c31f7e2f35319666ca6c296383360fd56df57bab17ea26c438ed0eac012a4bbbb4f7fc0b7a0126243b676f89e4108c88e0dde8799006a68540c294d244b7de1eae3217e0cc6b5769800db8aea252114ecb17ca31ee3520281a5aa6885fcacb5c33ac32a68dd2555d527e611cab46e54cb514ef43a6c0b7b087246efb89fc7afb1336970479a64cfafca89d09565b03b1b260ae09a950d5828e14fc6439576a60
c3f348ade4857a151b024f32a1f67318c67dbe9532a4767882331ceb6ad429bcceae5eaa93867244b2ee38515099b674a90643625b6ef54f3ad687476ef973d1fb7f92e00fcc3f572b6b913de830dcf0af8b1567758e1fc5fe7b529783d72b656fe29bf7704976ec4aaddb3968f6b8410cb204b600ae552206c929a1d589f76de8f00fdee424a39de97a162c84c17890327a9b0ccd96cf5f2784a9053512294a4b2a88dea2dcdcd98eebb1f33a26465f531986d1515f5704eb782723a3c757ecd5c0d6cafb75d8444cbb825275754fe373c4575668cf8249c028347d4372e4cfb57a408b2e4ab268fd1d14c2d34afd8a8dfed7428bf9d5678038fa1dfc9f47ed9c974a959fcb3965c20bd8f288c5b29e9322977792d6079487dbcafef61deff4001f82c8e1e31e030ff920bd58d69d9d3d0e5b0f3c9b8477d15354329bc485dbf280f51379cf8d604736d4777e720ce6ce8df339661c3e89b10341f40935c82ea1f5e0c336d859aed5e9de49f916c409a31434fd18a17b06b157b2cf8fd4b1bd715498dd8f5c57eb7328814b58686ce09d77da69566a6562a6bbf319c2ababc45c9f8a01a4834daaaedc77698a1758638cf4083dcbf0ef3e60544824bedd389ffa4c5d75a97ee56eb1a6a88155228800295e079e4717014431d2820867e46b25287842a7083e2a6a8561c26d206612babfdc34b1df9ca4a32a4f19b0f9d98b07b9d7a0dcd82edde6b0454e063345b637750da75a239a9341558e1e3540f961f46d8efebe216293e24401801ac65f82bc1b702b38995a3c69399645ff242bca25efd4a2e85330ef9023c8d1b44d9264532e647f1190730cf24abb1dd3b0dd567d4d3c2ab707ee74dcb1aa7d0eb7488ac711d325a1eb3475526c41f74ff8f91bd0ee5507d50349496792a2461fab14cdedfc222165d0438deea93bacf5818a0af53cc2de188f28794a591b3a11289e2e0a201f54e30994aeee5b46ab5abda521777e8fb15a53472bf63dc526c8bd75d8514fba0c29da4a79e957a66ea77c37aeb7025f24b2eac7782356521827e60293d713cf6cad48cccc83bb9454dd22ba43fd23c87812e239701dff8c949d22d07209e94264b427688c9970d9bc9510a415ab58f08c86ac38d80ad0271635528757b07babb8c31cc0e1e9c99bf7a07d99d920835ce72e85ce5ed1602aacb778962e6aa93af14057efd051561f77996243b06c8da26fb46e696216620dcc9bae5abf202192d86a1b6d6c002225fceed3be9a4e2a9c611392245a6b15da0444a24f6f2a72210fa863ada30f7f27e98eedff1ac101c4f3b2d98bd09dc5947f51cc24039050ca52efcb2b5633a84bf1873733547f5b96106b74de88b88917be5faace63e906e3a6800bd41666be2c3ac79204286435fe7bf763de9ba28c966ca555e77e41ec831b1caa9cfae788a3cd7c05b7244349b22bd9424627352f8db369e2acf511907c88838a27b7fccd545623f5673f50dc794a6b7bdd4582198682a4f8bac58f5d3333d9cc205e447efd4630280c4083758e44a4a41bbf4c09deb7f48d3865b9138e51fa19a4423d1e40dbf7999e0883427b75e5fdc72e137d6d1a64938dcf1453559ce972c22a6e2f8fc4c8dca1c71cf25b6f442a0525613f6e3cc88068ca52b567dfc1e1ab19de44ec5562bbb47b4face30b46b1ec34c0370e0687a6aea111a887549c49227e9468309cf0330d68a72f7f2620eb0eac1380aaa277fbe86c516ff0c3731419cc103c535975b64114099769bef5e9fee5396a2063b11657a8968431b7831fe5161f8e006144d0a3a015367d0a9ae601cd52814efe1d8e3638a8a7c326017cef14c5bcbfddc1c96e7fa91d21a626ed1295ca149100219a6d5f7c3d1afa77fcb499bcc3223a5b32d2aafb367f467a0015380f00b1db790d0bc18beaa64cd409a0f94d7fc2fb02726ab95d4d61a4e0c940a835cc8ba7d25d466e399077c843fa033e1d600276034f0b61c31eb3a183784e112c89f2e8a64129ff5b4e09fc61d8d463d01b34557d45717db21e1d0ef85750abd487cc276c45a1f81ce06912896bf45fcc80345c27faaf0c63e303be7b44f001990ec3ab919cd8e7f78cc1cde691f4ce27a7632a983da200bc9d24e5870cb4712571e71f418bbafc40ea6555ce4da37eadf5efbeab4f6a280530b674346a5f0a0a90133e37072e5d94dc7d26642b152978b47685c7c973257045e3d0b6dd96cb5845d5be690794fc3b6f3f1867d1e646ce422e16c2da8258391314f51a91770600dd4faca5cf48ba1284c96ef164cde29e48ca17ede11561aceaf851ea2011586d27f93a00b020c9f91598cb6505e7697f2d114da19acd0d4078c2638cfc8a871fb6e5ea61eafe9b2b83f5b32c7bfbeb99e5bd598464507678089ba1965e9920e2594aead6366309ee4b2e1d597362017d1868463ef38281aff30835c255b50d34ccf87afb4bce0045b81a39ededcd77789e083052c8e03a68a507053eced7bde2b0d5fd35f3081ebbdd3
df2120d16c1dc892c06b6f19cfd32e4bbfebb0a985329ee1013ee96c3817ea8514311041847fa45af7d4750e3e17b666b343b92aa5078f9f544d45ed312b6500d124d86388bb7f4c36de5439d9983cb216945e0fd389a76d7b467d4a14414ddca775f694374784ab251a35c26646c14db98d85c7d8e2b318f60d643d879d5830993636a579a333742c20ecbc6049e1c7c8bd58f5009200c26707eb7acf5a4ebadf88946d3260ed5011e4dae9b7efdebbc2251a69462805ec71bd3667e094f618124fdc68dd7e38863e3077b9e07428067d7dba602ab4fab22c691359164ba1bc9b9b0ffd1fa946cd0d28aab6bb46e424b476b836e5f8f01346a7aeb2ed7f999d4d75d206412e318423b541968c2ef2af0a275b97dac972d6c367b93deaa204339d28cef59752850b616c26be87d3d4592154164b8c0a950b9dcea06069dd2d480433b1783b2e048eef04137443c1b77d980cbc81818d97ef6f06de604691b29ae55f0d749ac65668a024423803eccd533e015537ead53d2c8fbbd11376fe3509fd3fd52aef61c45dfda36983de690278b65ff7af994465b7fc815a597f660ba443b8a5daae31b5ae35ea808fa77e3c115a7abe067e9745be0a21d23aa2e22c54459b477f1ca016fef63fb2f22e7a7a7deee5879417eb7dd0ad763246e64c8de5ebe501264d7b46f200a0734e8ba38cae4f7870777ad15e278cd0a031df412d220458aa2e4bab93424f447c6a5987d96553e1d357c9ae57461d6f7f4a8e694371673a04e1463b523998d435cdd863b7cc8971b00b4e08e7bc6bf4926c3cf4670cd613794287316536e33ec5eafb793cd05be39f5ef9a9dbdcfa26d0efb24eeba5d4befccc6c12e5a97012453c7bb42991f705cb773a6c8ae8b440b29eebd8fa770d57ed28f05e452cebd5785ac71594f5a5b8e5338d0ccfce3092dadf7b093615c7276b408305fca2edf355a08015e5db91a5a0182788b0fe007a99bd808c36d501e33ede3f6c06b82db8a52e20a66ade5f7ea162ceeedd5748266661b9afae3c115495039bb7a903cf9649a5a13d299631c8957d966c18fecef06d7e8f73ab61412fe9dcf7ec29468ecf84256e1940d0e9df74d38f75fe248692d8f012793f0989c447e0b5d9653a5f03952389393556bcd55d93e24870cfa874bc530a04678f64f674ad029d8d0a50f7467a193b0499e6b7a823199915216e428e33b9fc6f3d7db6a0e0ba5d7e2690bbf489abe48c0fb85cae65f2de835f71ff181f912bfe345518ec8f8251c47febac277bf8337d185df491dad9885076ccc9e5bdb1a69bf2c3163571bbbbacd994c051c551b4ae6074d911fa6a15dfa4429b4f23dd79ccf5ce290f5a1351b4dfaf7f12ae15b566989c00d34e255c9b05228f9ee6031fca417d71ac75996bba84438da99a9d1bbdabcb7cc0ee0c595dcb045c1b45115a8516c7eea6f7707d6bcadd18d0909f8d3956ce64a9028ff00458d2d415eb54276f63191b72ed2c023598255f962e3ab29bb5bca6636ac72502cbc1bce19237f0967395db40fc6e018301bbf3a5b9c3cc4d03c7d98aacf37d4e2ef6d8e5200598ce5a5b4725b169337318f2d036ecf4918a656d204efc2bae2db3073b4098bf7b2ddaafd87e2742063e63fe68da5b1a68655effa3e389e0387a3b658cef44e4c025d454d056d44bcfafffca0c5101213639324694db4ba7cd1fbb6adb983c714cec5b9a8c075dadea62def609f4eb500e676c2603ce356ffe2c816dd3a839c6e877f177d88559d077d3452591d25a5fa3946f2b0844a22d58027a3b0e7d0c275cab0638075ec17e8ce9e76e3789dba14c9905e21e3c0cf500bf913741785d3437167150e271a7d1b192bdafdb17d5324ace1fdd4a243614293172594ebc5765aa4efb42414adefbab07ea17b5decbc9cd8a35223ddcdf1254af01ec491f107e6302c17da3a570010b9ab49d7d803b8f533a17e32dfde8cbed1ecb2494e3ace8f19ab9eedf8e9fc2d9e600c8160024d9a53f4b2276cff3a43e6d9f53fc3212d95e90f2eb5e17a9b40be5bbb71b927577ae64ea796b5b4c7a52629065133bc179f100f9bcd61f74dcc3bc4673fa97b263af4a7ae0500856e832212a81a371055c27849cf658f765bbab2c7fdaa40cf1ff663318167b339e303832c48f7fe7b4833e8cdf75529a7815986cbd43c17f9214c89ff215564f69596061082838027157de5a4111ca21b92efa9bab7a692c4c0feac3bb7ff36d2603f4d501d09a3b1134b51b6257f164643490e4fbda1dff16bc9d20359dfd8ad59ae8a8d2d74495f0263092edbd8b9be2e5bc6ed913f2776a294025f0b1decf479376cc9662c5912102fd10705ffb8e40c92c9defc85d27e363b80f4eb90cc1fc65791e673f8e3230526e433cbaac95fc9a5a13e99180ee5dd3707f6970932bc6e0e224d94fe6b869ad86e495e235f2cbc860900b1dd031b9286208617756d0d1813e1daa66c2f3e733541820ac880058a99fa58f677eb626dfd3a8d4a50
4049a47c35e8ec14f1a737339cb710ce3a83cc001b84511baa9e1aae1b1df909839344dac908f4070e5f13b48e1fc1a5c045922a8f03b721d2368634890c137c1c16b837ba5c89ed8628dac3d6bb4d717eb26646a1c5f3d0d438add2b4a42a8d88ded1ddbfa6b5deed6e1a37e3b9209542de006e261a13f734c8e4aa2fa31ed167b8ddcd91227737c406b344dd44ab844f9823bba1eeaf58145c7d8a6392bb02480109f4c3dc9e86a84675dc2de6104c1bf9424a4ee13b5fef7e81062d56415a29d999c80802859d307e213c3bfc904502dbe89796f67d1e68c090bac68f2655c8c49788820ec1a9ee5299e49cd1fce1a8751fa11226bc3898417fea48d969f9b1e6b33a079be36c0f6ef08682e2901edaf9c9216d1c5778e17b70a7307c93509ba2efb6b1def5a84cad094d865d5757d55234cbdd87d45f9efe9548d54b641dc6eaae8671498452e6f5954eb1b0021d349eaf7dfc2652153a6a7dcf603a392dbe359d119342043172cff2fcaa22f41e5e4b26cc034398b1fc9e60c57b8659339b4718aac5b1d9f9d47103b4fb0fc3660e0e2044943dec1ac4f6b504127e5ae77275cf287367c324081ab7e3a4405966a372ae32a09bf978322a79d7d1ce45aa22b14eb6e66d47b8c702b80d5692e6a7d380866f61e6589a080fbe720e20d10a636feb1ebc0996fc1c0fb6e2f83ea9bab7a6ab3a19880a05061e11d3ac66bc0e10cd43bd304e956acc8bdeba60bdab0572359e26ec9dcfd77f3e5f4287f88fa77479a2053e15fbd2bc0cd8f9a8294cafc55ed4820661b3046cc2e71fc081945585b60958ea67a69d8b55cc18b39ced4e96453f00aceff758a4403d762d78c634777ae8780d11311885e41a5837511e53e2321ee46f33ea553353bd825c4c66776e1b4a99b2e649445327911e7adb6a5f3a8a402be0c6c42a8019ae0c75d6b1aceeaf35227e79685bf799f5eda354d68768c8c1b7403adbe65577be3b233afa8f8ae0740c41483698388995b84a04338e702ade301f70e0e137ff503779f85f30d2d83410f041bbb608f429de659640b9bf03b86bc4ca6bfe74d0b24aecd0021c1bc3b048d7a6aa72fa4a3116578cf910c96828e647dfab2e7b1a88ffb9fcebfe6e3f2ecb499e61feaa0a87cc8271d9ca0857c2803b5ada76dbe1c163fa9454331e3292d6582c5c399f9cb5794b43ded257ce812b96fdc4c01362d45a2c26a4a4cbc230355fcdd70ba502894c7ed7c93b07e227988deaabe0f594e581ec63f60a7102c81d96723130ab4960e641b6281b04c538f7d37bddb39949aac4646c17a1455a592bd564f3608303162c395e961e0249f6badbc74b19b883e77480361c7b26d51deb88698f91266f0fee2eb30f38b9dbdf9b34c2de5fefc0d441a7510ae4488a70faa929fc938ad1d3116ae9847a94727dd607340fb2786b12898c980f3c5a1a1ec3a140598cc5df65e8f2cee0e9847440898c28507e215e29d4032e425e3c9140bb67e9a017c089cfcdf2cfc1cf74d000e30a40acca9dcb0beccb2ac6935c152de31a2d27b63b7fa4744e19a6865ef72594e98930650d3174e58e3dca0c94dcf1ce33558f2d20eb6dc519431d823b4acbd22db19ef0ee8c99a9d3aa1799f25defe64a13661daa5b5a1d4be59a970ec87ac6533561be444d1492480067ef4b147bd535611b171a19cd6090ce73e248a9a1e34716466453278333e0493a840f89f81c27f0216ffab3cd5516a2da6dd97b7c2ba1ce83db582731fa703bf630116612562960a7c94322532abe9867ba4990138d207ad8b78f03ae49be80081b9e4489800817a4923d379f7ca03404d2ec75754ccb17e71fb6b9f507de00576f89274cc3ef0409ae012bf1f5018e17590db2542480a29f4e593913e1cb63b32787789751d1802b0cf6b2c5e9fa44223601e2b3fcb491ccea517bacf5236aa9ba871aef52af37c6963c39fd622dfa386e6ae0c36548395104a53b70\""; + +/// Hexadecimal string 65536 chars (32768 bytes) +pub const HEX_65536_CHARS: &str = 
"\"0x65536b37b273ac2f126b11185ef91c1cff07c9b5081fae332ef45c4e90dc0bad4868ee6713acc2eac2d7071fa5885f8b2a9988a07e01bc7de1ad0cb0f06a3467905a2e8723987bbc96c86ac4f1503479e1089d8d90cf4d836c4d12816a1a39dd8c379add349bc3f2f543a81051140483f61e81c3f18ff1c4049aee78e1c245846ec12f1b8392028c6a2e9e3fb110d20f0877110b2f266609a3d92f0cd8b59b7385cbd2e70417ee062ee356ca191f8c68aa68243f5ab62c5f4b237033c32ff7813405c8a4bea82f73380818dbd169cd1060f4f6791b96a402bc4dc83f6a6fde353e3a5de2e626706ae897b7da6b1e3522ed2f4b0f340378c70bfcaf10a7fc805d696822a76f4d8d2206a9a8aacc132dbe770ac870b9a406bde566ac665c942978e5edc1efada06bc13781315eea26f9977f58327a188a9051c6eaa00ca4e2e8e04f26f8dc2b589c399a6160c1345816f35e33b4a0db6d33322c3ccbf82cfc5c1b28be7da5d834d1720343c1bfd8b8964552bae373ed2a6b1b6f02766f679a2c9f1007610014cd71810e8117255b0a07e5e5e87711ac4713e13c6adc899350c4b35d9a22925bd46cf2cfe04fab2fee013e3560380de3b879b01a2e03347c784eba5b1f0367aa5a51cce5dd0cce8f983a1c0876887d679ca207faac11fff8e8a64c81ea02f0aa7ef2cbba80f75eb37a3975963b757f7fcc805adaf7611d729f1a47df9e85df5b2a2a8eed2b69d657a4513472ca6b6a44a1b695c8183dd178dcd621b2066ecccdc1fb88fea080735bb2edb104b88547ad6de9cea81a2afd04f245d1d15c92955648c248501a9b837ab549030860417f7ddff056eb73e90b1263923fd6e5ebba43ccd9f839817967df5c8a6b2b7fcdc62e4a55978c0baedb046059e4754aa043fc975d57c09ce6dcf56b1cdd24a85272faa1d821853f71b0bfbff2967f4b2dae326e4d990c3417f6c990559d25ff5b3d0b2573f49212bcf33d06fabd8a971f90060b396e178c6b52f55667958b1307460827b78884022187a85ea89cd2981f88365b5e2e66e0fb55926d7eed93edb19afdef4217f64a33d1c9318cc05cfe774e0f09428ec4a9dd38ac42a48751cefea3c9a8935ab04fdf7b24b834261def53839cb68a89cc61ebaab7047cca9d5727a86d79d5944737b0642e8770974cf2128c663d0133d9555cb4aa141e39dfc10c70e2fdfae954a7dfb39af8b7419909255d2d81fc069071fc29d1763c7579e729d87fd148a9bf26262a9332b2994a024adbb8f43ca76389fcead15a14ae24f005cfec16d4da950c8c1647b8e2357ab59be554f1faad237e492d6a5de11618817da26a611f122fe6c5f1ef9a826d3905ed4c8841f9f5e3431b994d1f4b62d10877c973aa1daf9614bbd55cdd865e36e25b9241c08e3b6627846113d6fdf4bdef35eacd48ca2d76959baa1484adb988262b38013926c388102bf0551c35616970f9d5fb1fded91a25e12f1f12afcce8295aea0ae7608163798af65025a84ef6e1f3d60a5d354d55e0fe408023894f68c2e523be63725b5cf33ace0828cf89a69ee428d073bca15014e7faa83014f1bbf6797ecb80bf75ae012fbd35b48f3d7e213d85c3ef991933021baaa7984e7c92073fcb720911ee736479e79eb55046d3ce4433b669c34e38c14b232402222788c3df8aeb42ee8b487c3cfe6eaece11d9b3aa681b143f9f360c7dc0f157d613c2b85ec407f6d85b1b1876c298c0ee50ecb9e0351738e6eac1e0e5749ab6a5f7aeb7fcb150af97b0c6b4b6df19dd0d06dd44764dfc10361e6217c8808b9b34b62311c5a93b4e6e0f7ef3ae52d4ca2689ffe94a04a4405442059bce9df2d919299f1bf75e74d5d51b963b19a0133e6904cc3c0310fb50a7c0c04c0e9a7d259c12c12cbf84dc72e3e460d74db9a2a3890d551b9451c84ec4a5e565c17692124a0ad25b47bad7cbf8c07ff8df7b40b3226309b783f939baeb02767c829bfe6bbf285b66b235b301372d8896521774d45d5c28ca6593b6f81f9aa9d1c74014fad0877f8134751210ecd6348555bb033d30a2d07a438b111c0ff0ebed28e22b81ddb4b9fc36cc4048afb3c876f62bba6f26b882cb2410d5f8e2895c35b90bd12010f424c64e53d33af47fe5657a2cd25e80b08e8a26274fdec89a6388dfe80bb6c48522115b6f0c78245e18b42a7f175ebf6816c4c45593a9f40dcbd881a1c9f8a9a010a886a319b6fe97e01347f6ebcb64d41687a08c869b0043c6cf684d11e75161c15f1037351c3b51f135c02935815fe9f5e382f4623a3e34ee42946e774c0717d6299a4c3720f26adcac65105771621ff851b47a1f2b96aa6a0d664021e398de908b83d64719da4dc9c6d20304d00c9fe89433f6a787abd9c282212a1c087781c40f5a615fa8a31affac834cd19e98e7b26c115e6f48f397f74e12d1b214ae427f13fad70b62e051d3fe4f90fafa426bd12321e17fa97bb0fd211401db77ebd2ad2bc5586f9a84ca02af969b1bc4c7
229db28e02bd5ca60aa60d3ce159dac61bbfdc6dbe3b58cad672bb64a2cf8e30093d2be04b0f84be835aec191fdfa517d671b2bfadd7a76d45b0dc99ac383a53e324d734bee7738f093a6714437cb70a4d93af9dfe71b930158216a5e19e237276f9ba536b3cf0e063fe7101edffa5827b38b5cb74abbbbecc823ad1df97f8b638eba5a75ae875dbcb1077530c06e56ebd5df1956fb93f3a605b8c1b69fe0cdc42fc6ecdc0794057b1c6e756ad5f6f105727b21074ab582e430ce12fbc1b234b4fe9b5ded642378fea32006d6ca4c539e07163c7fec74848db5d164d4f4f2619aad89f246bba33e7cee1bb434c3c4baa73448226e4711e0a13b433dbea7b474248fea8fb3290c6ae92e42dc4a365e6562381bb1d7e903336c8cfef2863d383fbc0b45f015b0b831636724d6390efe15617333cdce3efb5d0c6c99a9b77ac6f6bce8008e85ad4c5cad08aa9232c312f5df1dd55bea468587a58b0a5d966e37f85acfe260b26016e2c2ff7a5f0fe8c296e3c9f21a69067fbb828dda563ae945ff5bcdcff950b53b1890c37add5ef49c3d77c51f046e7d39f1ce013ff1f95b1ea5c66b37e7a244605ac3e8a5d33c521d7286f6bc22b7fe4076d080d8e66f8921772887233e6c883c5705862d2e4894efe63f475460a10f66a19fe50ff82a2b3c03084515bbcd25aaaf506c9fd90c4eaa150a24fabd3331220d83a48961cf9c4e9da384959935f79d84a2218348d70d394b522ec0787e934aaa472633c75c98909d91576a322198980c06f0ea370573ff2c2e2daa3f71f0f8d92eaf202fdda79d56bcfbd5dcfa435f8300c25a179338bc0d85f9ac297d283aa763892fcfb96167ad6d803eedca540bf05d9980531c135e553bf5c27442e228c761863ff43950e66526a1805e0671afa5ff0eb021fdf175fff6a92841d639b7c88bf7195a8f32b43871a648048ec0fda674cf79239865eda8dfed6d74968400a53a647b66e5b00276d088d2dc91b57f20fafc5103ced1a56ec7f6bc8e56f8bbe42bbd034ed6aeeba802e3f8ae05426758bee5b0e583e34e4df9632b636b79b467c84618ce7aa026dcd7d1ecfc54a87e6bd6aa5c3d5159693fed90a6b59ed65e0bb6d4c4a2ba045fb539652e7df48386bc2ed72e5c29f8d7e8e45233992fd18c473a39cc04fc8cffe0d70cbc0f75fdc7daa1d9f4ed0434732051968cc6123a5ac63e49a8984aface0787dfd84dd5f16573034c95a6b10b980bca6868c903a8a6d4119acfd7c844c1bc5b6c86977e2de33440e4b31f472d6b7b58bf4394a079a529587d25f2ad0fa1bf22e196d859bb45b81788b5789384dad71b23d2a226d75e0679e1cfe9efb1c4d66463a42796877cfc9e5e2435b072d3f4453e23fdc6555d2cba5ebdcf4ad536b85d30286f3df49cb5972abad4019159adc68abfc34c04e46a5bdc182e622a08694db464760c5a4505021abe6b0dfe15c8eb5fa686fe0cf8a337dd14274884cafaffa3127ca9311de8cc04d19b0ac632ea8ba34c4308436cca4e25efe3796279f3e9da9f697448a9d4a0bcf4673f3e3e6a3d630d9ddae3b6a1ff95a545db4a5fad2f17bcc6c21be224b3a038e346d6e6db12ff12ccd6d573bba31807b31ae12056ad5b77cfc5047df67a48392389665ba2c1e28f9889b8448f0fb14c30ff2050ebca2546af586aeb5d83a785eaa54874521c5ac3daa5e9b365d620801e6384b6d72122dc72ede0ca03001af82d5650011abb62769a1a55ee09aed875fd706f14946248fb4b82d12548107db353b6f39a9da939acce756e7fb9c1866c2c00de1a79a82ee91b46122bb8f30d584619cb21a247cbffc8b7b9b09d76eb13bfa48c8f28ea335c9ba9230a030ae4a69fd7ec47a5eef8c5f9851bb1142092f173239dde5c059a728590f1741dc9f8aad1590501ba5616b7bc188d257009190356f968bbceacf77ff23f014f8310c9f0a89279d02c8d225754ce535000f79ec86455757319f65c4202df8831dad4c8e2b94a67c8949e46b733991f783d9368282bcc00475da4a1be720974d972544c810c5d789cba5e7b0935ed4f849e47207cf9d54342247a7702b8176a164bd7816fdb2bfd52f4c0d6e528b4080150d0eca5707dc5c1c220cb5c0508edc220d14e84b5386ba229b422558d49c9ed3dbe128058ca59aebdee15c9f040f10ef8d77345dcd95e2c500fa995330b6c0b2769c086d03049ef3efb98999265395a2c0ae3cec18c13d270bd91c1feb53588812942c9ad73f6d89eae4e84160abea950ad7da47e2c82dc9fb06d194642c2abf3af0b30e5d04ce6e028d0f522d4639cae0bfbcb2649d2e64a6a55f7437ffbbc65f8fd5992e59a98e98f0a1a83929ef10aceaf6319332d7ab660dd00e2c4b898fd25600161ccc24aee43a10eeaa62ccf5b95790deab87b88b49ec872f48c8713cd4597f6e55a556ab242ed843f866b64ac5b100916f0e4711a02bbfa9c42e47e5b4e72e84c28e27fbc7858d1d8791d600f29936680f470fc4badef
baf72f2ece3be6b85835b1bfddd8aa5c5b7a87f6e9c64ee9e76e836ee16efeaba2491865c24e9e0c5de706a567a54b77400bfaec3b2c9d6f43fedd6a13f713fea1474a6c3504ed17bd5a5e3ad57d807eec47d6e6ed65d8fcb4b9c0e7588680128b865677c38d7a2b8918cb76462bdace3efa22217c64b05cbcc8d29039f438e2687e521078677e1a44678a441e31752d86f6da8a46f91766a40a03526cdafda3e69e636093d8b27eb3d12f235525e21f98483db3f9735d76f2d5bd5f1b4a945a367ee471def08f2f5a5af864772436a0d2f8da323824489dbdd65da331c9ecf8eededb058c563ba0b1d378109df5186fc7e100c0beb4651e4ef8de0f33cd0e49ddfd7efb84be4ebd71f778f26ad273af8b47aac27f103620eab42694fde562a68312b551afc2c592c7f7efaec178625c51eac8f8028687c953bb26151305c6ba22d44d7c6c9a48bd1542e0211ef0633ffc8bb0ac98b3aebef98fe710b4af01303b1feeaadbbd3d631f3e133c646a0853887ad851130dd2924e84da97191494b93fd711639c4aa54eb60341b67cc5f39fe6b8844dee0c70d38f729d4aa44553a161e9af54c667b9ed083a65451286dff52d9503e3f679b5fa38ce7adf78ef8558176a0fefffd45e8a402105d075b9d50e6aea8fc00dbd5db0d3e0259e0207b07ccddb1cb54c0c7d07699a9bb817246c86bc7e76b4726d2648b266508dfcce13b7390eba8b3b5629ee150b540fc032ac6fcf6a7e20d8a5dea3dbba99b8470ee035a991f2e2a7cabba89431308d6e64a7cf4e931572e937069659f88d36afa4c6e98cbbf12fa624b17cfa15118151bca7912380d4d66220f7b58fb3bb71f3af7861f553066a356bb4333d18e9795fb5a209155f38c7d0141364ec4f0de11a38d5efec4f592f6fca3388f5772527d9e216aa09d8f3be8591d0e4efd9fd19dba829032b524eeb123f27ac8695600658b034b0c5eb4a1166e952667e8441004f4e8de0d0cc9c4cb79fb5579b9d6f2ccb390baea7c9719a55beb0cb87b82314a0c0c8a0e66aae85fb39224ff7cbcb4e9f252de601c7e452713f736039b7d4e90d6495806ea3f2b129ceec6ae7ac401da17a839d8eb394c0c89e7b39b59ed2f5994659ce9baeaf3c1adbfa943afdd952223457ebcef5a3f9f6d19858fca3e61a78a755ebfaf2e57cdb369a8742161ecbc2fc4c5ec29b9537992e4221e14a7b02e9e63b3e5a6da6587a7ad2a4e6963190421a11879350a39c22c9cc12b9ecf7a3f9132b53e07cfdeda0db6e29ce5a82b347db645377183a02cb852feaffb44a57da36c02945bb803d057a094df43dc1b4ec751f4dc3be45bb3b5490822d790d4d67cb82e23e418327be8732cd2db2f2479651a9130620ed92b6166bb729b0e5a9bceee3d1c7a98e0dee9ce57911b1d1a1f3da7d1370414c19b1454c8da246835158fae4de3f882240345aec8174fd5268f5b73b60c602442e7fcee71531b0120368abe2ad3bb5ac2da6f7ed1a14c168ccffd580261d367327ad4834c524ec554ebc0c248be39d8c6503e4ed2e8df7b36d59ed1b6b9f5d8ac455696102a9f795ca0aa75a7e9c86aa2f3f5bbe0dea2b09b9f0afc708ca38152decba96419ddc4890f74faa7358824328d036453ac16ae420fd5f86c0ed8e3ca44b095f6d4429b534d7f877ca07657704d0c35a5a1fffaf12e12a51bf210dc557353fe27406c36a185d3da3f8013438345e1c28cfa98038883bcfb957e0dfb57ff33f8caf5d5b9edc596d26629559ab6f035ba941fa665337ea16422549c918221fe0b5602180c3ba3fc7c18c7e0afd471c0dca54bb3732ac40b90f9dcd5aaf665b0ca63292ddd7235b67892df8e965fca21a8e77c1ec7d444b6d2ce05bd2253b9cdaadef964813a03b99695bfb5a8aec508e98adc14d35f7daef7d9f384776565152c354deb30b98d0c0543ef080fe48495899862fc553a2d48e1b39fd2c699c7891100c01a89aef6ce38871e04330c8164d6ffa27efc4a4c7f282b5341a8f6f92c87eefcf0facfc9a6417f93df635b600c8c218e185522009e0cc730d30dfbf3f3f2c6b2b582a6298ee855d0d7df41b0005033788c81830fc09cb97e4115ef9713ad8b6b74787db30427237a48fe83126cc2b8d3d431a3deafd22d532e9bb9ec2e74dc0f4901c060cb215b57d07e0c7cacb6d3baede035322fe9c32f212b28f8318a87f73f603c293d516b475cd96eceb1739bb4081a2d2d82fa6a6d0f33231ef184316041e357efc6bd400b7228943b93d2eef303c5ef7d7295ca959f10ead921d6e257dad7160743bdc3db3c8fd87d26b2aca6ed0bd945f41caa574d9ed61f65a45f8ad7ec50f6d7160d05ebd4516d0c3da1eb4efc88f88c5ea244ed90c07239f3d217983d93295926d6d0c8597c1d6985dd9a0df8cae5b0af9718820bb0ab425d4f8b6b5d1a8831b69c695258d2e268deb2e8743a1cba722078458fb7d5b5f4fc0f6f07207a25ed4608b7f8b8ab7ace1eaee32460e5ae00d299f783
89bde8f7a64b58116387e5f0e98522c6743e808d63d7d4d3596a74d2812780ec35a9715c8fc440826d37ab29a909cc0c244099453c4a08fe541099cbffb04ead771a5c7c7004c55c44990856f2e02e790e9e4a3db5cf4db2873ad82a97159c86aa8347f8dd8ae846a4cc42f1448046cfa0e121fb9a7787136502f5cc5a21b4ff3da68a5ccdcc54e49f825dabc00a10d437d39a2a9c2e5deb72ab8b188520671eaf1b0091ab7a271a1b6ad840d089f2f79d1ac7ef5e317775eee5b2153c550ccd0038050eb7b7a6789aa6bfe68eebaac4851d93a22af4a08a14e37ec02fb0f89edbf0c1d15905a6dbf691f0f0f9e96539aeccad6267f52574aa4df31ec79dc9fd6f27b5a26fbb1945528180cad006e68745288c3868d611257541af28f036cff9b911172e38272cd5fa693ab89a11e02d795a0c7b4bda53203027994d2dda4129b474a2d94f708cf27840443597d9cc6149ff11d6c747bcc070b4adb4ce4bce847ccb7d7f899d1c4cfcb6db49b87c029b468c71e2a32771364c16c2ec2e6d10d1dedfb3379310e32b3e16153609723855caad0cc183c99c9143953e16a22b832acc833a8cd3b64d448b76134e6cf42b4b68e920c8682387c64bbb4700d8b131c45fe76d6217f96dec776fe6b49d4a770513d8228fb9e8f51f3b8d086e1cce2c5a3af3f4acfd76d985147d7067a3637984f15ce82560c5e1304f25bb1720ebce8407cde58560943896b8a3ecc59cebeb77131b1d33ddc1000dec496650992b25e3b7b13b5ada9ffb8ce961cbe53f43496e95f9f49d860cae0eed145b2c5ed2163a6a06acc60affd3536cbc328eef9dd2c3d778ed42deab72462cd808f2f1cb4ee11376ab3e4e2b1d3b9e4cd061aaeec1c616573ca29d6561f8e675af33805c2d57d2e539fd646851f4fdd9cd00a863bebb6ddca04cae87b5078c7349d6467f8c96fd93f881e76b89d506b5be38cf916094dbd6ba046995de1e38e5874e9582a7920afb47a220e7925f83adb6c14b70ecb6c85e0f14ffc1e99b11e8e8d45a0c4cb7459a758f55a5359776277548a3cb8e806bc1df0238664b35fa2173827781cde2fa9ce98853a86e3bd0bc8f38a53eba0fafa306b97e5657eaa4e5d2325ff5f39a5308126e0a4bc23696bf2d6233d3fb7a2c83f3096f82613ea80c5e4c3a5d5983d0b637c62bba0e18002e8bbec09608403107d1dca13fd22cf5f936829de8d614d8cb04bf969d3a3b7d2a7bd9a572b2ce3e05f62220548e0d8e93cba0c187464ce909cb440f4051a3d58f16ab95185579f37d1e2f890cd730fcdb99f7bad003c4c71bcfdd430eb9dfcba3140c30d9663e6b94df246aa73c5acbff26b7b6c373a0cdf64ac1229f17815be0134509bb63e066f9259d3e7bbc524e95a16907c335d78f830c4b63e461efc80a0d1dd31b63ee14124c23373c4f41f61b1923f4526cfabc5f3598db634ebf07cb70061ea71417f8efa32a2ccbd67fe4943633ef76184e38f3ef594f5ee8e1c148b30637358e32b5031b80db19e982291b383ccf5658cf2398594c5b570a2a284f45b44f343b33c76dbfed74c8919bb7fb09b46c854e0acc2fdcfcd6bb2434ae40f91413fca079183e087ec16be83cf854fa55d3b63fff2ac24877bb1b3205b77c407bb70c04d55023e99eb036fa7dfab98735234fd73a2b4b0962f31fb89e207f5fc8b9d4dbcfa00a184331970379dcf55e0b184fa79f32a9cd7a3b39b2bb9f3a44d628d85abb7453ce94e22ccb14a379e2f0c5caa0deb484e8f8c455e97a1e16f84685300b88211aee0d6ac18965571fab8166e5bb1548d6b556a4808b86494102a8f547f58cc47140d0b8f39718e1a5f35842b2ef476e50eb08ea1202d6db96fdf41c080e6b4609d9acf3114d4532076db372b4ec2c52f2b96a797896b2c1745b13012c3ce9e3e02eceaeb11a714539419b3ece70ffc777b3f633e406d56fd8f3d1feae707a1c9a64d527ece1d57fe78593df50b33227f35a3e31d8ef6777617569113460d7271aece80c344be8f5c139f567568a657d8b5bd5910c3e2f34e4fe5930d5dbf5eabe23ccd4c841a3cddfd39a287796844c95bbf6675567b1cc84feb5d3d4e514206dd4d4c2f90e296a04a8a30744618228efc696c40f7857d97e2ca25b868f236d04bcd20020b654a69180f67d43b5c34b626653abf25cbd364dd465dcb7d9dc1bb61bc3bff4afb9ff31f12ec68fa3e08a3fc91741edf8ec23f0a425482034b47022b74cd8ee32bdc1a4fe0a7e9e626315e1e6adc2686c6394a5520d7e918f5fff74e77a69295aaa00020b8337983e8af0cb58aebe4b486642cd5021c5998b5019cc64d93467c0daad184a7eaad2419755ef37c39de6c0d5905b8629ce6f31a1a447578248787af03d74350f25fa8f859750a98f94317d5915e4ad1b92ff45c440e809a1a612d962e2da2778677040884342e1a655559b1726be4ad5a6afe98ffedec7e4ffe0415b6578e3d4c206f9f9da50fad9d78eb94af03ecda8cc521db0cb4
9f9e334001e8d6dc7434d5cf0866a27e3c169004c2c722d0dc8843e29091d4c529c59c8d434eef4e967a52d38e86de94f27e61f9162ebee11e95f1cff57f31f943ad04c8e14870bee05454650be86aa4284abfc5fae87fface3d519be8a28571282ffe9869e16bf7eaa3fb491ce1f8e9a1a861a76cebe54283ae43f59d654319cf563ffb04663131c39adf1ca127c89d175880423f686823a9dc8f853912ae6acbf1d7ea0bd0af675760c7435eb4c99961cfe4d27b1d61752fe2d33ef19800c9966752b255693e692315f140a80f830be5d9f824d9136115284775106ed6a0c62bc59ddea4c1662fded15c5eda869f65f0c35f6d139ba8a8048379ea6a3c083e78f68f616d23b913100d71d5348d0b80d516288c9471100af56ce2b5eb5b310d7d8814fb0b7518377b8144ff5a84b5432dcbf931563aaca31d0239c321823fb3c28dd41a89354ce47d4ba95cd30d63dc673ebd03973eb03a7d85ea88d61a6f983e7fb4869591d26e2e818a1f77eef3bf277fa0cf386978ca5195001441a1c0579d78a49b1d5b007b7e2a4c80cc4a9721d6084dc91f6d21f6a10e775c07ca4ecf9a911015cbb63335484f7ef654818804d7474a4c8be35a899bc8a59847590606a07c7eb743244fe10948ff842c8cafa524aac50466b9a5f40461e7fd71950eb673306e477ec34aa9e0259885b128bb7d9c702b410d7d524dad6da5744054c8ca10fe8575375ebe645689dae51cb5142d3511e21a7b43840d0b42333bf558fd51a2870c4860982be9f53355d8cf99ee8bc6836d4692195b0e21fab6280a87e6243097ecee16e9d578012f67f7a377464f4a5f2f222163388a817d0f2460000734e882ce7ebd4276b7e1a6a22fbcef87d65a9ace315ddfbd8e3a02984cc509312fbaf7f4e47a074569ace19f14010aea632670957b2e9d5ec789bef90e32914d3d43c67a4571da14108c2ea8b80af4d332c831ff000d02a98e4ace3dd037ae843a8647163a4e80cdef62e2536c822bc144e643e0f70dc0cbf0a0b54ccf5d188733eb05e87f92add63cf7ea85f91c4fde4e2dac2cbdf2f656cd01db7777ca4190e32ed439d66c18502decd0c8cc1533b263d0d747986c4e6a4aff70e2a84badf0295e5df47c1e35ca13d6bfbcfe7c794cee421e96b219af6b694ad0f24d6f76afc88c387903aca32ab3573eb56dc593f33ee9a910dcc3ed709f9dd830a2029376172e63c7138b757ab345f1de0accb21c2fb6a06b656ac2399c7f1f0d5fb3447c89128448c35e61c2032b4ea4a840119826f69ea73409ababcfdb04356f66429b29499ca72a1037f7a08fb2e6f265b4e743e68035ae0311b2ccb61aed3fe17dd22c77ab38c49b09184107fe012d43ab385a11171ed09929cf462649449b68a2a10587d2f6948df8dc509e05a2bbb1f16563a0b6ca9c8b40528f55d1ba56d68b3d39456c2ea71acbd06db6cfea521f51314aeb39444080a9b05aa99bff9fd5f2feeed3d3b9ed33936c2b1bb0c80cfe0a2bf6fecebfd4f2ba88d6bec458dd065b852d56bf61cdf98651790cf3063d310453ef7efd57c63ae348ae3686433387dff0595622d44ea5917e645d312a1175571338f4ef78c7292ca625d81de1166cddd0005f0332b4c8741c281a8bf6bfa08520622f74e19a7a43a26a8c3cb806009d362d5119c0182b427f52652ecf4c346195aceab37c5aed0e9fcbda7d4d3a7dd59dc3b10c7c5fcfc98ffda7b357327812d2e08492efb7c8dc6fd6f7b3c22193dde3ca229c1a902e9fac48c06bd4ae72560b3420dc772b0ad7fdae4d48f2b286eb5f85a1ec12c77a21eeade57dfff2326e46fe7de05f5b0004fdddba3adf099a3bf1a6ff5cf1fd56cacb8bf08c531237d5d769a1d9cf676dce2ca1309745aece9b341dbdb960b84c40f3f45d72408f2bdf5fb32403c5826da8af71d6319a0e6e9f282b578536cba20c2ed267df4d4ca2f49628df45b836b993232981e0bc4f121b80295847ce60cb0db41f67851d6ac64f683f7cf6f986c5fb22095183645fa87717325496930017920dbd74b0be6899036293809aa863466c271158211f3e26dd86a62bf4c159cb17bb5e303f50650b8776c5644988a896fd7fa1c43fb6f142f1451c6b20250612c0af43ed0c2570fea925070ba9699a6b7ed67e7b83bc1db2e213a5a63c96a2ab691cb38e90a435a8c6f638baa9af40c2913fe0c1d516569acc2c27a57e13116a7ab0cd86152c9d972d332eac62f8c4ef9cff1de4c3e13cf03c3b9dc38c0e71069450443b5e07b51e7cce9b96dfe22b652f29541f0ebde8911fb56c2e6baf361ede39994d3679afb266734128094734dd29a0c2f90df70f30bcf45d0fcf461d1d22548d7fe807a571e690cf3bb71cceccdcd358bb66e42da73b8213770cbda83a50d1e4869545254cb4819f63da570b4b13c8545a095ee5d57bd74a84f70ab847d8d9478f232928a7335d8c36f5985514b81099ab32dbd5536f16c6074e2dc92b44e35aff65c9321e89f0f8b04
864d595467c73c8dc618d747d01b3a62ebca9e7a4dd86ffd218e1cdf85ecc2ce300fd5787ee569367c97565a375c414ef65431ccff63866be5e47db305810c7df61727bd1c6b5dc37048656320eb71e3ba587d82b78a34ae20063e4f6598bf60b1d85c27d0fabcfaf9cc414013891e6f02d5953c42ca1c8b66dee0be7ea6a5e882a1d6d35ec67b6e6c0fdecdd7d5a418212ed30ee1cc9116fad5297cf60b1405b8943872197347a890766bd75b258c0cdb6c836dc62ed8581077f389e4f236093a2e530db8baac66a22ce78c5bc67ad6a928a5e666f59135ba35fdd6d8f96176eb7ea8d5d7e73cee10471ad348f89f8e17d1f7770b166253b986d9238e4014d6a1b2d7edb9c13c0779d4e0a11d09df3d2fc3facf27cf867189dc9c261bcecc301a51ac355dd08815807a171fa5d8a045b2fd37137b2f43dc630a4f426b53e01a59b88e3020160ca2c27c799d63b4591a8607cffd25ded698064761babb99103f2d8143192c5d9630cc62beb875e793778092f3046cd9f1b4bc83307a53cbbc36f9f1118053cbd5c06dca39edfd2512e7bfa16397cccd20fe4fb38c6f6ad66513d42be20a40e54cc0deb93feab18795e45bb5878663e0750ed3a4f2d6da17cfd85dda8943464efda1fff03205c84e9cb2a74bb26c938e6bbeeeb20ed2609cf795d6eab980a28c723a14eb809c553d9e5c5594aff1e96074649c1b8f4ea42f0d2dba40d0f07d251621297038119ae5a52c80cf001a332ce5f647d6c2d020ff621dbd5effa4a4be4b4cbd540a0313bec9acee45307429b582a5f71f71a6888bc4429f85beab1f6c11f3ddcf1561ad641e2571d17c77ff28ea33772d89aa6e8cd12c530168ac103a98421c299733ff4842ac944574ce3c9df87506daeac089eb8df22f45ceb8c1230be580bfab7d2a746071f28aba102e93c2e46e30760430ebb3e9883f2d09511f99331cadde5f96d18ec33f3fe2af12d46a3e4819ef95634241dbb23f0ada97759544579e0497f0f4e3077f1dd54a14ebae8b9d825a729e4b853f52b8d58832b6a20e56f27b85fcf38d39563f4afedfa6a5e7c58c47c417438e080463a8ea8db995297613db3be953e31fb570f54f9d90fe16c60fe2d75957701ba1940e98658616d5545d4deb230aab310daba86965131f7a6dc0f7d6c6e6a12cf2481f3095b822e52b54e2cde5e017081358ad1085acd94e434025b710ccf25f9f4423e6e4fc682ffe1479840d37466ff4771ce035855cda8a3e2907323fe7760dc4c39e317cea433c7a32fb587dd860683b08a9b8a568e95498b2107699f5e2192144fe52ba305240a70b8e9d60b56421d7e7edd141734706ff670abb57e1683b1c12b387df907f1982db60a0f206c5892b3b50d1daaf77194caecda12ec5fb1f89fafa5f749bbdacca43de4f7b6d418a9af0ad856e662a2ebb3fa6dc83a41609b1f7d5e90b53d9e8a77a73cf80f2984e3af010aa2bee35794712fd3502c9c58a81f7aaafab04f3265776d6bc82e79d0ec7e7da13fc57c79dc9e4f6ea29c2149fd57c6b878bbc3adb2aad4526dea40e8e30c76fd836d2bb825dbae2fbbe6ce6fb25c71a5cdadc37c0e1852a1b66d7451f90ea84b352c40a78cdddf18b637bfccfce077209c739ce802f85392568f0066f632e2cefc0f468aa965bce09c33713c25c4e0d5381157e7cd13f764cfaafffe0b47ec94b87b9146290719bb5156c44b97623754f59583d8b7f7450ec1b1d7efe8b8f5a09decbdaf790922805d7b06a29aab14cfbde149a79ab9890cb337087f0ab3f40b8abc1f2a5d3bf1aec8a7009a91c7b21548000d50ae6d2955bb9262787eda1fe5e0ec769a0c9a3bfec552fa05d6af6a10223fe4bcee0832475e5a3a38c823cbea745b87cc39d7d49defd664749e920843b801cfc14f558baad762f8796da716674e7ba1a4662b3392afeb24ec6e3a7e85f829d64bbeb812b021df0f8bc00b41660d106920b5ed28f3fffd8b59aff15f6cf514394d457d7ce7fb965b515004c8dcab97e3138b9daea3919f09af0ba2eb15fd37a75b29589f9e7456cab542d91268b8395fa90b8a2f5832f1f55cc4e0fe7c0051376cc1cd09b18b6a4674dff495bbef5869d148a967835c4adc996924e744e19a6e80456f06e30d0fe86a081f481e2aee593cbb63082d1110eae5267a5c6e8f38c766591f5de6ac8f58cba9aeec0be824b61a025bbb8efd53d1e59f6ea24a9408bebdef3b3cd5ebade03afc2dab103172f92ca4c9dae8759c44d67e0b442c00e4b81a50abba7d808c0b2f2bb9465318da6cd9bd89e3948d785b718d1d225c8ccac620bc9b31aa838e9272b4427ee3753dbacb7af3cc175063bb9e063a0570ece3c50fcb9468b7e9080feb5c64507ef91a79eaf0d70a0298f9e51d3a92b2591e3e6bf81b4c46aa5a64d620f1a8817d29d8d0deb2b373d094438a829d124aa966d8ce495dd5eb530989bbf7f4a2410928fd2687a5975bf9384c646d2480c92973f0e76cf8c126265371ea0612
548cbb21a2772366d43aff01a3a0fafbc609889431eea924b2fbd0b03c1990f7480bf48e55bc897cff83e2748c8628fde910b643b211c951a01b4e1953853a7602f01ffc05b58f8a8a63f9b1a1573a23342154807940ad3080c77f95cc3fc714cf427845cb2a02a801be971f980f9e20ff0333c3b326c510615c2a266e3e4479f6bb9476b29e9aa9ba2d18e5a47f2c1dc8ca2a2f68d57410f5514c37499f7e8aa634567f1f5f43a1c858ad964d4ab4b3fda048e98c2bb369a3d7c9bbe6d202450bbe4e6874d01ad3ade3070cc6435223911150230529e67998d86740ce1cb35530e4510f08b968b78e319c904e2f92187af2ae70900ea7faff3830180a72ff9a55636e6ea0d6be5ea7c8a9dd2cb4e67afd3c4baf5a9506f75173e5c8c556b77db2eba2b25f98db84d64302ad1952936eeaf8296379e14cbcfe0546ba40750e3d14d29d884d1fcb0538d90fad43f53c20694e19239e24f5adcaa000718d0819be166a2827a04eafcf04fe4a2f8eea90335fce30090601f5dac609543ce74671cc0b735cfb447002a2edb9b182cce199a57a186f6bd2e5ea5216b93e13e88543de802becd8c0e7b0de5d08dff22903024da169dc45c8c44d4cafbdf8ac7e3d77010dcd94d4a95f1be9322c8b99449a9c737d2812f8a6fde48043d51058633a0db37a64ec4936ab4e80fbe7168b3d44bb92e50d964a9d98d254a72657755790ac9ea9fadf5a221d5c1bb6241bbe6e9682d9f74910f8f3a37d5f597ce1c8ffe000ee97434a452be176171dc2017530ba5314a9cfc90f3fdb1dfe8c9df810b3ea1303892a9df04337d1728d0257f85b921c9e1b2d954a090e0e74cd016114f9e7b829a6d311cfac64aeb2a3cdd10a96bd99ac77faf29ed4fbcf93e81fad1a37bc72f474c650eda34c4150dd67bf9d9c028b65748f99dff24595483a5a5aa8f09886b51cc538aa8666669923b1b39ddacbd201c11b6368b1e15f60bd0620efa37608a8b797f7b275cbc8ae396128243944fbf9dbe14e00817ce4e374ae3c04ce65b31636d53531326108d77012400faf0b6fe6f311e923692d9ac42af0792ec8d7b47303d875e5a95fc355a0a68dd690f29ba2aed1193635f6ac0e428f8b6494402ebf4440ff409bf80fdf4049d292178e18473701e97638ad7e6958a951a457a0df68a1f0e9869c77cc535ea6e8b35fb4b94a30dba24bd9d316f5dbfeaf50832a1573f6965959bc18149d6f7971f567f035d491c7a51825d5b4b5cb72dba919720e6a46f81aacb8e352e5fc15844a173609e89a21a462676c6501fc66c2f57b041e8a1c83917f4a0e7cb890e64b754c4f6731544349c1217343a4c8a7495082ce57e5137a1c80d3d479eb26af5f4ab7452ea86ae729abd2b6ea4c7f22c1aba4d71d47cfeec2344aaedfee4acabe07011807029a52aeb083436e4f4cb73320ef0d5196f6f1b2edea8fd93cae03e54997934e38e3a9b036b24d491f803799a0fece0cee98abced988ca5b32e179a3c8628269b18dbe3b2914d9710c4666cf28e11cf1bd296428875f25085e98ef1fadd60db3e3dba3cda144824723fbc449103634c5314954b5be07d0f2a3c3c45700c712bdf0142f317c2201e3a3670743f9bfe23f91f5b9e5fbebf6302a8faf6107eaa1820ee82584557da209796ab692d37f2bf7d2cbe192c25b2a2c2ff09648ba21bb3414ccc82a3b49f1fa945f51806b24ee50aa3e562ccdd374778f3ca98beb0459bb56f9359bff810aee72909db26fc5a25715ef07bced17999a010c78226b3f8bbbcade996bfa7f9eb312113b84cb1d65cd0a52746d2c7a89b109783202eb2aa14bc7a1a46fa225b69d1992838338c6f5e04f0858d026bc7968a632d3fb345f39c2dd850931554c7f8950246134b9cd5f4f252a0d22d958a072c61d25d5ec54cb9242259f444c5e7e536f0c32fb716993fa7965fc98e080970a36a08dc4eff9f85b77ebd468cdcd2e72d0a2a021b0a853bcc033ffc48bb68dff288828e46310dec368b860853a6c19069db8634486a6f545744ce83cbb1dd2c5f2d2868aae84f1ad0fda0aafe11f0fc0acddabe4877896d26c90d6542692b80c65395f69132f0d2fbf7cd0ead779a468ae9f617eaad77ba25db0acd78dfa2b2005aa8e8273a364cf2559e955efb36db928aef178a4d4184007d5ca5c7ec611016a355ba79fbefab86a123fb97f53b0eb32cfed5ace50a458f83644cf40b91066118343e605a1f6d3d5ba3efe9e0f2250d5bb5257ea2fe097a7a6a3f80fcc6702956e79474e6bb0ad3564e621fbc320437499ae7c72a2384664a44a01a06106bbf02d9c1953ca2d2d61c95a1d6038b11635bddde4ebbfc5b419e3058ba0b4b6d9b165becb17b64d4cc450b31ab20964d26ee2352085758f5851d41f754d8a8f8a920ea88631b61f69d60146a2167a704129a58fd8fa6165c162820611a8906d1688ace79bf9e66512cfd1484c096ce50388e602ed43cfdf67cc63847f3aa17dc2db6b7002ca597de8170c5
c45011b43fcd529de06cc294bcbd8d73220c46770c5a9701628669d049a83e764855c5d32d40e646989d7dd84266a7d280eebaaa8d47e470634732d59e9ab4297eb3f07578b53f3531adc1391cb47dbfa75fdc02cdeffacc061deefbb7f0ec9f0f96e8ce0496e5abf5d87798a699e416175058bd093e97c1773b5616aaf56ff4ca5c26fb191c5ae41d4c2f361fa60b6d554b3480aec44c9fe477d2f0f7a45d7c6629344e67c93f33afe231625e5aebd5f81541f8218052a577b9ad7f6b72ac5723585e6de4ea90a2872b2b951df89ac3939cd87c91200b254d574c7f7cb55af7c9b278b4d6b201b12f0b94ae06cbb91007bef89368b73b0ae8020f955017c5d46c291f0fe20fbdba1739ea4a331803c6fe7bff876f89949cd5da5e2e7eee764b3c7c45e8ae0292753ca8ab2ae631fd8438d349a55beb9bdbe841e6bbe7882bcdb488470b0e55e6eecb0124ad3fd3db5a1376f11969b8287456d3d44f9a32dd474999da8f7ba73dabaeddbd2c810cc63006d8e5dd154a09b9e814997ea719de328d4e4eb6ce1ddf1bddd6e9d7caaf9b386955a8fe142d010e86532ecb06b3685c0a69161f2627a8333068c7ce52140eef125b42b36bf27f74070d192ba8936aa587b9d398446c2261fb965280bd6bc56309184181df312ff673c4335afb8a9e2140a1ed9832d14cb05c8f77ea84f3f39d7516707ff801d070e9f425c138f06929f5fe233ae282b20085c6cfd5458c7d08c24966692af215c5693f9e09861c01d9b0f860dd4b582c44b4a73459741a15e0edb6e3022391b79d5caccf5386b635829f3208a8158ad8b7a396f6890d0f4cbfbc61a2624c9ff55d45ebde3ac5c5f07c9c043d015c249bf7b40262997f5c4e93716f5e381b544e4cbf76729fb813e98333642f7d68cb6caee49668824a79f2d59cf73363b3aab88b4a6640547e9beb87cd78b6e056197ff9871fa1dc4f38d05f21a6d156011761b90a605015d45a834c13ff17854037e72af3a17de04b9deefc24c59768c8a615231b6f3a0ab37d69ad4a1ab4318803c69a3423f70ef6ebae7c80be230a4c64ff3b7ee3c29c6eec97370868294bd185ce576781e8dbbb69138f9569e4c638650301017285f5bcc2f606b39222130456ce7c2608366f51cbbdbe36eaf3448d841d4021c2bccf17c6de26fd1a2a36fe4477d9a966e34cf035b9f1490b7cd0161cfe4a8afc343dc85119e5ced8c7757206d6bfbe673677dd099007e05d031b53fac544405a942f3654502ae43cf1f882d7c562841daa43f96e062437eee30a993c488ec4d501ebe4ae0019344c8d9ba892d2e1ae84c07e24e9ac59852a15be0425881742c7c83caa026da718596830ed3ede80bea26565d24a529fc04ba53f5efbe355b36451ba4d33d7f7b12e1a09081838682308bf2c5a3b7597870268591f34fac0691ca37901fea0afd16dadd5cd3e2c28d2f265a4fc4f027289278184b6806e95fced0d23912b9ac4f76225b96bfff99aa953e779976043dbed61175db8b9373b21343de3b2e8a367e32197cc198a8cdba743210cc7a42971a10966a4c990088df071f1ecc96db33ec361bbe98c825ca3575b0f23a8294ea969cc8dbddab551a450655e488341c0541d8d189ad94b5fd67d052554cad0ffbc8d020a4110209bcde2e4bd6d461af522794470be1d268ff633f1317459eeeac3717e72bf9da0f4823dd4e2981c46655c5ae2dd213e329c5b151e9fa973396db3cf3d79e5362c6e63190c1fa936d95b98d81411aab1da95ecebae94d4c113fd49dc269eaf9033b6ec49c13201c14f7dcec4b29a146056800d4eff9a0c979f6a381de3f69f6aed2c5828cb82ded7db52ecd793b753205feec5a48f61b714c151aefb9944e254c43045a6c5e9c002c79f2bd42a17b713f23b5de9aa788f87e4266dc70ed569ef6e68ac621acfb6814f7869887fe266258bc82b29a70d391922a2713c31071bf73da0b4843b8e1f5a7f3f3f2aeaece946a819f00e34e0e2d98fc32580e79cc237647de6126a451102b1e8c5cc9bb2a3c01f0afdffa4005c60aa869011d4dd3eaa4a7bab937bfa7f82518150ae35fa9ce24bf829c197b792fc0994c7ebfb42a2f9a93f4c1892e4c25f662dd8248d67aa4ed9afa83ce9326dee5a20cfd879fafe9573b472817e74efc65e8ecf5f47199c9737332f634a05b3354cfe9340d12460f9846404e1b406c911db86c6678fb04a3317a645b1993e5e61bdcc0f822ee280bae9d84db191051a9958f22740cf0894555de3cf136f4cc8eb1c4ac44a8a9822f95a15fd6f80bd37252677a2cd10d703f9294855d169242679002d3b322656b4ab8eed68e404d79614fd4d91816c20a390341703e68e3ee241e41739418373d0fb9ba1fa6a0366b0cc0a2e9dbdd983188f8c0400dfb354b81f71817cd185526793fd68683952ca58763933e76fb7f1020df7f59bb35b63d41d14cd157a24fa3e4248b5cde6286519e8f437a086e9191b1faf82367faa4486978ba6f1810
dbe877e5e33977dba474dbe70a3c4d4671b3f63e67e1db5281b1e0d0e0f1a66b1397e0e7726c4c374840a5fbbcbb98b6952fc211261def9275eec50e83729df2399756d6162cc2ca4e317ac25801e89b8f18974e47a6c3fdfc263181a689aab050f6866485b67fc445f037294f95592bd75c454a17df0a95a7e2a30c89a9bb2091102afe444d98002f36c65be3c1f79fb6665ae408e8744ebd1c399d0c95b20b21f06f7ec00f86554ccae8171ffc4698173464551795a77ce540ffe692450ed264e99b64dcd4deb6665c99d906b8b193ff1a1960d1fe4417660de1fe4f7df440b46effcc4ce95944bf2ab5ee1aa227d2cf5f513c191f6d5baa23d5f4f759e9404a0c1c86236e72d1bea4aed258bc652bd97ed8a226ad6e50a61918b3201406099938ba0d528d9bc6ceb36df2f7ba6c6538678a52db407d9272e32b13eadb152dd4b8ea3d384de54f57c33354c9123be7b8bc8a770ac5338c912c1539124449f627d105aba2102a4b0b67b0c65221cf18203e77f49027d0ab585b8bfc6861227b7752342921b1c00ee5a8742c089f201a190b19e41d3740e919a808e1ce8d3591a7beb851d81b8a55d61586651d229c1650f5ae4d2dca791c1b6d3cb2ba7059fcdb19dee84e1ad21ed8f12bf460be2dfd201a9fec9f0ba555af60262073d1910898bd32ff43a6cc844a9eceaf111208c959f48b03fd34393fa60ebcd35c973c9bc61a27bfc3f6e5c4b0ba005d1605542ce249fff70a03097330181444e4ce33c130517551acba9a693226e84a5edba5da80a3fb530bbc8bb5999739dee9c696a9ee1c7198f4cbe9635e007afe30cb77c1315a02429e226e3727146074c38e043c4770481cf4d7d321421ced4bc44cf359fab27bb5add0ebc1f02d1de646c157aa0a2fa053f76c5c17dc0e21271d3eca7ea2b4db478f23c052e106a734f93c5b3ca78de70c8c549dd411b5944ed9f18854b4b1ea1acf5a8780163a1fcf2104cb4c483fd17e8f759880a84c0e4cf5f8304d6e4389237afa8c93650b27e0319ac90f660782f92afab62e21fff102bb8d316eee157c2096ae69ea4cb3234c3894fbde89aa3e8cb18819f09ab8961d91d33901b0fb8234142d6692f6f38298534088a4c66b78760ac4cef690e6e67ef1c25bc8d010a0d12ce5af512bd2cf723bc65aa592ed8af781767b89cf66584553ea49cc8bca17efbf881a2bb424add154ff538c41bdccd485e625ea495c8ee50f6aca3a1bb285905295dcfe15b564a649af905cef87ab0c018facbcc04002f9903c8ee91de5e455eb4c35c1f0b1ebd65ed2c79fa3aa1a3ad8fdde6949c21bcb0489310021dd0ff77bd3a54e4e3b445547cec1ffd8883c38629b4493d0317093e90d51903c814885d75af75d2afd96792430eea0b65b981305cb684cbe871f4c5b5de566a86d2982b6ba8643116f50e988b228a460de65d9ac360736fb2b5052f51014142b0a9e2b01c36bd85182942ebf39943787b4b19f33effde6a4b78f137e41967cc89d4ee64e44d67ab1dc84dd30e65c51866217d0819ede95aeffafaa14642a6a8ce8ed24a8abdda1f5972db5f0bb9649440d298dc68e37ea3175044e099297e0bd5d62a9baa8be949ac6005a39105ab710042110c78a487ce7c87a2eb18341b7477b540fca6b0fbae9550aef51b759b9993e3485535e15799c473c5d71b1f42f1f460b9bf9467ac4548d5119444d721349a122314eb746e50915a4463efe65931bd505cd5883f0d6e4a2b4142da4c61326649baa5f679e8a4da39ead03b18235e148dc5bd8c6cd35c616bf1ae2f6f781baab1f4cf5f618ab074b774f2246830822a196c16053ff2a97bf9a1a04b83544abaa2c40cc2b3c3c578c00cad70dddd0ee284160987387de74bdc1198d083e814ceae60b4815bcc8677bc2999818615a872ebe40100a6c6bc0224c0195ff0b464e2c7ab6e828b977f5019e3cc6afd85cf39ad89fd1f73186d57d7b111e7ecba67edeec09dda5a22a968c9315df130d07863488ad7d72812f23a3d10a2d4295f6ed0e18a861c01bd621bf7090617e92b18e32cb91cf35741ecb3084b7f7b7bbaa2e358be52f2c197e1d6a8acc294c7c43f059178c1ac2ff2fc5ae567651350d4a87284f7fa8c7c31b9c2e8ce78d70205ffd1a869fc63c228d3d7973ff6f757abe1aa7c71f3e8624b8ed2c1dcd0643535eb15e73dea20e3ff98c8ddabdbe2fb6c346c4211849b993f27973dc6143a86228bfb2b5e2b94db481bc8d63c61b0e6115ec4b75799bb3fcc8bf5ffd7e0287a8221eb5388d05ec69fabee5ad152347ff9ddfc352838e139f791970773d32170afce03ba5d306042bc4e3a9c9d447ca0d93672bec897ff1b59f3a91e7f769f0cf7d1afe5af0266c48f486997722470555f59015a6e9b74a32019aab26bebe79b86bdf6511c434889ecf5d8766fd798f9ae4f10acdf4810add185d82345b0c469eed4f967a5ae662859127b22b1f0bad7a32196b6f941d3a2aba1641af14e074d19dae0
e024b5defdf98064de910758b275eb0d39349a9aef6b2b9b9e9ca3d5a32d771872fa0c6ae9e43f39bbdfd36b9c017ac31be13f98bf47da6c367b92bfc8532bce28d6d9762fd56919c033378d0827ae8685a4ca04e3482fc1a3a023e103e2924cc4d5b897e23afcbb8f09ba3586ac455f95f4ffdb99e5fc93ce6aec0d673aa6b08de5926a19eb9a7de0ac787881bbe67a10690020006133c407293fb99695e491d26bd254354c22e08e3fdb9fc7b828f446c7538fd631d5ddc9992dde920bec9457ff2021bf0ca34f87beb6235f42e3606a48b183ea3c30dfa04ee94b11969a308ae876011826eac0a64ed655e84e136da48efd9d4682ca0e41dba225b19272b3212b98fd0dd1ee708aaf2c16202252fba04d8d9a960103d431b8e037b6d9b97aad6735fa563600edf95e8cee2bf5d0977538db048c7dc011582cbf4d7f94df5b39928f015538f76acd23db98b72cc785554523aea0af7eba88041df512957cea80ee6df20da80aaf0f76e850b4d5ad7fb780a37a34f695c8dbb96f1c03dc28cf6c07e803412d3c0cfea9e94e300334e8b6d94ecf53e0811238c0417399ba16309ab6fbfd99f7574c552911c05ab03dd301a47daed3fbe186bad39a262d8edbde91902aa96169eb53c5f7506cf14671e5faff0e6f3c33932c9374486e727858fe5c6a8bcfe80ac5672bf1f62cbb50ea9025a48cdeab0abfac0dc1edb00cd8fd3e92d19e042ab6e319561c26b1d5fae34b408a926941e4defc5655b193e1538db3e88e657d0e15f5475e51b039e85c7ef373b7a9fde3d9aa569fb5965986049fce25a73449b953ab590071979a479fbb6b8f44f03c8a9831e09efbf4138dad207e5ae666ac169682300115e04907e090e27deb33992f89cb3b4e460f7a7c577bbdf749f2707ba42a8263496a56922cb7fa2d97a72753a98db9165de7083ddca4af6f640d1fa6430e47ff43c22f87206247343bb0d365a77d459edd065ea228127ce1a1c56fb535e109fb0ef6562d04459b2ee1b7f8ac7666eb3a44ceb809419d0cbe0c4ecfc274d4a3ee511e1060b3b7948c86bd3c9effb965d5429c099df9b14c9e0db340c7090ec8ee9356c8bc8a6b8b4315b9092051d69b9ec86715edfe305eda73d1188b8426bdaa49329fcbf916eb555e23aea4b57b9c674ecc0f3107ed0b7c41008a61d0b77af5f5dffaa015ddd623e33ea020c0f8f5007d2bbf76fbd94ef312bc27b77d69b730e4eb5e4686e2e4950ce7a2084ca361d93c54e0b0b4ca9eed6710f1a2dde8875b877c226d2d10b828e7e346aa2bac738d1dda914d2a8089eb012a9ce5359ff74561150f7202ccbf5016fcecfa22f1489631524a022332806f7e9daf39fb8b774d4d3490107c2a4d27546d7cb2270b5804d01c3371aa06a3d180f0b4df11d23a6545157c1eeaf1edf0a02f492d45a639aa6e9d50246b966a2eec5053d40bde13b502a2f86f384ebd94d6e4f69cbba9ad9a126e2a0eb6dd84d129e9c5d2cd5763d05420e283170e73be17bfc612941e1b6b035f20d1ccf010faf6188ef76068b908fe5519c772bcefbbabcb7ef717e9b906a62a0b00ec56bbc0fcd72f08f3ae1a257dc847874f420a42d73bea2fac9900baa9dc87b3cfd979fd4b20d754b0ec92b8afd19523a2c46f913febd7489a156ad0c69c26a0472db960cb7f73a020e202294f29ea25eb51de2d4f72a2b9b7f9d1a379492e98c9514e77b5b516f32b974f67ea7a33398467e33e125db8994913ec7721c66a31407fdd147f6275f768bb61f1288e6318610f2a471ad84e88230363edf13c7173da724656392e8c08cc53729080ced477ff76cac77e2985d38272e06caa793d60d14d5df6d051699deb8a51b7749b20e0115e648a87c8b44c8b26d7b8f6447097b0566ce388a30087c1050fb2475d8540813a0c48e902419e37524d0744b92d3f14081fac0a101232565bf45b70cd90474fb38bdd6f48c9a46c523524a921c4eaba009e37655e3529a15a16dec68fa4baf62c51b6c670d39fd0a769f4714311d0e63ec0c5701fef71e22c29614b3ce509ed448bfdde4648cd32b98747e93e2cc668f83aece521ac4306960bd04241c2d5e9bfec9b8160603733f3d015e7fa2527ee300bf6de1c2c9fb0dcc3611941dddf5c8129b27f09801169b0fb1913bb7748e7193f87550479d6b7ead333d8da7e98feb892cf92b2ef450335bd4d2eb203c698b59d862ce9096c030eccc5885835f37fe2aef3184c3b75b2b9b3803b526bb32e0194d812eede8bfabb3abb84c66d8c6450b08e5009647749da4891b4e93e5dfc5de09b546cb2a80b9cf2701d1ca4d15bc5da86caeca4ab339b179d9e497db1450d4681d32da23c5f3b8c24a9f86ac1d0df434821823c47d8483ec86b011ef6a219cfa14b74c2064f029d636d8392494fe106979e179eb7ce234a4dba1551cf447309efa5ad1f70556ae08bc777714d586578effae2cca477e2ce61b227f5e2d5241e493d0b2379b621219faf8d8be44ea9ef4eb
b8b6aad01440d0e9b10c964066e849128334566f5dc39724a8ca22d8d057cf32c7916ad2870c5eda332f87fb65e781b419a10fbe49630d47f1b115545309a5390e505702728ed43936c42d1885df2f9bb06cc834dd68d42175d9289a91879678ae06032bd324500e51f0ee40b14d1cf6d940a5e0850d3b202685963b8035e7ada7bcb80dc524d19188c2160f8539edf53d2fb0567ebd56a1a2f7c43bf7de5532a50b9afb27f43ea631ce59367a3274ab8607452f9811f4400dfde9cb049b2a475c8943f4f83e0600470b4f6ce177e2198c8fe569d2bae0a1d0b139211d741024b26f3a9916ea3c0b704b36c93275a4991dec8d91a541ce27b332183a4a1fc372dd34bd6b876cd1524e6600b7db242a799f20837376307110d7454e187b6c56d62a574e51e9e5f95e81e3ef5fa42c3518bb3c42ce4d49fb3395c42c2d44bd829594566f4a12e1ff8c4ecc5c34aad3f1a8721ce44d3b696863434948aa4770927401e4b213842605e41a3412a73158d51586e5311d5314de755737d4a0cafad4b8eba58b552ba767e4462ab4a3bf2b82d952bcbfb28cd2746b65ed92935abb4fafe2e7603317dadf8fdeeb6b46c97ee02e5727b8bb31ee910f86f45add2098d6a6b0fb2bcbc12e7b8ea5ea851984dacaad187134e5163013745e85e19947b2ef56735d9b461ed5ddda0e538b1239bde55ba16bed3b1dc453e4148d89fcc32288fbebd90b7e1daf6ad6ece865da477c8658075866b5c4994e8d0632391d8c6ebe0efe37f9c439582cb28824c2aad6548342eff83fda4d7d90cca7955c66b2ae0274ef207c2fa6fb7504ef4871a5df25bf001b8adf8dfb19dc19ab03fe960cbe6f44b93418ba8ab173c075405cc6d28000f22f6c7cfe4063e7aca85914e30ef32ceb78709e5134030fd91b142c2ad6f00ff43bd5e9804b6571aaa1d57777a059f080d47c6f38ecc9dd1d1f1a593107c91d9f24eb60f5e22b58ba06e7f032e4d66ba8fa89ae8eb539d0ed255409b0d4b0a3681f1dc4f5a519211c8efc83186a8847c9913756ab7d0614fc275320f1e8312e204e6043a11af78587f65a0cf95d55d7aa10bf3b7bec83fe9ffadfd9f6c0b7d5e01213bf5a01142a4c25bbbd865480da06eeb7b7db6943d40b60b731d5857561556d87a5288ed6b575d80b60db8c0ef59c1cb3bb43c62a35da0ca1a5fdacc0f1d5b2bd59ddeb3f4411df1e5d813f78a5ca0eabafee8c7e1a0b55b8067c9f784d9910b8d22d8224b90d89c0ce85dd2c5c2882c213d7b8a3d07c512e572c8a9696e0e8e028ea45572196fd2c8185f6ba4077e87051020fc197b21b69774d6be94b6e50d7ea64ee11515ab61fc64e6f7ec85446e7c1033fdacedf6a627e3b29d8136f45ee98f445df5680b953daffd7b625d98afafcf8b590b67aed14f8b7c50af82c8387790ea496c9ebf781ce6e31347b5b9970ee98ea734ba7f8a1a1638fee252cb38afcfd435c9acba935b15dbbac6f3197f09d4a470d9b36f3a6f27cb73d4b7e840cdc0593074bb3a07881806e614767145003acf8c8336d6827b082a23bd71fcb7665e4b4351b5b86fe7167d89fd4a93480c8807658796dcb6edf799939dad35645b23e73ace82096c83a1f92c985c85ee9c963a9b4f9e6659d3f426e00ac8bd31d9cb649f6da7a5553b2f4303697ad1bd765759aa0ed9111a7c92963d4b6f09dc60c2d71b830725b8f6c932e917a0e122372a2cb1673408a6c97ffad46e71fe989cb3f377bb9906ae8c2528a6a0bdc4bd4c2394a9b165a79b9796d769dda30a4e5e214ecac12b45912b38e367e571f2816b5be12554f19934c0e95b94532d854d8100f26d26cc4686b82e5663efda19d69c39f1f8df964f52139ddf90f5a3f1acec7f5419b1d7b5bed7601a37fa6b26d98058c60cb6da1aa740d496e08fe23222321dafc8b2ef5dafe7207a57cd2fe009eff2518b1d9dde687a687139d184723b03f6120c1ffc62fc96f243971418a10003242b9ff8c297ef036d5f014622d5e377d18eeab535043d37fda41d58656c370bcc862527040d79650060b8c54388bd4cd1b2928f1c5aef361671ed44aa181b5fc7810481d8eecdb4947c02b126fa260e3f0c65446427135c9b7d5902ed16093a750188a454d594180b968e26c21fef0984fc1be54ca52bc141d99d9f181d0691e0970dd5a28c2882085db6cf7bfd0c5573c978f938214eb2d52c499bdf9a89b8409247df73b903ce48de5f7cd945ab6be939d157cb7ca6939054940b5dfdb32248ad51323d1742e31dce87abd534bada2b7bdf2bc45927c1696eb8fa6594199014f5cd7b07b3f67c1025435518bc2aec0836925d0e94ec05ef11cd5cac5360b2ec6718f9ef989848f0578cbb0070daffd728b0ef14a95bda563173c988a81d03993b69823404ac4d56dceabf1bbbee8147675140897dc12213ce461b48218ea733f121a1322f58d0223acd6a90de5c67c10f329b5685d17d3a89c22b94f778df6d4c9ef6bac8232d22d3315e234d85
2ef2e610ab99fd4393da933e9cf51073b5f3d590b5ceb456b12096965598290a50749826a65e2f965cbccd3b7151e0e14489b8836cb3a5d389f42d5fb53e3677d14ac4b8bfa239f5d0412781adc3d0372c7a04aaa04b66bdc26315138da0306b387ae992e575d098e65bfb1e9939f9bd1599eb9f1f82a9e0152ae8d39c9231db5f1eec0e7eaa6816c96b0ad41fcc5c4ae38fd91fbc986fd9fbd489690e7a2bc5cb7ccb02da6bd99e6c4bc5dc5f40eb863eaae35ff9d9213c59e7885dcf6cfd59aabcede31ee3a5118747747387ba8eb269ee0e25b265a9bbc221a064b585ffcae30cf99f47f3db6b84d1aeec9c44692ceeacca71a272e55caf2be6a35f145ce28b5b3f9d210067fed63657d6a16d9f30aecc3cc5f3a9c87f5e30fa950b8b2f3a16e5103c90cdac1098c9774c89ac73d126a10aa36fca4943fcfcaa7de8733e414f4387818c27720107babbab2cfcb6c86ffb69dec906d96f52077a5f1973846206e50ffa7b1f6df1c5b5e05a8ce7db5e86171f8378a8e9bf06f240f0484720e268f08fb19a35b48481192d56cea354cdade638f090769410659706b660bd7d2831544d06f7ab99ba58e8301544426f345571c39366d420fde95da95210d4e1201ac76b987594253b8a026ca7983ec54ffa7fe1e28aceda66bae4c239cfebad4228802d6a445a1aae432b2c2cfc203d59ba2ac771a31c00c09e84f66b8b1ba4fef1780bbde37b9145d36b9e20c013a9ea0b1663cd9e27cf2298b0c97b024a5f456c500261e3fb73d72c5747cf0bfab168d0b86af86a554cff342966fa5246f810fd0301505b26cead8f37024e8c6fe6affabba731d4211afe60b6831f2ceeee91baf42a0a657358a98cf640f4eabdb3cdb38e37e45e6420086fc74e6a0b7c049c807f4050248f3cd11e2905119288950c2d5817c1677c8af8b015e1f351841f938bff6b7f702c8c391555b99dfacf06b19ef48c67f9d07a701f6a83ea0fcda3205235b0cfe8202f735266cfd4753bf0868b2fb77d29b640731fbe17f366b43f56cfa7cdf8c87d47dbdc9ff7154b9ab83618bfc80970b8f49a946c85688965923f7da71649d15176882ce3be480a20ed60960549c04fe806646b5eb7dfb9cf95742f7bdcf91bddaac1dcf48291fb62ecb5a15303dd6ce6c21ea72bef3f683df0df65ff43e0b5da22b0555fbb0e7093ac81df61b4d0c7de6f4f98dbdfadc2a82085e0e6dca0e3979bfb16e562d76e7616305b60d3dfdb70c14d9a1ff16a729cd7400846432279f68e97c7c7bfdd362cb901153e51dd56baa81ef7159738c19452ca6bed8546d9ab03cc12bc2ab252149eef44e3551c81be3f0a446e8a00d84d210cc31abc6b750172010c6b7af7d40ac2512c655285043a888e7115b46a0eb9c4913d75e86cfa17e617272107598da589b774036cac697192c6f9e54fe53ecd1c4ed1c3d3348ecccb0340582ece994cfa7544e5cbaa41051ee4ccb5d6d762e812b7d5cf8b7e76d6300ba211c48265521fdab33cd858dabc97167a0bdb78f26637740407398b7fcb78b9d435bcafb3b379fe3e5411036f48703ec672e34b8e85ea1e163f8fdeb146610cd93620ccfbfc85ae4bfdfd1ecdddc5f3c8e0fc5630ec43bfbf6708028696dfa94200db11cbdfae810e3d55ca1f855a1d9b98377b54aeaed3156391614ff1415146cae45c3f4c0e3f377f90d7e60c28c34e958c66ac091a66494a9cb1dcedfa5cb6eaa38f062664f113c3491e28606088d11e446a8cc41070298927be4c68a85cae340ee34af8e008bd1e2516534aaa3883a6f986ca290ce83bd717eaddf95888e66455619cdacc38912ffd9a44b0b072709705ff5c70ddc556f31d9e6c2b99e59bfe0272a336ec7914da06f957f27c378a6c563acf94ef5a91fa747893f2383b1ab848778abce8dc0a2b42172a7fb91d3cd8312aa98d155b5d9f13be2e5545b3d9bb3050c2447aa007575ff9aec56517c977752a13b199d82e299b253e708a21c1dd27d6659c7d1a724642b142c013122f6dde657cec6db586553b72935463668fad9ae186996ef38fdf6fde5aa8c4a7f073521f3cc2bbd9e33c4928a6a4726ce3a560659a41293701dc275c39eec03d57a359ea48ebede800e9208f4dbfb377293efad76fd92a7e6dedf8a530149879a8e50627059046c0c90e886e1790d5354e48fd8dc621596c55063784bb35c34ad25eaa8cb23a361091d4971ef7bad1cd83e23d9e40ffeb5a70bd0eeab408b4a427f6304b2ce806fdb8ed7b7e8c2102f66bda076abba771cdba1d0f5d7d456779ce8bef20929972e7417e6579a74d50245e5cbed2f04bfc519ec77c6234fb8c257515e29d1edb96ba6e551487d36a0cded60da9fa7a843f62599a1f6114afbe43b72ded08043b37e4dcef5a7802de1596ab457049a003aec15d0fa943aa0578db81492775c046217a204204ade6b158bc7c4b7bc33e771e49b9ceb385f3b6d11acd8d1c209d4b571b90174231bdbf9e4aff370b46a1e80398d19
fb0399980852005063035090e408d1296d28e85dc58e90c7b68196b54faed1a22c5cc356e99822825049239b3002b946218e67841c61cb008e59c891481218da34708d17d6896fce941b7b3084a481c226827fdfe19e4c7455a6a2cedf205affaa821833b63855eb5964b074d4b475196abf9c592d54bd98cbff0d925fb36333b5e7d4fc87ba654cee389c47ac9cdf1673898e71ec8b23520386b0b0f6f9b807204d85d772383ab5d24526d6c57c87c5e2d566b58fc8f8058b38064153ab6c3a50f5f06ea4764ab9d606f6377e7a2c7412efecb71a4cb634cce3a63bf5359e8814ed77ddc6b4a40fc204d9af66876d06ecd2e40f9366c1185eaf88108b40b6f0f57f5ae175f5d46beb5036134f474cafbe8f4ef2260df5ba2da17bacd1acbbcd64ddc03746ba3e928cd0afcfa2939a8d6f11924528922d9f10ef8a312f69d8dabd8cc20de9b7fe58aaea75af2de6dc862940564c61c19754187341caa90ec381760e1c4810d8e771335ea5cb1a5c5e8e558af3069660d79e1ad66e30ac224cfab91a1bad9a4e9ea244b7aa1544ab11d44a25787bf9532f5c319e6e34f803929fa09cac811af9740548b28ed6dc4cdfac9616072f317cb0dbe1118488ab770d8576f090e58f5dfb6288f42afdfda46ed59277f7e06823e5ea03e2d414a79f454d86db3e322e1350ea893dec7099b1ecf696a4a3fb89d359fe29ab620529dc6a7dc5d632b696de9f333f506f84bca9fd7f66c936398123c12073a51e335ef3a0bf6b80055a551bd036177b7838aa07680e7f92556853232ef151caad7d7b09b9c7dc04925a835e1a4683cac902eeea199831cc929753e8c3f00114ed86bfa35e9ee4371be2bf58d159b93fbb24933a7815bb02712340282141d259a2deb9a1788ddfe75b85568bebf1452dbc3ee892c8d824cf01005c143a6cea5ccdd9a8c70e9e1b8e832bde53a109fd498af345eebeea6e5334e304cb584370ae3417a4a465ef262bbd64c2e5f6696a59991b4e9bb50a7381be46d0adc35e67276e1bcf3d34b781f788bdc0e3f6f5f21c246246eeeb28ffe8e57dd2fd09088bb2771e9c39b56726b036ee0f8fad7fa03ab203db69d4cc332b60d74e967f245f20b3d53774b795ce491603ef7af90021219ab199480c9edcafce4ff364a1e88f97350299bca866791bf4a61fa014d7d2dee73f62d8a4dd890abc2b6e781f946316a65aaa83dd774b942ff8badc448d7cfa5e9bbcd6d30af77496d69fb684e6c5a06e8e90815da8943277b2b9c05d112e3b70393cb8e0980edbfe6bd772feadb2ad553ac867b595959b3f078d0e89183854b41b4770db8a79649c5e4340bb205fb32822d4115434459be3101fb6ca248d6254149a414e482b6f602931c9e3a8ad09bd5db8def3c766799423d12e9d3af353b1861f9dccc899694c256e7eb2403edcd4c3b1dbac2a5677b44ce17dcd2995d8223608a7b6ea41bade82b2b195675775f2610ebce08eb81bc4ea253467a9f76e2701555266147b79f031ac9418b8b60433299bb850f4e463b446136b33fb58fc684de519142b477e32d11c3a41041edba421c1620f43a6813020d1d33cda7df23d2a8c24b7bd7772149b8bcb3fb28e53c0195696a648c0a7272ccdc2c78b3b0bd52b61dace9f79755df413efcfbb1ff1d5491b5010dd2dc6241c7c9b541ad5cb251b0e8e0c432de389aed2db2d339a84afc67594e460dda83c019fee3e1eb85591d0f036a09addb34b852a7bf79c21e5592fe0cdf1c2420e73293a2832b2ac6e734834cdfaada0265071f96aee6b10662747f79a4f96f51c8a6049afcc43da5b766dea3c42378dd4994817ad9ba561fbea39dc16f8f2766bc8f4532ceef2fb54e879c403e6083b757a6c6055a22c035cdb49ecba97c448263dcc33f509c5c0cbc2e65430b8f32779ba737bd292d77fb5797989090b7e126f67c321f23d671d5b80911e32ca2cdb23bc79596c9b33e5c2941eeb7b7931687f94a159b72a93142774147ddbc71f6ae38ff359bacfebd6aa33676c46703790243089ffc240a7649d4e36216948e3e9ddbb71541b022b640f90253064f0f27276cb93193b54466fe1df047752677dc29364f4bd97793a6735752f97750206c10f60316b3f8d9b9466412e997247ce9d9bc0e8754ef754110736052404af720c7f1fdd7017150160b35b6a2640b906d58475de9060c4b7bc314722e967369ac9a34cda947e542d7921a81a8d5083934e92af91841256e9b82930ca87ee8640c7ae66c5a3c3fdab86dcda30e079976d22a3cf56f9c156559828b8f7334bab35b654f018f331dca72dbbde50541b951d543284d49d3b96f1f2da217632c5e3660283d27904a56e4c83b4cac7675134b8745cdefa91c16905e16d958b0dcb29b5d52020ed7de8cab9dd8ddbe2cc6c44ed652193041e66b8ad5b86c6de0b905daaf17ff243ef2b21991da5b8a18c47f5835c405b13017d9a23fbf126c53bb2777a6d6681715da504698dbdb9d35500a9a11a4
d1a41e1890c5aa928f33268c21d8f46a51abdc740aa3923f6b6e1b689af06a5cd1b983ab6111d9f89197e96accd56b7923fdcabc93f044a05902afbe293e4e17bceea5769574fc49020f2ada228b29f86e86439101eec7d551a882c8260d2798a5464341fc08b03f2a1e8d1420b00aeb0629742061b9f53ca9aafe67f06fd0e596b2f47f6197de46646ab70adbd7b200b375934a19280eb3be89c573b2b44ec0c4239f54bb158f23c06b65dd3c5c5d16754e0bc66e647409c38bad393e301cb6c511d3d4194c815b6ee562e0b3e5b1eb54782aa91a9b5957e61c9bbf37f51605e858549b4f1858455860c5ebf685d8f1c864f881e4b7e4b73cf866beeff7232c6d5c08c9b89ec81a9b0337545f8115c4fe44dd6f6147e6cb7788f7e3c1feba373da09f42d731522c32785653d33ff195bfd89f0c06892919b3a90b2ba324d0cadae87dc7d2f795e9a33abd500cdb3ed36975add5b149376a0888e6995023bb5fa7fcbfe83556309b452a436aee6e7d7da5242f03c43630c3bff362adec8ec4974e330567e81311d2732d20ec8e01c497b5fc6ac43f50208e9e11094e2b44a07dbb975e43f0eb0dcf8e9ae67391d3770db07f06166c585915d12f618a50f2c952ca38edf636caa511bdc0e70cf567fd7ab6d1280e5650f6dd4fb2e9951a012d5e2fa22a90e7e0d29637d00a0226cb13c1a90c6a7989063d314c91a4da4c65ac011231320ad2fc335fe7b728c139e58682a8aa4d296c0257234e89a8e6890ef8d982c244fb8f7b391167fa3bd52cd13704de96aab9589f7ce03532fa1b23618fd29d5d96a7ec7c43f2db64562ec1213e364f50a021c954c24f64b84332b38aff3d5bf23a2be97d23f7740a072d0e248993143a9d28afb08a577eabced00f638d953ddecd6bc8c54deae66d5d9d61f3ba20c15961270ac5e331437306200e89a2c1ba8b4748280d8092488258e7a006d2e5c47a83e02b71655ad8dc8ce9d7ae0302ca9ce3ad281fc83f71efb0f38afc85f9a02c024ea521b09aa7cb9fa6ec18b352d069adb122513b6a5c976004fa08ac546aaacad730c128bb2233b504640eb977697238304a8867e07ad5452827ce5c57f688d69f8b637231bbba36b435b4c2eb54122e20a256b316fe0b9f09ba49c6e890f1c125b3b0a54055fadf566b4116bda77d1c1f54f6179470dbc0f18a750471e97b4df70e1f7c8d65ddc7724ecde9cd8d540ecec35ea698f3234ae9cf110fbbcd70ae1e8c085abea9b8eecc97783ea2e645e696d4d62c47a714fea6c527ce28b679ab060070899611feea7a2dbea36a5e5cb0db1cc0d1954377b0b1a80063c510ef3435f8ab3c0611712212a0d3dc09efef11a5c9b2b6427913b993a6429e5dfa64530c7a064292300ac582663714d24541cbafa772c2ecfd224650ec4149d7f095af599fb9abe4ed606a3c9a01fd6fdfb8a39152dad05d896a3aa98b9b6be9484de85057d8f0a9fd38382f0a1b18ab6fe8e5feb15248b2852d4bcc5381d228f3fb833720ba581518d3ed51b7a7cbe1f1c883de8c0deaa92f34c54c14dccb5a333f84fb444fd93d9219be03045ea966a61a6fa74690c9dedfeea1ae26cc89c1e2352f9af665672f5c7ed01f12b6ae5a71563b2416fc1498d604aa8bfbefd0210eb2fa619d365dc507c15ff8a80e63e106902633f6625cb7f3a16aae1f77c8aa40d1dbe2f239219e19b2e238754b267c5c32b410592fd969ec8638dcc6c878f681999709a177ebfc6380a1edb6d88b18a8aad25cf4c1afca087d2c7297af4135394ebefb8d48785f045e894039845f0403969c4de08195ba4a280e664caa381bd709d7c9eec0f1bfe41f1569f31c57f98e3a51a1edc09c08334f181dfb582cb2c3abbf497508112620ec29490991316dce02458f3721ae0d09ac121586d5f3a83432bfd77c5487cc4cd6879f7de8737f10ca55ad7660dc66cb1060500cf5e9c0af985918520d774c51dbf55b310928707cde6b80ed4c2d5280ce9427bc3ce0778bf9286567bc07b784fe362188192180b0c726f73981741cae69be102d6fa51ded9d1d07d8f1c6ccf34e1e8041d3285d7a2e1ffab2fd909b96cad746c8c9583cbd611f00e4ed72c2d9b11db8c8f8a185b507299d124f35e17047e759729e284ca3f5c0706c7a4aea95cb944c0279106b33fb36c559a9cebb9011ec6532821684fcca72e59f709d986f69a8bce0c1567e8a2c3c239bd6f4688babbb14301ba31653cc4665540195d04a7b97adb80a8301807a2f71f3b296586ed84259dbecb51543e2d47814a6163f25c4250694560c0f6bd13a0d14137949bd3942aabee89af46cd1b044fae3eb930a1bfce61e7c0612211d38ec068089a90c4e8409c4a0ec5aad74c259357d9f6da5777d6d210cdf9fb64ab98f6efaeace77f23a73a5c5a333680c69e87460f55b1bec27fbc0173c3a491c1b82fa62a90399d7154a4540e96a7054dc34e6413ceb9e294344f2acefc9107306f49da2204f3ea2d3a38f8e9ff0a88
eb2807a00c2c0b9e01cd1a95bcc825572be38f33c77fc2e9b82af06e15c3b5fbc0c4c343e50dafd03180f1975357ed2d1eedf46a947a2a4209051ed388a54477abcb3a4a36808e4e3aa725822689f861e2deba71abdcabc30af4fe2e644b11f8eb3dba80cf7b3fbf9eb7f5d378c42397cde37b33f4029548b6dbe0e9c0c87dc3bf8d516fd20b510be907bd560ba1e60f4e36a8ac3d5b34218ceac208dd732706030a712d38de4857358e94ea547688de11de94de4247fc9e50018e1d93f53e29b360673089e92c250453f0ff13e6e117ce94250076ca5e4efcb5c9f30a6fc123f470ece1f640ba2bd4cebdc6856e6e9d0a2a4d51c48f12fafef263ea83dd64fd575692ce6caf6536120393b349436cf782839950db0366b2f935f509e45ced95961f6537c461daed9f8c1d1ccfca06965ce7b578a8fca69166d700e7c5b8646b9f6cd92d88856ecb9bc70559e86dce5f8d1db1707d3cfcbff2bb7b49b189aea939f994061ae816d5016989eb581451798fb22dce881832df99e472ed1b22339b5c4d3a2f7e904702d03ac3685ab7a8d354610e846c07074af671040f9aa72f533f064a27118cb01a46817dc3997988c4a18ae0e0ead047ad1e32825f0e6393951bab272e41d94bbc47f4657b8fb65c1bed5c6cf73cbf3479c0bfcc5196af8bc3b99fb18dfdde33f2c438cf00c29e0a3481010f7348aee0894d1cd0223956ad0ad94fc70469af646c66ad124f1f7f8f98e7d49fb16df47c538e2c08e1264f4e404edddfa5cbdd5efb24b8f39164bdee556930951e1ae491608eeec5dd8a644cab8f60cb51a3ba144d1189b2a4778da5a7890888ff7d67f41195feb8ac09071403687776963000a253b3d29adfc97ab7ba383fff162b0e612b4e2c8f29e9249b7da9aa24c45a051ad1a860a7b440a0e32e579920d68c4220f7415caf8171043b87390baf3faba39fdd606567b6be277d6d2225ead573ac7b662d8f8350d2accd5efaedf0a3892379e9c33752a908c4e8353c83350c3e9901fe74bfa688d776ff6022a5212451fc40883182f03a97ffc776fff4cd9255837c86e90fbf58070b77c5ef94c8c7be4d610722aaca275ee4b9d099f2516706dd3d2e4dd1b790c35a703909fabb2c0d4e405bfee8d795e0a4cc7e64f5adc7a63d5164b195dd2eb96f64dc501916dfb83db3c7cc6583e4ba71e1c042a9fd7e749279c282ed1667f93de0921052cbb7f540f0b3cebba625084b8fafb1cbe43a377efa6e9d423dfe1ec135b5989ce33b5a35454a337e3ab1a2894f531ffd41b758fa33ed4d81da600084666b7d308928d39d57a4fc5f19959928e2b03473eca7f85e8ac4813ae0084067d9777a015715f5bd52a5e6bdeb47d89c194a01037ecbfd749c81dc6f3882b2b146770fc2c6f4696bd3367de7a344d6b2cff7087159176bf8daf3f19b39304b7e1158eb406b05bc032a3102ca08ef31b0c76c7537b60e8617ecfe1cca51127c3000d092bb871f520f027cf9e3ecc7c309bec05243c83566866b8463eaba69ee315c97011a1faad7fe85634d8a02e98c60148e0290cbc5f129bf4a4fb125ee7f73c0a1543d799cf9b78f507d0a2d243b40c574dad05cbb562a5265c7e9b222c49a66619b736447f0804bacdd141456051e51d00752c417a1e43e963b81632b0d2f55d63814f441cb13f7b4de940618773c8e8793a46ad675042bf70a1a855826f35c3ca70b87f294b21b0f7f8fe518ff659654cd88a644ba0122bb7310da8292d7e248a5b6333f2bf518e185230d8c0eb552c0863c912cecd8c0b09e5dd12755956d655dca984ff8f58f27a09abfe2dbd04228149e4bb651dcb3e5c20995177c4e041db0e5987f05cf5373241f8589357630789f4beecf54990c575ee19e44d102cbe196e9b216598534bcbba914e5b9a9db58746d31183641842e627e632ae64b68ea00bf1028b28c72692b0080e7bfdf75ebe1ad250d982afb13e0eff041ae6dea39bd81c73dff596ae04e29ab13e510524e722e32a372b94b160c62ec1874dbc6f6b78dc6060c3ee2f621c8bbfedea0307578eb17529944596d52dc9cf3ce36a23489f1f6ac8e9f875640c87277067d226467554a246446ecc26839c062f167504a18f84bc277aa36c614456cd8e0f3ae3950793b9d0338b756b275b999be099882319d49996d670bccab251c316d07a258291731841c8d4934f5e24f518e625872d38a50f60de3cf9208164a18666c6446c01e07109a3b0c069a260c15fe25c92ac258f35cce04beadd571084fe6cd7b69ff6e3060a69543e3c3487b3624989bdcfafd05946e9358c2e1ed74ecee9559bdc96999ef308348d3d30b3b4bed56618d2ab85265575fbf57cf04e9727ebd4047b619e820f7392f794cbae73820f1a4dddb39beef22cfaa43e0bea8b69edeacac9f1ec94963f97e847037729dc41087632b491a497abf2cce87d3c8bfc1e1839ed3ca39aa07c38bdd0a716082ed14b4215107785393c572f2681b960944a5bd23697
6dd64033ef413be6096e44535e5f80f82388353d12094201cafafa91b5376c85949a3887b43b3230f11019f070f5ac8721f5ee403a431d2335d61303392077d836519df7e9ef24f934e1cb280f3317024d29086fe92be30717e9efba8d6eb07c41fd3a6dea96187de1d2ffdb63b04d48f67772b11a6cb34481ef41f990599e1e86752375526784a3c1f4d97b331c1fe66e325978ac95fcadee4000884a771d48dc3ce951a2c9e2f40eee6419948a0941d2743a3c5fb951c0a6770d012693dda2aa872fb47f22567cae362b4828dba8a8347fdcaf136574553e7c3b6794d13d19d2244139fb9732c6ddced4d823e4cf055d3d8c1f73915e837a6e0149255ca89dfbc9ba0e2bd03714a494b0877276524de61b26bac8cd51f55880bcce5289638aa7ec1bf01ff981c5a02bf6268aae8de093187c876595bdddbe1d85efae6bf8f0d6bdb8f1ca6a04489516d637c497ff04eb10b98231b11ae7b841709efb0c84fcfb9b45b50f9090bf4330d72adac441dcbeba3a05922cfa9661cda0c0213044a8f76f71e401800d733fc496316faa2ed4300e38e5d06aba03c22a60e319e59433fc794cf41555549896b7375cfed93906240286b97a608db0049c46d159d1af37c8bcde7f21d3595ad168f9e0e1a4808d3594d66b7abc82f1a6b26a1020d41c3c514e0f74c90e6e8f90923f97b1d1c68e2d5014b4c39e1f141481afe00ef0d661d9e7b7b406a05b27cf1c199b21cc989ecc0f11283d59dbefa13b3b3e42f316cb65b9fe685b3990a502e998c208bcda8bb6e2b005bb3f41b0fdc981949b0333280dd8154f4a58055e287ce4692ce17f75fe4e4363a687768368ff79d60856a1426546d91d501cb68528ad4bdba7c444cdd66926da18092a8eb2ab989029a03e8294bd288b07716737a010385f3788777be8fcc15a8a003f27eb855f7807b78948ee62546c22e2a9d9e986e011e942969c9561c6d3bed28b5f1ea2ed708af3a4b9d8442527d683f2af2ff0f19f5cdddd23702dac08cd3604ce8850c24134eaa883cb39a9d6f3eb2e0975201c806f7c8b237abad30f6261704960b0c859ad26d696e5ea9ec129890ae7b90110902c4ed91aa29563ee9b07e73534f2a48e1a48881cef1bf1a597006b778cb1861f364749880806355d95898cf1a622e5d323452b576741db3cd4ea3c3502fab211755a686cac6ed4479c67636b331c81e84072f6c510e9f42cd50bdf80c1bcb5fbe3a7bb1715afbf89443c08bbb2680588ae67a442e16d4677e345b0509a61d30e5f93414f4b535ecf4797f4ac1b9f2c66d13963eb36bd789c6fc90e79c9021e057df0b6a28c2febef82ca6247a6fac2ef981f2cb13e4ebd5273ff0155a173c85b7525a0401d00a5294f11e295b9d9230c20aee4cc8e17135e5a6484eb43a9a2dff0e9a776ca3bbc0116b24a1230e20a21ad442c2c0e9cf23227d9c74a87485e223d01bcebb5584eccd173f572f00c03187bf0b3a6f01da929040165ddf2da37cc04baab1ab65dea89e4ec45d18c78f6feecb33be606e3c724d914ebb81d9b2e4b21657a0c2d80e0ce3d66f9b5e2a090804a17c487107aac548c011379e2c14fa63c08dc42f95d673da6fbb601f56764bc54741ae4c99a753df9e472768ddac6e05de8de89a6450eacfd96b1f86658e733f5f9c3fb0a48babc6a474b17d518101cacc59e42dd0d22eb29fb9c9f281861dc9e38d2162b2ddad5af5a4b0882f7fb78bc431ad08eece35fa3d6fdb82b01630edd437613fd7200ffd262fa3c30daa5d9eb6746d421cf00b4633a3d869f33794a0144f0dd84a1dff5c0622ca345b0a2938040e70be33ed44c4f43c4addbe087942343fb326450946880b253a96e9691cc2dc6f1d67164887f36be09464cf65b571c123880b400fb6992b533e1f54f939da2f00d4ef68da75ad8f13d4b132e72ecf97752d3a6a46dfe7a1a17e533c0554fbd4296fed20664789062779a328ac65173ec7ded581d4710fda8d2aaf6e8a4a3f302722859caeeccf30f93218352641c32ba67dffd90838e36a735fe319b9b3237a9d3a1aaa332131db6ebc98e051d02d9593e1fd831a457465f2f01c7ff18f0f4819a6527c270fb83a6bcb0478e1bf2287dc1c43c61cf8bc241afd41841373e564d81f1c4ba420a6e71b068bfa6c501ed4d9e9d2be32e6821248821afb7a41a9f9152254ecb53c15cb2b453db2624ea39c534318f8d82227046c4c221d8c3c72ef47928ac3574f1df75d8b9e706df6d71208cb1f1e2b02daf03b5c40d0bcecaedd01961f1c529c76e87eda4361f14790ab7394804094ffe72a12f43514761fb8626b6efa2038d208beae9b3741e2f7f89199ce19521beaf5dfab65cd1a996d9235cb0c2d1a317270c3066a5a7e10c5c07a8517de954dad4dd801eda0c56201bdb05d5dd2ab9d5c714dd2c681ad80cde51ce503056defda8916a91b6223312fce40a2efdfa673af7153d26dd00d0f8f9548d864718e37ae77792f0b2f1ebd15536e2eab7861
1f5731947debe7f2865bbc12ed2df4c6b979a6eec953cafded230dcee32757228e93b7464ff4d70ce26dbe9f36f063584a5bfe611eac3aada03446a496d66353b62da3732b94e999699e123f04cdc52e7435e27f531422ecac8f975e050e04e402a8b044d5c138d43bdbc529c2bd33cc5d289dec0267a15834e25f8186fff7ff7b28b666d89a9c147e91ff11954bd7ad58dd0a9b64d8d62cef3b1338c793fefef81864607cefbf42345e1929170aecf64d76429d8b9c5d46d4f810b44177c9b49ce51084118516150c6eee2313f668ddaa7bc95ce7427cb7876282ad0e5dd8c7ea32272866dace65fbfd784964de578b9ad3a4cfad36202f0c4a9e21a20817764b4f8d51716c1a53da492be3eeec89308c0f282c9dda08d451b0bec08925414d7de85baf1a25b2df1e064841b8e3b1a54bbbbd28ac2a472ca53b3a562de916208b2e5fd4e266b4e5c871303538ded890008d06eadbb72eee48acd9202c8d842c6b8058f2869182b352f1eda0744a70a84206643f81f3a3ff88cd8fa20c673f2d556cb5bc4c2f8b73cab62f5e95b1c86115eafb0798ec26b69b9143556c28d601d3df060a7f66d7a04f86d643bcb5b2203461b45468c410d0d8ac00a4dfb7f152fe19b7249e96f7e95f26b3171de6dcbd60f948ecb02940eaf5ef7b0f174a4c9c6369aa5acddbdcfcfc7d10dbbd2bb2a37ec89f0a3678f0e13afbba2e93d6ca70262c61be1984cddba702a9b03249ba78f10e42238147473d46c4d83f6267e3b9465feb61709cceafd55b2e40d0f7f6dec8872bc3ce5d76bd95c74bca04acd83553fd8ef3de43c7ab75c563b92b4d1d4574ff1bfc30ca7cff34990827a415544dc3aee8dd4fa829fdc73a90e417be10d0f3b4940dfeaa1b63e342124a549632774e1f8b4ea8551ca0c5db23af8f9d4904b1c856fb1af5b1765e6e10c50f501cec51251ba43065460c4c28c62c6e78c608020d05908569069e923f62f4df9968c5e8cc85ddd1a24b3dfa5bcfda1c63f5c6a5586bcb442edb8477aed80beb98f41199bbbc651faf831da597952b2dcf9827fdcfd7ab73501c12d48290cfa42d54a9feeec882aac8be93cefb68b7df376f08ca181245c161f80f20aa18ed333545a7112d80bd13f7288117ba293e2dbdb89af2a360e95bf9104cf752a5ac0332af003fe04c415958454fd420ae8af4a590a891295c916a1b5ca1575fc907f8ab95b1f3475dbc483f4c4633817c248f7faf20f3e0179497d771d10f982113a7384c0da912e5208c14b3b1b747a43db2bc08f902eb9f0e8ac70ff416041e8f34b463ba0d63ae6e80d7835f3befc3e70f9df6c5384c82313ea953d482c3dc26e75f8e73f6b993b44f20b829b8a60cba964175088d874e762dee989a97e071d36bd3fe1054644594db29ad2dad2daf6cc9e98ce72f6b777246fbff8423431322d63dcc6746b32230ddab0e24699c3e729e81f630689f5683f987144eeba2c4767b615dd1027faa69ec220499be0218e66f0c7b9c97fc10716c60c47ddf165d4dee1157e93dd49568d1f930b9a86d02f7b30ef4fc98ae9fdcf2b0e38a40b6f10741c5ec1303c6d324e178b4d502328f6b4702413f29466fc7774d3a64a0770b39caec7e594e37a6a10f925e3a000c50da50d4fd56b24327b2ad8d9ba29aeb4cc5ada1cac2850c0cfbae38774dc3ba2e27f2ed71abc582da19b0bc57a6281241a548895f81ed1180a2b24499a02b67bea809269ac68b0830db4fb8be4bfcc87e278142f189c71069a48e3924e8ab44e32d2f8eef816b59591175c426a1ef5c7959bc219f895a80640512324fb81b8732cdcdde03ff0a6af260ea81f5379f7badac712b9822bd66f749473fd7b863f3cf1f04d8bb9607e471c0751eac01fc8c93bfeb3325c38b5dd370977fa1633fcc790722693b779ca576136e7423e4e5979bfd8e14905f9b5357af7693cc4d8c5a84bc0649ee9241f3814513b77f950bd3728d2377e543349a1a90d78e282350b331bf9de827780a6d63674993168a8d8a1fa759cda1e6c0afe0be05e14cda543f5256fae8c1dbedfddd1e5ad8662a35f9baaf8858965084f64d6fde2d48008c0d949c02a1e01bfac4ef2d96d83e45c4e72dc00ee043db90d055f918a1c0dae749391fd432e898f19629229829e56380ccdc208b49bcdb6606383ba481bc4be2187af221d0f8579148dd66dd68041bf92e3ff078df9c5df6164129ac31e21238169b163410e2a98163a1523cf3cc1cecfbd5ca7684ab88025e11abc05fe8c52ff68441efff3714e54d9e0417a1425c5f4f1d0e0dfa1868863ca918fec1f5a8e0b21f0ae749fb9295f85c25a4e289d8c0ac32d291f36e0e40ac6d92a227b2b70b8435fecb064531b84263e9dcbf45cdef9cb147f8a87608772e51b4ba2d86978e4102291c858de0eb0a51068d2dbf2ec3298bb593906f83b36484a9ef0019b156abc13a7cf84f7b36199315d609bbca4820beacbf30395b560809ea6acb24e546da890080cee950df1
9042d6f58acf0d3d0bfb51f02a8e54f76205ff34e670aa75199fc8b4e9765864d8f5601b51f3dc06c9734b322ac4bb5fc4f7059f3ed147169201209b4ea02cf358b5b0ddd6c9890096325e581a5db36dc3225a00ca4be484d58efbb25e3fe537e96e916f0cfca18b8b48bb3f6d9e9243bde52698027d1ae1c050722cca64043a77b68a9b0221b1e02a1eae592186e858d433861679045e6db6f68d3c3dac81a0eb4cf1033329bfb294e4d0d823857c6bc88770f11fb0d1004a410fcd26a5411acb8d85a0c3c188d3ed77c13f7c65c0afb93429da02d6b082c766a46165b91b8a44573e68cd4617bbe673528cd09399890af88c50ff54bfebdcfeda611a7cc5f48f0a67a66ffa9e812e8d9c110309ee9c8a11c9694317b00abb81279ec2f5dd5f52c0f17ea50b92253da2d62318daac6fce4f183302fd9241c03c4cf599d8689bc6afa7257b6e445ed97879daf3ea4fc54cbe441b39838a4449747d1f6c1b5263aa19e4288007888b795549a9f8f68def34ec583a3f88e1c0b9a9b5204a6caab1af4e0207024276fbc31a015e9c86d723dd032d5a57f4064823ab5afedc2e461fb8bbd78402ad3b9d9cbc01afb755b63aa9b0685fc540a24d669d3279fed66609863606aa934fbf0ff767a5586dcbf9e5c0f947ca68d524e36989ed2fd504ad21a3164b7a61bb64a2e761cd81d3524fa7c41915d1620cfa5f9c583be3e1ac14c0c82635c755b954e9ab017a25d35d275b8848051fa154620b5e9db540a9baf6f034b0eed0d88a423a41da50d769467bff952541dcddf5d8fa8b4b4f97fa4634fa1118712eae5c2fcbd9d6762ad60f909f087e4ecf7a97a1d2e885e720ca8bbecb04500277ce1ae37882d034de86b3e6870377889b4a82e83b29b05438b6b2a3811403da177744347832ed885bdb25cbe37ee9d092a2fd0df58e064fb42969ff20c39e915fb4af981a28d032350d3c23f8259a5d7bcdb7544f7d6a815bb0f822e720cf6087d0dc5627b4d2f01d074ef50d6fb5f17794bfd07c446bfa367376aa44b801eb7e30bc5107fccc546c16c52b06e76c4830ab2f8daba95bb0f3a6a5991c267608b0e37a66099e2dc6f24f75b9fb4b3edb65d3a4d9850d487323da40e7cf87da2a7580f0b7da764eb92b9a3053e7e55f0d6f45ceb0333e8050ea6f8b7954bc22075fd3b619a6b09594a0be2aa94da2fadc9cf8e86bd4845a1ae32e571d589e4f59b3edcbcb106dadd32c21c8c476261b7396c924317ed821f4b414d83777a4a5d0db38fdafe99febdf68376631289ba334bf7da9d19ad3999e41348da8bc95b294beaef0e933914029e5e7b8f1ec3086f0dd1f8303f8f6620bbd6c5bae29356e61a1b2b8c8f368547373f4205c3c989baa78e1191da15c4e48c5e98b0d94164613c248516aad49b172d6d0e7afe12d81358cfc0359719e12fb76f36c9629419071511329e0fb9fe544cbec302eb6b7f73fa226f7fd7c662cd248e8cc46a3bd640b9c9677a77947f545cc44a054192a1f7641729f9eed9e9188abe61fabc1e8aa984dfd779d6b0b93f78a089d8187634315aaffabb80e19a1cb8cd15d160937ec91c1097ebf119a296b07485eaf9885760f5deb6a389f534b2679322156b7444a4e8bb34f4ec203156495336766009bec24c2b26acae5894f30025b7a9d28d344f23e274e36071e125121471b585dd1bf65f6b19acec147a948be300cc42b07460aa6bb1a29233fd085fdfe7e15a442c1e6f1af8c9739c54709b70683649e7363fd6bd9d00bbe7846c948ea9eb072eee4fac07a2efee6d7392062ed28643e6c56ee1d021277f08921fe59ee61e07ccf28e5d87a7a6aa514a69196669e600fddc0d51f6da190ecb56a2387fc21b7cfddaa01d4f4f9b0cbb3bcba19fc57439af6aa4bfe9cd2da0fe1d6287f7558ba05adfcb2896e877daaa1635b56cff8e3071da2ce3adf307226a7e2c88f33cf485a9eecbf27b4e986ddfa67418534a8f116a93682f414dff1c458c4f1ba3f342a9228549bfd603078d07061da145dde4ee6021d76bd6df943e5c8ac0a761803387b46a5c166277d4e5a67698608a76045bf4cb81c821f9133a511efc687362d32435f0f2ea6e5cfb24dd2aa201439c7c5cf30782e1b5e16260679ef77bd7abd4b78f153ec69d2e054c520c5d5031809c781e1799705561ace4679dcf33875c334f8a62a9ea0a379c83dbc0dd8359174403e241f19d70abf4b7381810f63462d779543b8f37cb344bd28906139395e8d568fc3cc2a12c77caca902c0aa0a07fd001042182f0332b3812d65ceaeddcbe3f41cade320cc6f15fc245895c5c4f820372155a785f488e54f8d0453d1094448d61540bb8c629a5295156095b036b61c9d6f631fb65293ec16e46d69eec2b025b82b4daecaaef5999df5c99849f07537e31d2bf1d4b70dda3b7cd651b04a0c334e3f05b02dbf6a6868f26d6e9ce254c583653932a3979e58c023582e976cd8185e491d10a2a57d6504204ea005dd8599f3427fd
e2b688c9d06f6de7e9425b8976b21e9de305f600807b0c1b09148277741a9ab9b96618d03a1d457af7dd7bcfb1afbe8f0b4716a660c8e3b8b33332ddf043857b594fa841dcecd3ebb3bbc9ca310c541ed583bb94ee37875d405b12bd0453546cf0601569238791ec898521e3ef8de5b4156aef5502fddfba88b723561b5b791e024ce65dd3a888b6abaf6550e579a73a5f88537d6966ad4232f9606412c863239ca8f0a070df60b32739dde141fe1f14ac9fd402e0d1702fb1796712c54803fa4f979b12afd0b63bd8bef38813c0c651e0503f058d2a8120249337ba7b09e89e736d3b2ef11b87c117def03a1ce67709a756426e5afe91fec17929e4e8905a058a6131ee3ae14e58f63a81d6f18d54cf9aaf0028eda6c065ebbfe0eeea5b2b8d88da14046847a58c9f4825f82c9af4893184726824c867e0aba615b6344529319e11c3396a42d97705dd31555af7e05b40637c033025ae1d82230f3edda382c482c9adb4a91e42d7196dd463b48fbeecc895a6fe6429bd12002716aa10424645adebe512c6314cecef677cfe6c62d8c2d106d7ed5ce92dcd55e80a3c766f32d1b9b288a63cfdf868cb632914a464646252839312772694e9220bcd748f033830f82a25bc2f3b0e9d86fd78bf37b7beab07f55d0b425b0a9b9842b08de52828a3cbfca156f44c14e9fb6a45be40145b1d76c48ec1bef40d022d7875e76ea550376b8ab110f1d4c460a005ab40b49484cbc985aeb9e6418bbb67bf4b4c39e882ba91084de9662c2ded559e4ecd5126bacd38afe57c017f70ae984432ad57481ef9940e79950051c9bcf9691703fb67a984538563a820783eb0b51a95ebedca9893132a8268935ace2cc95380f02b8c155874007c2182b4a48b18cd622b998bb371e181f37d16b4ee5437ecd419c865900b549a14b9b3ae03399b324458ddd96a1e9d163c8532e19f64e778ac71d8c8ac6072396c5a5d9337704fd15ffb8407cb22b914033fe85382d04f9899b2d13091e03cc4cab0bf1e84f6767c0e29ecaea8f5e9d0cf07be7fc715eb8b1e44754edaa0ccc51c5cebbd29d1349658869b160eaa225f56af4f30f0e\""; diff --git a/primitive-types/impls/serde/src/lib.rs b/primitive-types/impls/serde/src/lib.rs index b03a87bde..50587a91d 100644 --- a/primitive-types/impls/serde/src/lib.rs +++ b/primitive-types/impls/serde/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2019 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -8,8 +8,16 @@ //! Serde serialization support for uint and fixed hash. +#![no_std] + +#[macro_use] +extern crate alloc; + +#[cfg(feature = "std")] +extern crate std; + #[doc(hidden)] -pub extern crate serde; +pub use serde; #[doc(hidden)] pub mod serialize; @@ -19,25 +27,30 @@ pub mod serialize; macro_rules! impl_uint_serde { ($name: ident, $len: expr) => { impl $crate::serde::Serialize for $name { - fn serialize(&self, serializer: S) -> Result where S: $crate::serde::Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: $crate::serde::Serializer, + { let mut slice = [0u8; 2 + 2 * $len * 8]; - let mut bytes = [0u8; $len * 8]; - self.to_big_endian(&mut bytes); + let bytes = self.to_big_endian(); $crate::serialize::serialize_uint(&mut slice, &bytes, serializer) } } impl<'de> $crate::serde::Deserialize<'de> for $name { - fn deserialize(deserializer: D) -> Result where D: $crate::serde::Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: $crate::serde::Deserializer<'de>, + { let mut bytes = [0u8; $len * 8]; let wrote = $crate::serialize::deserialize_check_len( deserializer, - $crate::serialize::ExpectedLen::Between(0, &mut bytes) + $crate::serialize::ExpectedLen::Between(0, &mut bytes), )?; - Ok(bytes[0..wrote].into()) + Ok(Self::from_big_endian(&bytes[0..wrote])) } } - } + }; } /// Add Serde serialization support to a fixed-sized hash type created by `construct_fixed_hash!`. @@ -45,21 +58,27 @@ macro_rules! impl_uint_serde { macro_rules! 
impl_fixed_hash_serde { ($name: ident, $len: expr) => { impl $crate::serde::Serialize for $name { - fn serialize(&self, serializer: S) -> Result where S: $crate::serde::Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: $crate::serde::Serializer, + { let mut slice = [0u8; 2 + 2 * $len]; $crate::serialize::serialize_raw(&mut slice, &self.0, serializer) } } impl<'de> $crate::serde::Deserialize<'de> for $name { - fn deserialize(deserializer: D) -> Result where D: $crate::serde::Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: $crate::serde::Deserializer<'de>, + { let mut bytes = [0u8; $len]; $crate::serialize::deserialize_check_len( deserializer, - $crate::serialize::ExpectedLen::Exact(&mut bytes) + $crate::serialize::ExpectedLen::Exact(&mut bytes), )?; Ok($name(bytes)) } } - } + }; } diff --git a/primitive-types/impls/serde/src/serialize.rs b/primitive-types/impls/serde/src/serialize.rs index da4fe491f..3017170e6 100644 --- a/primitive-types/impls/serde/src/serialize.rs +++ b/primitive-types/impls/serde/src/serialize.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2019 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -6,12 +6,38 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::fmt; -use serde::{de, Serializer, Deserializer}; +use alloc::{string::String, vec::Vec}; +use core::{fmt, result::Result}; +use serde::{de, Deserializer, Serializer}; -static CHARS: &'static[u8] = b"0123456789abcdef"; +static CHARS: &[u8] = b"0123456789abcdef"; -fn to_hex<'a>(v: &'a mut [u8], bytes: &[u8], skip_leading_zero: bool) -> &'a str { +/// Serialize given bytes to a 0x-prefixed hex string. +/// +/// If `skip_leading_zero` initial 0s will not be printed out, +/// unless the byte string is empty, in which case `0x0` will be returned. +/// The results are consistent with `serialize_uint` output if the flag is +/// on and `serialize_raw` if the flag is off. +pub fn to_hex(bytes: &[u8], skip_leading_zero: bool) -> String { + let bytes = if skip_leading_zero { + let non_zero = bytes.iter().take_while(|b| **b == 0).count(); + let bytes = &bytes[non_zero..]; + if bytes.is_empty() { + return "0x0".into() + } else { + bytes + } + } else if bytes.is_empty() { + return "0x".into() + } else { + bytes + }; + + let mut slice = vec![0u8; (bytes.len() + 1) * 2]; + to_hex_raw(&mut slice, bytes, skip_leading_zero).into() +} + +fn to_hex_raw<'a>(v: &'a mut [u8], bytes: &[u8], skip_leading_zero: bool) -> &'a str { assert!(v.len() > 1 + bytes.len() * 2); v[0] = b'0'; @@ -33,37 +59,120 @@ fn to_hex<'a>(v: &'a mut [u8], bytes: &[u8], skip_leading_zero: bool) -> &'a str } // SAFETY: all characters come either from CHARS or "0x", therefore valid UTF8 - unsafe { std::str::from_utf8_unchecked(&v[0..idx]) } + unsafe { core::str::from_utf8_unchecked(&v[0..idx]) } +} + +/// Decoding bytes from hex string error. +#[derive(Debug, PartialEq, Eq)] +pub enum FromHexError { + /// The `0x` prefix is missing. + #[deprecated(since = "0.3.2", note = "We support non 0x-prefixed hex strings")] + MissingPrefix, + /// Invalid (non-hex) character encountered. + InvalidHex { + /// The unexpected character. + character: char, + /// Index of that occurrence. 
+ index: usize, + }, +} + +#[cfg(feature = "std")] +impl std::error::Error for FromHexError {} + +impl fmt::Display for FromHexError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match *self { + #[allow(deprecated)] + Self::MissingPrefix => write!(fmt, "0x prefix is missing"), + Self::InvalidHex { character, index } => write!(fmt, "invalid hex character: {}, at {}", character, index), + } + } +} + +/// Decode given (both 0x-prefixed or not) hex string into a vector of bytes. +/// +/// Returns an error if non-hex characters are present. +pub fn from_hex(v: &str) -> Result, FromHexError> { + let (v, stripped) = v.strip_prefix("0x").map_or((v, false), |v| (v, true)); + + let mut bytes = vec![0u8; (v.len() + 1) / 2]; + from_hex_raw(v, &mut bytes, stripped)?; + Ok(bytes) +} + +/// Decode given 0x-prefix-stripped hex string into provided slice. +/// Used internally by `from_hex` and `deserialize_check_len`. +/// +/// The method will panic if `bytes` have incorrect length (make sure to allocate enough beforehand). +fn from_hex_raw(v: &str, bytes: &mut [u8], stripped: bool) -> Result { + let bytes_len = v.len(); + let mut modulus = bytes_len % 2; + let mut buf = 0; + let mut pos = 0; + for (index, byte) in v.bytes().enumerate() { + buf <<= 4; + + match byte { + b'A'..=b'F' => buf |= byte - b'A' + 10, + b'a'..=b'f' => buf |= byte - b'a' + 10, + b'0'..=b'9' => buf |= byte - b'0', + b' ' | b'\r' | b'\n' | b'\t' => { + buf >>= 4; + continue + }, + b => { + let character = char::from(b); + return Err(FromHexError::InvalidHex { character, index: index + if stripped { 2 } else { 0 } }) + }, + } + + modulus += 1; + if modulus == 2 { + modulus = 0; + bytes[pos] = buf; + pos += 1; + } + } + + Ok(pos) } /// Serializes a slice of bytes. -pub fn serialize_raw(slice: &mut [u8], bytes: &[u8], serializer: S) -> Result where +pub fn serialize_raw(slice: &mut [u8], bytes: &[u8], serializer: S) -> Result +where S: Serializer, { - serializer.serialize_str(to_hex(slice, bytes, false)) + if bytes.is_empty() { + serializer.serialize_str("0x") + } else { + serializer.serialize_str(to_hex_raw(slice, bytes, false)) + } } /// Serializes a slice of bytes. -pub fn serialize(bytes: &[u8], serializer: S) -> Result where +pub fn serialize(bytes: &[u8], serializer: S) -> Result +where S: Serializer, { let mut slice = vec![0u8; (bytes.len() + 1) * 2]; - serializer.serialize_str(to_hex(&mut *slice, bytes, false)) + serialize_raw(&mut slice, bytes, serializer) } /// Serialize a slice of bytes as uint. /// /// The representation will have all leading zeros trimmed. -pub fn serialize_uint(slice: &mut [u8], bytes: &[u8], serializer: S) -> Result where +pub fn serialize_uint(slice: &mut [u8], bytes: &[u8], serializer: S) -> Result +where S: Serializer, { let non_zero = bytes.iter().take_while(|b| **b == 0).count(); let bytes = &bytes[non_zero..]; if bytes.is_empty() { - return serializer.serialize_str("0x0"); + serializer.serialize_str("0x0") + } else { + serializer.serialize_str(to_hex_raw(slice, bytes, true)) } - - serializer.serialize_str(to_hex(slice, bytes, true)) } /// Expected length of bytes vector. 
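As a quick reference for the new public helpers introduced in this hunk, a minimal usage sketch follows. It is illustrative only (not part of the patch), assumes the crate is consumed as `impl_serde`, and the expected values mirror the unit tests added further down in this patch.

use impl_serde::serialize::{from_hex, to_hex};

fn main() {
    // `to_hex` with `skip_leading_zero = true` trims leading zero bytes (uint-style output).
    assert_eq!(to_hex(&[0, 1, 2], true), "0x102");
    // With `skip_leading_zero = false` every byte is printed (raw-style output).
    assert_eq!(to_hex(&[0, 1, 2], false), "0x000102");
    // `from_hex` accepts both 0x-prefixed and bare hex strings and tolerates odd lengths.
    assert_eq!(from_hex("0x0102"), Ok(vec![1, 2]));
    assert_eq!(from_hex("102"), Ok(vec![1, 2]));
}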
@@ -78,15 +187,16 @@ pub enum ExpectedLen<'a> { impl<'a> fmt::Display for ExpectedLen<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { match *self { - ExpectedLen::Exact(ref v) => write!(fmt, "length of {}", v.len() * 2), - ExpectedLen::Between(min, ref v) => write!(fmt, "length between ({}; {}]", min * 2, v.len() * 2), + ExpectedLen::Exact(ref v) => write!(fmt, "{} bytes", v.len()), + ExpectedLen::Between(min, ref v) => write!(fmt, "between ({}; {}] bytes", min, v.len()), } } } /// Deserialize into vector of bytes. This will allocate an O(n) intermediate /// string. -pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> where +pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> +where D: Deserializer<'de>, { struct Visitor; @@ -95,49 +205,35 @@ pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> where type Value = Vec; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a 0x-prefixed hex string") + write!(formatter, "a (both 0x-prefixed or not) hex string or byte array") } fn visit_str(self, v: &str) -> Result { - if !v.starts_with("0x") { - return Err(E::custom("prefix is missing")) - } + from_hex(v).map_err(E::custom) + } - let bytes_len = v.len() - 2; - let mut modulus = bytes_len % 2; - let mut bytes = vec![0u8; bytes_len / 2]; - let mut buf = 0; - let mut pos = 0; - for (idx, byte) in v.bytes().enumerate().skip(2) { - buf <<= 4; - - match byte { - b'A'..=b'F' => buf |= byte - b'A' + 10, - b'a'..=b'f' => buf |= byte - b'a' + 10, - b'0'..=b'9' => buf |= byte - b'0', - b' '|b'\r'|b'\n'|b'\t' => { - buf >>= 4; - continue - } - b => { - let ch = char::from(b); - return Err(E::custom(&format!("invalid hex character: {}, at {}", ch, idx))) - } - } - - modulus += 1; - if modulus == 2 { - modulus = 0; - bytes[pos] = buf; - pos += 1; - } - } + fn visit_string(self, v: String) -> Result { + self.visit_str(&v) + } + + fn visit_bytes(self, v: &[u8]) -> Result { + Ok(v.to_vec()) + } + + fn visit_byte_buf(self, v: Vec) -> Result { + Ok(v) + } + fn visit_seq>(self, mut seq: A) -> Result { + let mut bytes = vec![]; + while let Some(n) = seq.next_element::()? { + bytes.push(n); + } Ok(bytes) } - fn visit_string(self, v: String) -> Result { - self.visit_str(&v) + fn visit_newtype_struct>(self, deserializer: D) -> Result { + deserializer.deserialize_bytes(self) } } @@ -146,7 +242,8 @@ pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> where /// Deserialize into vector of bytes with additional size check. /// Returns number of bytes written. 
-pub fn deserialize_check_len<'a, 'de, D>(deserializer: D, len: ExpectedLen<'a>) -> Result where +pub fn deserialize_check_len<'a, 'de, D>(deserializer: D, len: ExpectedLen<'a>) -> Result +where D: Deserializer<'de>, { struct Visitor<'a> { @@ -157,21 +254,20 @@ pub fn deserialize_check_len<'a, 'de, D>(deserializer: D, len: ExpectedLen<'a>) type Value = usize; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a 0x-prefixed hex string with {}", self.len) + write!(formatter, "a (both 0x-prefixed or not) hex string or byte array containing {}", self.len) } fn visit_str(self, v: &str) -> Result { - if !v.starts_with("0x") { - return Err(E::custom("prefix is missing")) - } + let (v, stripped) = v.strip_prefix("0x").map_or((v, false), |v| (v, true)); + let len = v.len(); let is_len_valid = match self.len { - ExpectedLen::Exact(ref slice) => v.len() == 2 * slice.len() + 2, - ExpectedLen::Between(min, ref slice) => v.len() <= 2 * slice.len() + 2 && v.len() > 2 * min + 2, + ExpectedLen::Exact(ref slice) => len == 2 * slice.len(), + ExpectedLen::Between(min, ref slice) => len <= 2 * slice.len() && len > 2 * min, }; if !is_len_valid { - return Err(E::invalid_length(v.len() - 2, &self)) + return Err(E::invalid_length(v.len(), &self)) } let bytes = match self.len { @@ -179,41 +275,230 @@ pub fn deserialize_check_len<'a, 'de, D>(deserializer: D, len: ExpectedLen<'a>) ExpectedLen::Between(_, slice) => slice, }; - let mut modulus = v.len() % 2; - let mut buf = 0; - let mut pos = 0; - for (idx, byte) in v.bytes().enumerate().skip(2) { - buf <<= 4; - - match byte { - b'A'..=b'F' => buf |= byte - b'A' + 10, - b'a'..=b'f' => buf |= byte - b'a' + 10, - b'0'..=b'9' => buf |= byte - b'0', - b' '|b'\r'|b'\n'|b'\t' => { - buf >>= 4; - continue - } - b => { - let ch = char::from(b); - return Err(E::custom(&format!("invalid hex character: {}, at {}", ch, idx))) - } - } - - modulus += 1; - if modulus == 2 { - modulus = 0; - bytes[pos] = buf; - pos += 1; - } - } - - Ok(pos) + from_hex_raw(v, bytes, stripped).map_err(E::custom) } fn visit_string(self, v: String) -> Result { self.visit_str(&v) } + + fn visit_bytes(self, v: &[u8]) -> Result { + let len = v.len(); + let is_len_valid = match self.len { + ExpectedLen::Exact(ref slice) => len == slice.len(), + ExpectedLen::Between(min, ref slice) => len <= slice.len() && len > min, + }; + + if !is_len_valid { + return Err(E::invalid_length(v.len(), &self)) + } + + let bytes = match self.len { + ExpectedLen::Exact(slice) => slice, + ExpectedLen::Between(_, slice) => slice, + }; + + bytes[..len].copy_from_slice(v); + Ok(len) + } + + fn visit_byte_buf(self, v: Vec) -> Result { + self.visit_bytes(&v) + } + + fn visit_seq>(self, mut seq: A) -> Result { + let mut v = vec![]; + while let Some(n) = seq.next_element::()? 
{ + v.push(n); + } + self.visit_byte_buf(v) + } + + fn visit_newtype_struct>(self, deserializer: D) -> Result { + deserializer.deserialize_bytes(self) + } } deserializer.deserialize_str(Visitor { len }) } + +#[cfg(test)] +mod tests { + use super::*; + use serde_derive::{Deserialize, Serialize}; + + #[derive(Serialize, Deserialize)] + struct Bytes(#[serde(with = "super")] Vec); + + #[test] + fn should_not_fail_on_short_string_with_prefix() { + let a: Bytes = serde_json::from_str("\"0x\"").unwrap(); + let b: Bytes = serde_json::from_str("\"0x1\"").unwrap(); + let c: Bytes = serde_json::from_str("\"0x12\"").unwrap(); + let d: Bytes = serde_json::from_str("\"0x123\"").unwrap(); + let e: Bytes = serde_json::from_str("\"0x1234\"").unwrap(); + let f: Bytes = serde_json::from_str("\"0x12345\"").unwrap(); + + assert!(a.0.is_empty()); + assert_eq!(b.0, vec![1]); + assert_eq!(c.0, vec![0x12]); + assert_eq!(d.0, vec![0x1, 0x23]); + assert_eq!(e.0, vec![0x12, 0x34]); + assert_eq!(f.0, vec![0x1, 0x23, 0x45]); + } + + #[test] + fn should_not_fail_on_other_strings_with_prefix() { + let a: Bytes = + serde_json::from_str("\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587\"").unwrap(); + let b: Bytes = + serde_json::from_str("\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587b\"").unwrap(); + let c: Bytes = + serde_json::from_str("\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587b4\"").unwrap(); + + assert_eq!(a.0.len(), 31); + assert_eq!(b.0.len(), 32); + assert_eq!(c.0.len(), 32); + } + + #[test] + fn should_not_fail_on_short_string_without_prefix() { + let a: Bytes = serde_json::from_str("\"\"").unwrap(); + let b: Bytes = serde_json::from_str("\"1\"").unwrap(); + let c: Bytes = serde_json::from_str("\"12\"").unwrap(); + let d: Bytes = serde_json::from_str("\"123\"").unwrap(); + let e: Bytes = serde_json::from_str("\"1234\"").unwrap(); + let f: Bytes = serde_json::from_str("\"12345\"").unwrap(); + + assert!(a.0.is_empty()); + assert_eq!(b.0, vec![1]); + assert_eq!(c.0, vec![0x12]); + assert_eq!(d.0, vec![0x1, 0x23]); + assert_eq!(e.0, vec![0x12, 0x34]); + assert_eq!(f.0, vec![0x1, 0x23, 0x45]); + } + + #[test] + fn should_not_fail_on_other_strings_without_prefix() { + let a: Bytes = + serde_json::from_str("\"7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587\"").unwrap(); + let b: Bytes = + serde_json::from_str("\"7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587b\"").unwrap(); + let c: Bytes = + serde_json::from_str("\"7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587b4\"").unwrap(); + + assert_eq!(a.0.len(), 31); + assert_eq!(b.0.len(), 32); + assert_eq!(c.0.len(), 32); + } + + #[test] + fn should_serialize_and_deserialize_empty_bytes() { + let bytes = Bytes(Vec::new()); + + let data = serde_json::to_string(&bytes).unwrap(); + + assert_eq!("\"0x\"", &data); + + let deserialized: Bytes = serde_json::from_str(&data).unwrap(); + assert!(deserialized.0.is_empty()) + } + + #[test] + fn should_encode_to_and_from_hex_with_prefix() { + assert_eq!(to_hex(&[0, 1, 2], true), "0x102"); + assert_eq!(to_hex(&[0, 1, 2], false), "0x000102"); + assert_eq!(to_hex(&[0], true), "0x0"); + assert_eq!(to_hex(&[], true), "0x0"); + assert_eq!(to_hex(&[], false), "0x"); + assert_eq!(to_hex(&[0], false), "0x00"); + assert_eq!(from_hex("0x0102"), Ok(vec![1, 2])); + assert_eq!(from_hex("0x102"), Ok(vec![1, 2])); + assert_eq!(from_hex("0xf"), Ok(vec![0xf])); + } + + #[test] + fn should_decode_hex_without_prefix() { + assert_eq!(from_hex("0102"), 
Ok(vec![1, 2])); + assert_eq!(from_hex("102"), Ok(vec![1, 2])); + assert_eq!(from_hex("f"), Ok(vec![0xf])); + } + + #[test] + fn should_deserialize_from_owned_bytes() { + type BytesDeserializer<'a> = serde::de::value::BytesDeserializer<'a, serde::de::value::Error>; + + // using `deserialize` to decode owned bytes. + let des = BytesDeserializer::new(&[1, 2, 3, 4, 5]); + let deserialized: Vec = deserialize(des).unwrap(); + assert_eq!(deserialized, vec![1, 2, 3, 4, 5]); + + // using `deserialize` to decode owned bytes into buffer with fixed length. + let des = BytesDeserializer::new(&[1, 2, 3, 4, 5]); + let mut output = vec![0, 0, 0, 0, 0]; + let expected_len = ExpectedLen::Exact(&mut *output); + let n = deserialize_check_len(des, expected_len).unwrap(); + assert_eq!(n, 5); + assert_eq!(output, vec![1, 2, 3, 4, 5]); + + // using `deserialize` to decode owned bytes into buffer with min/max length. + let des = BytesDeserializer::new(&[1, 2, 3]); + let mut output = vec![0, 0, 0, 0, 0]; + let expected_len = ExpectedLen::Between(2, &mut *output); + let n = deserialize_check_len(des, expected_len).unwrap(); + assert_eq!(n, 3); + assert_eq!(output, vec![1, 2, 3, 0, 0]); + } + + #[test] + fn should_deserialize_from_borrowed_bytes() { + type BytesDeserializer<'a> = serde::de::value::BorrowedBytesDeserializer<'a, serde::de::value::Error>; + + // using `deserialize` to decode borrowed bytes. + let des = BytesDeserializer::new(&[1, 2, 3, 4, 5]); + let deserialized: Vec = deserialize(des).unwrap(); + assert_eq!(deserialized, vec![1, 2, 3, 4, 5]); + + // using `deserialize` to decode borrowed bytes into buffer with fixed length. + let des = BytesDeserializer::new(&[1, 2, 3, 4, 5]); + let mut output = vec![0, 0, 0, 0, 0]; + let expected_len = ExpectedLen::Exact(&mut *output); + let n = deserialize_check_len(des, expected_len).unwrap(); + assert_eq!(n, 5); + assert_eq!(output, vec![1, 2, 3, 4, 5]); + + // using `deserialize` to decode borrowed bytes into buffer with min/max length. + let des = BytesDeserializer::new(&[1, 2, 3]); + let mut output = vec![0, 0, 0, 0, 0]; + let expected_len = ExpectedLen::Between(2, &mut *output); + let n = deserialize_check_len(des, expected_len).unwrap(); + assert_eq!(n, 3); + assert_eq!(output, vec![1, 2, 3, 0, 0]); + } + + #[test] + fn should_deserialize_from_u8_sequence() { + use serde::de::value::SeqDeserializer; + + // using `deserialize` to decode a sequence of bytes. + let des = SeqDeserializer::<_, serde::de::value::Error>::new([1u8, 2, 3, 4, 5].into_iter()); + let deserialized: Vec = deserialize(des).unwrap(); + assert_eq!(deserialized, vec![1, 2, 3, 4, 5]); + + // using `deserialize` to decode a sequence of bytes into a buffer with fixed length. + let des = SeqDeserializer::<_, serde::de::value::Error>::new([1u8, 2, 3, 4, 5].into_iter()); + let mut output = vec![0, 0, 0, 0, 0]; + let expected_len = ExpectedLen::Exact(&mut *output); + let n = deserialize_check_len(des, expected_len).unwrap(); + assert_eq!(n, 5); + assert_eq!(output, vec![1, 2, 3, 4, 5]); + + // using `deserialize` to decode a sequence of bytes into a buffer with min/max length. 
+ let des = SeqDeserializer::<_, serde::de::value::Error>::new([1u8, 2, 3].into_iter()); + let mut output = vec![0, 0, 0, 0, 0]; + let expected_len = ExpectedLen::Between(2, &mut *output); + let n = deserialize_check_len(des, expected_len).unwrap(); + assert_eq!(n, 3); + assert_eq!(output, vec![1, 2, 3, 0, 0]); + } +} diff --git a/primitive-types/src/fp_conversion.rs b/primitive-types/src/fp_conversion.rs new file mode 100644 index 000000000..24f29905d --- /dev/null +++ b/primitive-types/src/fp_conversion.rs @@ -0,0 +1,66 @@ +use super::U256; + +impl U256 { + /// Lossy saturating conversion from a `f64` to a `U256`. Like for floating point to + /// primitive integer type conversions, this truncates fractional parts. + /// + /// The conversion follows the same rules as converting `f64` to other + /// primitive integer types. Namely, the conversion of `value: f64` behaves as + /// follows: + /// - `NaN` => `0` + /// - `(-∞, 0]` => `0` + /// - `(0, u256::MAX]` => `value as u256` + /// - `(u256::MAX, +∞)` => `u256::MAX` + pub fn from_f64_lossy(value: f64) -> U256 { + if value >= 1.0 { + let bits = value.to_bits(); + // NOTE: Don't consider the sign or check that the subtraction will + // underflow since we already checked that the value is greater + // than 1.0. + let exponent = ((bits >> 52) & 0x7ff) - 1023; + let mantissa = (bits & 0x0f_ffff_ffff_ffff) | 0x10_0000_0000_0000; + if exponent <= 52 { + U256::from(mantissa >> (52 - exponent)) + } else if exponent >= 256 { + U256::MAX + } else { + U256::from(mantissa) << U256::from(exponent - 52) + } + } else { + 0.into() + } + } + + /// Lossy conversion of `U256` to `f64`. + pub fn to_f64_lossy(self) -> f64 { + // Reference: https://blog.m-ou.se/floats/ + // Step 1: Get leading zeroes + let leading_zeroes = self.leading_zeros(); + // Step 2: Get msb to be farthest left bit + let left_aligned = self << leading_zeroes; + // Step 3: Shift msb to fit in lower 53 bits of the first u64 (64-53=11) + let quarter_aligned = left_aligned >> 11; + let mantissa = quarter_aligned.0[3]; + // Step 4: For the dropped bits (all bits beyond the 53 most significant + // We want to know only 2 things. If the msb of the dropped bits is 1 or 0, + // and if any of the other bits are 1. (See blog for explanation) + // So we take care to preserve the msb bit, while jumbling the rest of the bits + // together so that any 1s will survive. If all 0s, then the result will also be 0. + let dropped_bits = quarter_aligned.0[1] | quarter_aligned.0[0] | (left_aligned.0[0] & 0xFFFF_FFFF); + let dropped_bits = (dropped_bits & 0x7FFF_FFFF_FFFF_FFFF) | (dropped_bits >> 63); + let dropped_bits = quarter_aligned.0[2] | dropped_bits; + // Step 5: dropped_bits contains the msb of the original bits and an OR-mixed 63 bits. 
+ // If msb of dropped bits is 0, it is mantissa + 0 + // If msb of dropped bits is 1, it is mantissa + 0 only if mantissa lowest bit is 0 + // and other bits of the dropped bits are all 0 (which both can be tested with the below all at once) + let mantissa = mantissa + ((dropped_bits - (dropped_bits >> 63 & !mantissa)) >> 63); + // Step 6: Calculate the exponent + // If self is 0, exponent should be 0 (special meaning) and mantissa will end up 0 too + // Otherwise, (255 - n) + 1022 so it simplifies to 1277 - n + // 1023 and 1022 are the cutoffs for the exponent having the msb next to the decimal point + let exponent = if self.is_zero() { 0 } else { 1277 - leading_zeroes as u64 }; + // Step 7: sign bit is always 0, exponent is shifted into place + // Use addition instead of bitwise OR to saturate the exponent if mantissa overflows + f64::from_bits((exponent << 52) + mantissa) + } +} diff --git a/primitive-types/src/json_schema.rs b/primitive-types/src/json_schema.rs new file mode 100644 index 000000000..948bf86c3 --- /dev/null +++ b/primitive-types/src/json_schema.rs @@ -0,0 +1,75 @@ +use super::*; +#[cfg(not(feature = "std"))] +use alloc::{ + borrow::ToOwned, + string::{String, ToString}, +}; + +use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; + +impl JsonSchema for H160 { + fn schema_name() -> String { + "HexEncoded20Bytes".to_owned() + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let mut schema = gen.subschema_for::().into_object(); + schema.metadata().description = Some("Hex encoded 20 bytes".to_string()); + schema.string().pattern = Some("^0(x|X)[a-fA-F0-9]{40}$".to_string()); + schema.into() + } +} + +impl JsonSchema for U256 { + fn schema_name() -> String { + "U256String".to_string() + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let mut schema = gen.subschema_for::().into_object(); + schema.metadata().description = Some("256-bit Unsigned Integer".to_string()); + schema.string().pattern = Some("^(0|[1-9][0-9]{0,77})$".to_string()); + schema.into() + } +} + +#[cfg(test)] +#[cfg(any(feature = "serde", feature = "serde_no_std"))] +mod tests { + use crate::{H160, U256}; + #[cfg(not(feature = "std"))] + use alloc::string::String; + use jsonschema::Draft; + use schemars::JsonSchema; + + #[test] + fn hex_encoded_20_bytes() { + let schema = H160::json_schema(&mut schemars::gen::SchemaGenerator::default()); + let schema_json = serde_json::to_value(&schema).unwrap(); + let schema = jsonschema::JSONSchema::options() + .with_draft(Draft::Draft7) + .compile(&schema_json) + .unwrap(); + let value = serde_json::to_value("0x55086adeca661185c437d92b9818e6eda6d0d047").unwrap(); + assert!(schema.validate(&value).is_ok()); + let value = serde_json::to_value("0X0E9C8DA9FD4BDD3281879D9E328D8D74D02558CC").unwrap(); + assert!(schema.validate(&value).is_ok()); + + let value = serde_json::to_value("42").unwrap(); + assert!(schema.validate(&value).is_err()); + } + + #[test] + fn u256() { + let schema = U256::json_schema(&mut schemars::gen::SchemaGenerator::default()); + let schema_json = serde_json::to_value(&schema).unwrap(); + let schema = jsonschema::JSONSchema::options() + .with_draft(Draft::Draft7) + .compile(&schema_json) + .unwrap(); + let addr = serde_json::to_value("42").unwrap(); + assert!(schema.validate(&addr).is_ok()); + let addr = serde_json::to_value(['1'; 79].into_iter().collect::()).unwrap(); + assert!(schema.validate(&addr).is_err()); + } +} diff --git a/primitive-types/src/lib.rs b/primitive-types/src/lib.rs index ff7eb8210..41b740298 
100644 --- a/primitive-types/src/lib.rs +++ b/primitive-types/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2018 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -14,28 +14,20 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(feature = "std")] -extern crate core; +// serde_no_std leads to alloc via impl, json-schema without std requires alloc +#[cfg(all(not(feature = "std"), any(feature = "serde_no_std", feature = "json-schema")))] +extern crate alloc; -#[macro_use] -extern crate uint; - -#[macro_use] -extern crate fixed_hash; - -#[cfg(feature = "impl-serde")] -#[macro_use] -extern crate impl_serde; - -#[cfg(feature = "impl-codec")] -#[macro_use] -extern crate impl_codec; - -#[cfg(feature = "impl-rlp")] -#[macro_use] -extern crate impl_rlp; +#[cfg(feature = "fp-conversion")] +mod fp_conversion; +#[cfg(feature = "json-schema")] +mod json_schema; use core::convert::TryFrom; +use fixed_hash::{construct_fixed_hash, impl_fixed_hash_conversions}; +#[cfg(feature = "scale-info")] +use scale_info_crate::TypeInfo; +use uint::{construct_uint, uint_full_mul_reg}; /// Error type for conversion. #[derive(Debug, PartialEq, Eq)] @@ -46,74 +38,127 @@ pub enum Error { construct_uint! { /// 128-bit unsigned integer. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] pub struct U128(2); } construct_uint! { /// 256-bit unsigned integer. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] pub struct U256(4); } construct_uint! { /// 512-bits unsigned integer. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] pub struct U512(8); } +construct_fixed_hash! { + /// Fixed-size uninterpreted hash type with 16 bytes (128 bits) size. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] + pub struct H128(16); +} + construct_fixed_hash! { /// Fixed-size uninterpreted hash type with 20 bytes (160 bits) size. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] pub struct H160(20); } construct_fixed_hash! { /// Fixed-size uninterpreted hash type with 32 bytes (256 bits) size. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] pub struct H256(32); } +construct_fixed_hash! { + /// Fixed-size uninterpreted hash type with 48 bytes (384 bits) size. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] + pub struct H384(48); +} construct_fixed_hash! { /// Fixed-size uninterpreted hash type with 64 bytes (512 bits) size. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] pub struct H512(64); } +construct_fixed_hash! { + /// Fixed-size uninterpreted hash type with 96 bytes (768 bits) size. 
+ #[cfg_attr(feature = "scale-info", derive(TypeInfo))] + pub struct H768(96); +} + +#[cfg(feature = "num-traits")] +mod num_traits { + use super::*; + use impl_num_traits::impl_uint_num_traits; + + impl_uint_num_traits!(U128, 2); + impl_uint_num_traits!(U256, 4); + impl_uint_num_traits!(U512, 8); +} #[cfg(feature = "impl-serde")] mod serde { use super::*; + use impl_serde::{impl_fixed_hash_serde, impl_uint_serde}; impl_uint_serde!(U128, 2); impl_uint_serde!(U256, 4); impl_uint_serde!(U512, 8); + impl_fixed_hash_serde!(H128, 16); impl_fixed_hash_serde!(H160, 20); impl_fixed_hash_serde!(H256, 32); + impl_fixed_hash_serde!(H384, 48); impl_fixed_hash_serde!(H512, 64); + impl_fixed_hash_serde!(H768, 96); } #[cfg(feature = "impl-codec")] mod codec { use super::*; + use impl_codec::{impl_fixed_hash_codec, impl_uint_codec}; impl_uint_codec!(U128, 2); impl_uint_codec!(U256, 4); impl_uint_codec!(U512, 8); + impl_fixed_hash_codec!(H128, 16); impl_fixed_hash_codec!(H160, 20); impl_fixed_hash_codec!(H256, 32); + impl_fixed_hash_codec!(H384, 48); impl_fixed_hash_codec!(H512, 64); + impl_fixed_hash_codec!(H768, 96); } #[cfg(feature = "impl-rlp")] mod rlp { use super::*; + use impl_rlp::{impl_fixed_hash_rlp, impl_uint_rlp}; impl_uint_rlp!(U128, 2); impl_uint_rlp!(U256, 4); impl_uint_rlp!(U512, 8); + impl_fixed_hash_rlp!(H128, 16); impl_fixed_hash_rlp!(H160, 20); impl_fixed_hash_rlp!(H256, 32); + impl_fixed_hash_rlp!(H384, 48); impl_fixed_hash_rlp!(H512, 64); + impl_fixed_hash_rlp!(H768, 96); } impl_fixed_hash_conversions!(H256, H160); +impl U128 { + /// Multiplies two 128-bit integers to produce full 256-bit integer. + /// Overflow is not possible. + #[inline(always)] + pub fn full_mul(self, other: U128) -> U256 { + U256(uint_full_mul_reg!(U128, 2, self, other)) + } +} + impl U256 { - /// Multiplies two 256-bit integers to produce full 512-bit integer - /// No overflow possible + /// Multiplies two 256-bit integers to produce full 512-bit integer. + /// Overflow is not possible. #[inline(always)] pub fn full_mul(self, other: U256) -> U512 { U512(uint_full_mul_reg!(U256, 4, self, other)) @@ -138,7 +183,7 @@ impl TryFrom for U128 { fn try_from(value: U256) -> Result { let U256(ref arr) = value; if arr[2] | arr[3] != 0 { - return Err(Error::Overflow); + return Err(Error::Overflow) } let mut ret = [0; 2]; ret[0] = arr[0]; @@ -153,7 +198,7 @@ impl TryFrom for U256 { fn try_from(value: U512) -> Result { let U512(ref arr) = value; if arr[4] | arr[5] | arr[6] | arr[7] != 0 { - return Err(Error::Overflow); + return Err(Error::Overflow) } let mut ret = [0; 4]; ret[0] = arr[0]; @@ -170,7 +215,7 @@ impl TryFrom for U128 { fn try_from(value: U512) -> Result { let U512(ref arr) = value; if arr[2] | arr[3] | arr[4] | arr[5] | arr[6] | arr[7] != 0 { - return Err(Error::Overflow); + return Err(Error::Overflow) } let mut ret = [0; 2]; ret[0] = arr[0]; @@ -217,7 +262,7 @@ impl<'a> TryFrom<&'a U512> for U256 { fn try_from(value: &'a U512) -> Result { let U512(ref arr) = *value; if arr[4] | arr[5] | arr[6] | arr[7] != 0 { - return Err(Error::Overflow); + return Err(Error::Overflow) } let mut ret = [0; 4]; ret[0] = arr[0]; diff --git a/primitive-types/tests/fp_conversion.rs b/primitive-types/tests/fp_conversion.rs new file mode 100644 index 000000000..ef3112edb --- /dev/null +++ b/primitive-types/tests/fp_conversion.rs @@ -0,0 +1,76 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! Testing to and from f64 lossy for U256 primitive type. + +use primitive_types::U256; + +#[test] +#[allow(clippy::float_cmp)] +fn convert_u256_to_f64() { + assert_eq!(U256::from(0).to_f64_lossy(), 0.0); + assert_eq!(U256::from(42).to_f64_lossy(), 42.0); + assert_eq!(U256::from(1_000_000_000_000_000_000u128).to_f64_lossy(), 1_000_000_000_000_000_000.0,); +} + +#[test] +#[allow(clippy::excessive_precision, clippy::float_cmp, clippy::unreadable_literal)] +#[cfg(feature = "std")] +fn convert_u256_to_f64_precision_loss() { + assert_eq!(U256::from(u64::max_value()).to_f64_lossy(), u64::max_value() as f64,); + assert_eq!( + U256::MAX.to_f64_lossy(), + 115792089237316195423570985008687907853269984665640564039457584007913129639935.0, + ); + assert_eq!( + U256::MAX.to_f64_lossy(), + 115792089237316200000000000000000000000000000000000000000000000000000000000000.0, + ); +} + +#[test] +fn convert_f64_to_u256() { + assert_eq!(U256::from_f64_lossy(0.0), 0.into()); + assert_eq!(U256::from_f64_lossy(13.37), 13.into()); + assert_eq!(U256::from_f64_lossy(42.0), 42.into()); + assert_eq!(U256::from_f64_lossy(999.999), 999.into()); + assert_eq!(U256::from_f64_lossy(1_000_000_000_000_000_000.0), 1_000_000_000_000_000_000u128.into(),); +} + +#[test] +fn convert_f64_to_u256_large() { + let value = U256::from(1) << U256::from(255); + assert_eq!(U256::from_f64_lossy(format!("{}", value).parse::().expect("unexpected error parsing f64")), value); +} + +#[test] +#[allow(clippy::unreadable_literal)] +fn convert_f64_to_u256_overflow() { + assert_eq!( + U256::from_f64_lossy(115792089237316200000000000000000000000000000000000000000000000000000000000000.0), + U256::MAX, + ); + assert_eq!( + U256::from_f64_lossy(999999999999999999999999999999999999999999999999999999999999999999999999999999.0), + U256::MAX, + ); +} + +#[test] +fn convert_f64_to_u256_non_normal() { + assert_eq!(U256::from_f64_lossy(f64::EPSILON), 0.into()); + assert_eq!(U256::from_f64_lossy(f64::from_bits(0)), 0.into()); + assert_eq!(U256::from_f64_lossy(f64::NAN), 0.into()); + assert_eq!(U256::from_f64_lossy(f64::NEG_INFINITY), 0.into()); + assert_eq!(U256::from_f64_lossy(f64::INFINITY), U256::MAX); +} + +#[test] +fn f64_to_u256_truncation() { + assert_eq!(U256::from_f64_lossy(10.5), 10.into()); +} diff --git a/primitive-types/tests/num_traits.rs b/primitive-types/tests/num_traits.rs new file mode 100644 index 000000000..9bb26d26a --- /dev/null +++ b/primitive-types/tests/num_traits.rs @@ -0,0 +1,37 @@ +// Copyright 2021 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use impl_num_traits::integer_sqrt::IntegerSquareRoot; +use num_traits::ops::checked::{CheckedAdd, CheckedDiv, CheckedMul, CheckedSub}; +use primitive_types::U256; + +#[test] +fn u256_isqrt() { + let x = U256::MAX; + let s = x.integer_sqrt_checked().unwrap(); + assert_eq!(x.integer_sqrt(), s); +} + +#[test] +fn u256_checked_traits_supported() { + const ZERO: &U256 = &U256::zero(); + const ONE: &U256 = &U256::one(); + const MAX: &U256 = &U256::MAX; + + assert_eq!(::checked_add(MAX, ONE), None); + assert_eq!(::checked_add(ZERO, ONE), Some(*ONE)); + + assert_eq!(::checked_sub(ZERO, ONE), None); + assert_eq!(::checked_sub(ONE, ZERO), Some(*ONE)); + + assert_eq!(::checked_div(MAX, ZERO), None); + assert_eq!(::checked_div(MAX, ONE), Some(*MAX)); + + assert_eq!(::checked_mul(MAX, MAX), None); + assert_eq!(::checked_mul(MAX, ZERO), Some(*ZERO)); +} diff --git a/primitive-types/tests/scale_info.rs b/primitive-types/tests/scale_info.rs new file mode 100644 index 000000000..a6abd7548 --- /dev/null +++ b/primitive-types/tests/scale_info.rs @@ -0,0 +1,30 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Tests for scale-info feature of primitive-types. + +use primitive_types::{H256, U256}; +use scale_info_crate::{build::Fields, Path, Type, TypeInfo}; + +#[test] +fn u256_scale_info() { + let r#type = Type::builder() + .path(Path::new("U256", "primitive_types")) + .composite(Fields::unnamed().field(|f| f.ty::<[u64; 4]>().type_name("[u64; 4]"))); + + assert_eq!(U256::type_info(), r#type.into()); +} + +#[test] +fn h256_scale_info() { + let r#type = Type::builder() + .path(Path::new("H256", "primitive_types")) + .composite(Fields::unnamed().field(|f| f.ty::<[u8; 32]>().type_name("[u8; 32]"))); + + assert_eq!(H256::type_info(), r#type.into()); +} diff --git a/rlp-derive/CHANGELOG.md b/rlp-derive/CHANGELOG.md new file mode 100644 index 000000000..baa0a22f5 --- /dev/null +++ b/rlp-derive/CHANGELOG.md @@ -0,0 +1,11 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [0.2.0] - 2024-09-11 +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) + +## [0.1.0] - 2020-02-13 +- Extracted from parity-ethereum repo. [#343](https://github.com/paritytech/parity-common/pull/343) diff --git a/rlp-derive/Cargo.toml b/rlp-derive/Cargo.toml new file mode 100644 index 000000000..1a0967de1 --- /dev/null +++ b/rlp-derive/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "rlp-derive" +version = "0.2.0" +authors = ["Parity Technologies "] +license = "MIT OR Apache-2.0" +description = "Derive macro for #[derive(RlpEncodable, RlpDecodable)]" +homepage = "http://parity.io" +edition = "2021" +rust-version = "1.56.1" + +[lib] +proc-macro = true + +[dependencies] +syn = "2.0.72" +quote = "1.0.2" +proc-macro2 = "1.0.8" + +[dev-dependencies] +rlp = { version = "0.6.0", path = "../rlp" } diff --git a/rlp-derive/src/de.rs b/rlp-derive/src/de.rs new file mode 100644 index 000000000..f3ec6f178 --- /dev/null +++ b/rlp-derive/src/de.rs @@ -0,0 +1,163 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use proc_macro2::TokenStream; +use quote::quote; + +struct ParseQuotes { + single: TokenStream, + list: TokenStream, + takes_index: bool, +} + +fn decodable_parse_quotes() -> ParseQuotes { + ParseQuotes { single: quote! { rlp.val_at }, list: quote! { rlp.list_at }, takes_index: true } +} + +fn decodable_wrapper_parse_quotes() -> ParseQuotes { + ParseQuotes { single: quote! { rlp.as_val }, list: quote! { rlp.as_list }, takes_index: false } +} + +pub fn impl_decodable(ast: &syn::DeriveInput) -> TokenStream { + let body = if let syn::Data::Struct(s) = &ast.data { + s + } else { + panic!("#[derive(RlpDecodable)] is only defined for structs."); + }; + + let mut default_attribute_encountered = false; + let stmts: Vec<_> = body + .fields + .iter() + .enumerate() + .map(|(i, field)| decodable_field(i, field, decodable_parse_quotes(), &mut default_attribute_encountered)) + .collect(); + let name = &ast.ident; + + let impl_block = quote! { + impl rlp::Decodable for #name { + fn decode(rlp: &rlp::Rlp) -> Result { + let result = #name { + #(#stmts)* + }; + + Ok(result) + } + } + }; + + quote! { + const _: () = { + extern crate rlp; + #impl_block + }; + } +} + +pub fn impl_decodable_wrapper(ast: &syn::DeriveInput) -> TokenStream { + let body = if let syn::Data::Struct(s) = &ast.data { + s + } else { + panic!("#[derive(RlpDecodableWrapper)] is only defined for structs."); + }; + + let stmt = { + let fields: Vec<_> = body.fields.iter().collect(); + if fields.len() == 1 { + let field = fields.first().expect("fields.len() == 1; qed"); + let mut default_attribute_encountered = false; + decodable_field(0, field, decodable_wrapper_parse_quotes(), &mut default_attribute_encountered) + } else { + panic!("#[derive(RlpEncodableWrapper)] is only defined for structs with one field.") + } + }; + + let name = &ast.ident; + + let impl_block = quote! { + impl rlp::Decodable for #name { + fn decode(rlp: &rlp::Rlp) -> Result { + let result = #name { + #stmt + }; + + Ok(result) + } + } + }; + + quote! { + const _: () = { + extern crate rlp; + #impl_block + }; + } +} + +fn decodable_field( + mut index: usize, + field: &syn::Field, + quotes: ParseQuotes, + default_attribute_encountered: &mut bool, +) -> TokenStream { + let id = if let Some(ident) = &field.ident { + quote! { #ident } + } else { + let index = syn::Index::from(index); + quote! { #index } + }; + + if *default_attribute_encountered { + index -= 1; + } + let index = quote! { #index }; + + let single = quotes.single; + let list = quotes.list; + + let attributes = &field.attrs; + let default = if let Some(attr) = attributes.iter().find(|attr| attr.path().is_ident("rlp")) { + if *default_attribute_encountered { + panic!("only 1 #[rlp(default)] attribute is allowed in a struct") + } + match attr.parse_args() { + Ok(proc_macro2::TokenTree::Ident(ident)) if ident == "default" => {}, + _ => panic!("only #[rlp(default)] attribute is supported"), + } + *default_attribute_encountered = true; + true + } else { + false + }; + + if let syn::Type::Path(path) = &field.ty { + let ident = &path.path.segments.first().expect("there must be at least 1 segment").ident; + let ident_type = ident.to_string(); + if ident_type == "Vec" { + if quotes.takes_index { + if default { + quote! { #id: #list(#index).unwrap_or_default(), } + } else { + quote! { #id: #list(#index)?, } + } + } else { + quote! { #id: #list()?, } + } + } else if quotes.takes_index { + if default { + quote! { #id: #single(#index).unwrap_or_default(), } + } else { + quote! 
{ #id: #single(#index)?, } + } + } else { + quote! { #id: #single()?, } + } + } else { + panic!("rlp_derive not supported"); + } +} diff --git a/rlp-derive/src/en.rs b/rlp-derive/src/en.rs new file mode 100644 index 000000000..e33c530c7 --- /dev/null +++ b/rlp-derive/src/en.rs @@ -0,0 +1,115 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use proc_macro2::TokenStream; +use quote::quote; + +pub fn impl_encodable(ast: &syn::DeriveInput) -> TokenStream { + let body = if let syn::Data::Struct(s) = &ast.data { + s + } else { + panic!("#[derive(RlpEncodable)] is only defined for structs."); + }; + + let stmts: Vec<_> = body + .fields + .iter() + .enumerate() + .map(|(i, field)| encodable_field(i, field)) + .collect(); + let name = &ast.ident; + + let stmts_len = stmts.len(); + let stmts_len = quote! { #stmts_len }; + let impl_block = quote! { + impl rlp::Encodable for #name { + fn rlp_append(&self, stream: &mut rlp::RlpStream) { + stream.begin_list(#stmts_len); + #(#stmts)* + } + } + }; + + quote! { + const _: () = { + extern crate rlp; + #impl_block + }; + } +} + +pub fn impl_encodable_wrapper(ast: &syn::DeriveInput) -> TokenStream { + let body = if let syn::Data::Struct(s) = &ast.data { + s + } else { + panic!("#[derive(RlpEncodableWrapper)] is only defined for structs."); + }; + + let stmt = { + let fields: Vec<_> = body.fields.iter().collect(); + if fields.len() == 1 { + let field = fields.first().expect("fields.len() == 1; qed"); + encodable_field(0, field) + } else { + panic!("#[derive(RlpEncodableWrapper)] is only defined for structs with one field.") + } + }; + + let name = &ast.ident; + + let impl_block = quote! { + impl rlp::Encodable for #name { + fn rlp_append(&self, stream: &mut rlp::RlpStream) { + #stmt + } + } + }; + + quote! { + const _: () = { + extern crate rlp; + #impl_block + }; + } +} + +fn encodable_field(index: usize, field: &syn::Field) -> TokenStream { + let ident = if let Some(ident) = &field.ident { + quote! { #ident } + } else { + let index = syn::Index::from(index); + quote! { #index } + }; + + let id = quote! { self.#ident }; + + if let syn::Type::Path(path) = &field.ty { + let top_segment = path.path.segments.first().expect("there must be at least 1 segment"); + let ident = &top_segment.ident; + if ident == "Vec" { + let inner_ident = { + if let syn::PathArguments::AngleBracketed(angle) = &top_segment.arguments { + if let syn::GenericArgument::Type(syn::Type::Path(path)) = + angle.args.first().expect("Vec has only one angle bracketed type; qed") + { + &path.path.segments.first().expect("there must be at least 1 segment").ident + } else { + panic!("rlp_derive not supported"); + } + } else { + unreachable!("Vec has only one angle bracketed type; qed") + } + }; + quote! { stream.append_list::<#inner_ident, _>(&#id); } + } else { + quote! { stream.append(&#id); } + } + } else { + panic!("rlp_derive not supported"); + } +} diff --git a/rlp-derive/src/lib.rs b/rlp-derive/src/lib.rs new file mode 100644 index 000000000..cf6edb4f4 --- /dev/null +++ b/rlp-derive/src/lib.rs @@ -0,0 +1,56 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Derive macro for `#[derive(RlpEncodable, RlpDecodable)]`. +//! +//! 
For example of usage see `./tests/rlp.rs`. +//! +//! This library also supports up to 1 `#[rlp(default)]` in a struct, +//! which is similar to [`#[serde(default)]`](https://serde.rs/field-attrs.html#default) +//! with the caveat that we use the `Default` value if +//! the field deserialization fails, as we don't serialize field +//! names and there is no way to tell if it is present or not. + +#![warn(clippy::all, clippy::pedantic, clippy::nursery)] + +extern crate proc_macro; + +mod de; +mod en; + +use de::{impl_decodable, impl_decodable_wrapper}; +use en::{impl_encodable, impl_encodable_wrapper}; +use proc_macro::TokenStream; + +#[proc_macro_derive(RlpEncodable, attributes(rlp))] +pub fn encodable(input: TokenStream) -> TokenStream { + let ast = syn::parse(input).unwrap(); + let gen = impl_encodable(&ast); + gen.into() +} + +#[proc_macro_derive(RlpEncodableWrapper)] +pub fn encodable_wrapper(input: TokenStream) -> TokenStream { + let ast = syn::parse(input).unwrap(); + let gen = impl_encodable_wrapper(&ast); + gen.into() +} + +#[proc_macro_derive(RlpDecodable, attributes(rlp))] +pub fn decodable(input: TokenStream) -> TokenStream { + let ast = syn::parse(input).unwrap(); + let gen = impl_decodable(&ast); + gen.into() +} + +#[proc_macro_derive(RlpDecodableWrapper)] +pub fn decodable_wrapper(input: TokenStream) -> TokenStream { + let ast = syn::parse(input).unwrap(); + let gen = impl_decodable_wrapper(&ast); + gen.into() +} diff --git a/rlp-derive/tests/rlp.rs b/rlp-derive/tests/rlp.rs new file mode 100644 index 000000000..24963d323 --- /dev/null +++ b/rlp-derive/tests/rlp.rs @@ -0,0 +1,71 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rlp::{decode, encode}; +use rlp_derive::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; + +#[derive(Debug, PartialEq, RlpEncodable, RlpDecodable)] +struct Item { + a: String, +} + +#[derive(Debug, PartialEq, RlpEncodableWrapper, RlpDecodableWrapper)] +struct ItemWrapper { + a: String, +} + +#[test] +fn test_encode_item() { + let item = Item { a: "cat".into() }; + + let expected = vec![0xc4, 0x83, b'c', b'a', b't']; + let out = encode(&item); + assert_eq!(out, expected); + + let decoded = decode(&expected).expect("decode failure"); + assert_eq!(item, decoded); +} + +#[test] +fn test_encode_item_wrapper() { + let item = ItemWrapper { a: "cat".into() }; + + let expected = vec![0x83, b'c', b'a', b't']; + let out = encode(&item); + assert_eq!(out, expected); + + let decoded = decode(&expected).expect("decode failure"); + assert_eq!(item, decoded); +} + +#[test] +fn test_encode_item_default() { + #[derive(Debug, PartialEq, RlpEncodable, RlpDecodable)] + struct ItemDefault { + a: String, + /// It works with other attributes. 
+ #[rlp(default)] + b: Option>, + } + + let attack_of = "clones"; + let item = Item { a: attack_of.into() }; + + let expected = vec![0xc7, 0x86, b'c', b'l', b'o', b'n', b'e', b's']; + let out = encode(&item); + assert_eq!(out, expected); + + let item_default = ItemDefault { a: attack_of.into(), b: None }; + + let decoded = decode(&expected).expect("default failure"); + assert_eq!(item_default, decoded); + + let item_some = ItemDefault { a: attack_of.into(), b: Some(vec![1, 2, 3]) }; + let out = encode(&item_some); + assert_eq!(decode(&out), Ok(item_some)); +} diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md new file mode 100644 index 000000000..dd5fd2e4d --- /dev/null +++ b/rlp/CHANGELOG.md @@ -0,0 +1,38 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [0.6.1] - 2024-09-11 +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `rlp-derive` to 0.2.0. [#860](https://github.com/paritytech/parity-common/pull/860) + +## [0.5.2] - 2022-10-21 +- Add optional `derive` feature. [#613](https://github.com/paritytech/parity-common/pull/613) + +## [0.5.1] - 2021-07-30 +- Fix rlp encoding/decoding for bool. [#572](https://github.com/paritytech/parity-common/pull/572) + +## [0.5.0] - 2021-01-05 +### Breaking +- Use BytesMut for `RlpStream`'s backing buffer. [#453](https://github.com/paritytech/parity-common/pull/453) + +## [0.4.6] - 2020-09-29 +- Implement Encodable, Decodable for boxed types. [#427](https://github.com/paritytech/parity-common/pull/427) + +## [0.4.5] - 2020-03-16 +### Dependencies +- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) + +## [0.4.4] - 2019-11-20 +### Added +- Method `Rlp::at_with_offset`. [#269](https://github.com/paritytech/parity-common/pull/269) + +## [0.4.3] - 2019-10-24 +### Dependencies +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) +### Fixed +- Fixed nested unbounded lists. [#203](https://github.com/paritytech/parity-common/pull/203) +### Added +- Added no-std support. 
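As an aside for readers skimming the patch: here is a minimal sketch (not part of the change set) of how the derives and the `#[rlp(default)]` attribute documented in rlp-derive/src/lib.rs above are meant to be used from a downstream crate. The `Header` struct and its fields are made-up names, and it assumes the consumer enables rlp's new optional `derive` feature (added in the rlp/Cargo.toml hunk below) so the macros are re-exported from `rlp` itself.

    // Illustrative only: `Header` and its fields are hypothetical, and this assumes
    // rlp is pulled in with its new optional `derive` feature enabled so the macros
    // are re-exported from the rlp crate.
    use rlp::{RlpDecodable, RlpEncodable};

    #[derive(Debug, PartialEq, RlpEncodable, RlpDecodable)]
    struct Header {
        number: u64,
        // Trailing field: if it is absent or fails to decode, `Default::default()` is
        // used, matching the `#[rlp(default)]` semantics described in the crate docs.
        #[rlp(default)]
        extra: Option<Vec<u8>>,
    }

    #[test]
    fn derive_roundtrip() {
        let header = Header { number: 1, extra: None };
        let bytes = rlp::encode(&header); // returns bytes::BytesMut as of this change
        let decoded: Header = rlp::decode(&bytes).expect("roundtrip");
        assert_eq!(decoded, header);
    }
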
[#206](https://github.com/paritytech/parity-common/pull/206) diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 0ad6d40ea..0f1583e5e 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -1,23 +1,27 @@ [package] name = "rlp" -version = "0.4.2" +version = "0.6.1" description = "Recursive-length prefix encoding, decoding, and compression" repository = "https://github.com/paritytech/parity-common" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [dependencies] -rustc-hex = { version = "2.0", default-features = false } +bytes = { version = "1", default-features = false } +rustc-hex = { version = "2.0.1", default-features = false } +rlp-derive = { version = "0.2", path = "../rlp-derive", optional = true } [dev-dependencies] -criterion = "0.3" -hex-literal = "0.2" -primitive-types = { path = "../primitive-types", version = "0.5", features = ["impl-rlp"] } +criterion = "0.5.1" +hex-literal = "0.4.1" +primitive-types = { path = "../primitive-types", version = "0.13", features = ["impl-rlp"] } [features] default = ["std"] -std = ["rustc-hex/std"] +std = ["bytes/std", "rustc-hex/std"] +derive = ["rlp-derive"] [[bench]] name = "rlp" diff --git a/rlp/benches/rlp.rs b/rlp/benches/rlp.rs index e874cb0da..d1de4c93b 100644 --- a/rlp/benches/rlp.rs +++ b/rlp/benches/rlp.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -11,62 +11,75 @@ use criterion::{criterion_group, criterion_main, Criterion}; fn bench_encode(c: &mut Criterion) { - c.bench_function("encode_u64", |b| b.iter(|| { - let mut stream = rlp::RlpStream::new(); - stream.append(&0x1023_4567_89ab_cdefu64); - let _ = stream.out(); - })); - c.bench_function("encode_u256", |b| b.iter(|| { - let mut stream = rlp::RlpStream::new(); - let uint: primitive_types::U256 = "8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0".into(); - stream.append(&uint); - let _ = stream.out(); - })); - c.bench_function("encode_1000_u64", |b| b.iter(|| { - let mut stream = rlp::RlpStream::new_list(1000); - for i in 0..1000u64 { - stream.append(&i); - } - let _ = stream.out(); - })); - c.bench_function("encode_nested_empty_lists", |b| b.iter(|| { - // [ [], [[]], [ [], [[]] ] ] - let mut stream = rlp::RlpStream::new_list(3); - stream.begin_list(0); - stream.begin_list(1).begin_list(0); - stream.begin_list(2).begin_list(0).begin_list(1).begin_list(0); - let _ = stream.out(); - })); - c.bench_function("encode_1000_empty_lists", |b| b.iter(|| { - let mut stream = rlp::RlpStream::new_list(1000); - for _ in 0..1000 { + c.bench_function("encode_u64", |b| { + b.iter(|| { + let mut stream = rlp::RlpStream::new(); + stream.append(&0x1023_4567_89ab_cdefu64); + let _ = stream.out(); + }) + }); + c.bench_function("encode_u256", |b| { + b.iter(|| { + let mut stream = rlp::RlpStream::new(); + let uint: primitive_types::U256 = "8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0".into(); + stream.append(&uint); + let _ = stream.out(); + }) + }); + c.bench_function("encode_1000_u64", |b| { + b.iter(|| { + let mut stream = rlp::RlpStream::new_list(1000); + for i in 0..1000u64 { + stream.append(&i); + } + let _ = stream.out(); + }) + }); + c.bench_function("encode_nested_empty_lists", |b| { + b.iter(|| { + // [ [], [[]], [ [], [[]] ] ] + let mut stream = rlp::RlpStream::new_list(3); stream.begin_list(0); - } - let _ = stream.out(); - })); 
+ stream.begin_list(1).begin_list(0); + stream.begin_list(2).begin_list(0).begin_list(1).begin_list(0); + let _ = stream.out(); + }) + }); + c.bench_function("encode_1000_empty_lists", |b| { + b.iter(|| { + let mut stream = rlp::RlpStream::new_list(1000); + for _ in 0..1000 { + stream.begin_list(0); + } + let _ = stream.out(); + }) + }); } fn bench_decode(c: &mut Criterion) { - c.bench_function("decode_u64", |b| b.iter(|| { - let data = vec![0x88, 0x10, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]; - let rlp = rlp::Rlp::new(&data); - let _: u64 = rlp.as_val().unwrap(); - })); - c.bench_function("decode_u256", |b| b.iter(|| { - let data = vec![ - 0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0x09, 0x10, 0x20, - 0x30, 0x40, 0x50, 0x60, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xf0 - ]; - let rlp = rlp::Rlp::new(&data); - let _ : primitive_types::U256 = rlp.as_val().unwrap(); - })); + c.bench_function("decode_u64", |b| { + b.iter(|| { + let data = vec![0x88, 0x10, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]; + let rlp = rlp::Rlp::new(&data); + let _: u64 = rlp.as_val().unwrap(); + }) + }); + c.bench_function("decode_u256", |b| { + b.iter(|| { + let data = vec![ + 0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xf0, + ]; + let rlp = rlp::Rlp::new(&data); + let _: primitive_types::U256 = rlp.as_val().unwrap(); + }) + }); c.bench_function("decode_1000_u64", |b| { let mut stream = rlp::RlpStream::new_list(1000); for i in 0..1000u64 { stream.append(&i); } - let data= stream.out(); + let data = stream.out(); b.iter(|| { let rlp = rlp::Rlp::new(&data); for i in 0..1000 { @@ -74,16 +87,18 @@ fn bench_decode(c: &mut Criterion) { } }); }); - c.bench_function("decode_nested_empty_lists", |b| b.iter(|| { - // [ [], [[]], [ [], [[]] ] ] - let data = vec![0xc7, 0xc0, 0xc1, 0xc0, 0xc3, 0xc0, 0xc1, 0xc0]; - let rlp = rlp::Rlp::new(&data); - let _v0: Vec = rlp.at(0).unwrap().as_list().unwrap(); - let _v1: Vec = rlp.at(1).unwrap().at(0).unwrap().as_list().unwrap(); - let nested_rlp = rlp.at(2).unwrap(); - let _v2a: Vec = nested_rlp.at(0).unwrap().as_list().unwrap(); - let _v2b: Vec = nested_rlp.at(1).unwrap().at(0).unwrap().as_list().unwrap(); - })); + c.bench_function("decode_nested_empty_lists", |b| { + b.iter(|| { + // [ [], [[]], [ [], [[]] ] ] + let data = vec![0xc7, 0xc0, 0xc1, 0xc0, 0xc3, 0xc0, 0xc1, 0xc0]; + let rlp = rlp::Rlp::new(&data); + let _v0: Vec = rlp.at(0).unwrap().as_list().unwrap(); + let _v1: Vec = rlp.at(1).unwrap().at(0).unwrap().as_list().unwrap(); + let nested_rlp = rlp.at(2).unwrap(); + let _v2a: Vec = nested_rlp.at(0).unwrap().as_list().unwrap(); + let _v2b: Vec = nested_rlp.at(1).unwrap().at(0).unwrap().as_list().unwrap(); + }) + }); c.bench_function("decode_1000_empty_lists", |b| { let mut stream = rlp::RlpStream::new_list(1000); for _ in 0..1000 { diff --git a/rlp/src/error.rs b/rlp/src/error.rs index d810130b0..a965e5626 100644 --- a/rlp/src/error.rs +++ b/rlp/src/error.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/rlp/src/impls.rs b/rlp/src/impls.rs index 3fbf3cf20..750ed5c28 100644 --- a/rlp/src/impls.rs +++ b/rlp/src/impls.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // 
Licensed under the Apache License, Version 2.0 or the MIT license @@ -7,20 +7,25 @@ // except according to those terms. #[cfg(not(feature = "std"))] -use alloc::{borrow::ToOwned, vec::Vec, string::String}; -use core::{mem, str}; -use core::iter::{once, empty}; - -use crate::error::DecoderError; -use crate::rlpin::Rlp; -use crate::stream::RlpStream; -use crate::traits::{Encodable, Decodable}; +use alloc::{borrow::ToOwned, boxed::Box, string::String, vec::Vec}; +use bytes::{Bytes, BytesMut}; +use core::{ + iter::{empty, once}, + mem, str, +}; + +use crate::{ + error::DecoderError, + rlpin::Rlp, + stream::RlpStream, + traits::{Decodable, Encodable}, +}; pub fn decode_usize(bytes: &[u8]) -> Result { match bytes.len() { l if l <= mem::size_of::() => { if bytes[0] == 0 { - return Err(DecoderError::RlpInvalidIndirection); + return Err(DecoderError::RlpInvalidIndirection) } let mut res = 0usize; for (i, byte) in bytes.iter().enumerate().take(l) { @@ -28,26 +33,38 @@ pub fn decode_usize(bytes: &[u8]) -> Result { res += (*byte as usize) << shift; } Ok(res) - } + }, _ => Err(DecoderError::RlpIsTooBig), } } +impl Encodable for Box { + fn rlp_append(&self, s: &mut RlpStream) { + Encodable::rlp_append(&**self, s) + } +} + +impl Decodable for Box { + fn decode(rlp: &Rlp) -> Result { + T::decode(rlp).map(Box::new) + } +} + impl Encodable for bool { fn rlp_append(&self, s: &mut RlpStream) { - s.encoder().encode_iter(once(if *self { 1u8 } else { 0 })); + let as_uint = u8::from(*self); + Encodable::rlp_append(&as_uint, s); } } impl Decodable for bool { fn decode(rlp: &Rlp) -> Result { - rlp.decoder().decode_value(|bytes| { - match bytes.len() { - 0 => Ok(false), - 1 => Ok(bytes[0] != 0), - _ => Err(DecoderError::RlpIsTooBig), - } - }) + let as_uint = ::decode(rlp)?; + match as_uint { + 0 => Ok(false), + 1 => Ok(true), + _ => Err(DecoderError::Custom("invalid boolean value")), + } } } @@ -65,13 +82,38 @@ impl Encodable for Vec { impl Decodable for Vec { fn decode(rlp: &Rlp) -> Result { - rlp.decoder().decode_value(|bytes| { - Ok(bytes.to_vec()) - }) + rlp.decoder().decode_value(|bytes| Ok(bytes.to_vec())) } } -impl Encodable for Option where T: Encodable { +impl Encodable for Bytes { + fn rlp_append(&self, s: &mut RlpStream) { + s.encoder().encode_value(self); + } +} + +impl Decodable for Bytes { + fn decode(rlp: &Rlp) -> Result { + rlp.decoder().decode_value(|bytes| Ok(Bytes::copy_from_slice(bytes))) + } +} + +impl Encodable for BytesMut { + fn rlp_append(&self, s: &mut RlpStream) { + s.encoder().encode_value(self); + } +} + +impl Decodable for BytesMut { + fn decode(rlp: &Rlp) -> Result { + rlp.decoder().decode_value(|bytes| Ok(bytes.into())) + } +} + +impl Encodable for Option +where + T: Encodable, +{ fn rlp_append(&self, s: &mut RlpStream) { match *self { None => { @@ -80,12 +122,15 @@ impl Encodable for Option where T: Encodable { Some(ref value) => { s.begin_list(1); s.append(value); - } + }, } } } -impl Decodable for Option where T: Decodable { +impl Decodable for Option +where + T: Decodable, +{ fn decode(rlp: &Rlp) -> Result { let items = rlp.item_count()?; match items { @@ -108,13 +153,11 @@ impl Encodable for u8 { impl Decodable for u8 { fn decode(rlp: &Rlp) -> Result { - rlp.decoder().decode_value(|bytes| { - match bytes.len() { - 1 if bytes[0] != 0 => Ok(bytes[0]), - 0 => Ok(0), - 1 => Err(DecoderError::RlpInvalidIndirection), - _ => Err(DecoderError::RlpIsTooBig), - } + rlp.decoder().decode_value(|bytes| match bytes.len() { + 1 if bytes[0] != 0 => Ok(bytes[0]), + 0 => Ok(0), + 1 => 
Err(DecoderError::RlpInvalidIndirection), + _ => Err(DecoderError::RlpIsTooBig), }) } } @@ -128,42 +171,42 @@ macro_rules! impl_encodable_for_u { s.encoder().encode_value(&buffer[leading_empty_bytes..]); } } - } + }; } macro_rules! impl_decodable_for_u { ($name: ident) => { impl Decodable for $name { fn decode(rlp: &Rlp) -> Result { - rlp.decoder().decode_value(|bytes| { - match bytes.len() { - 0 | 1 => u8::decode(rlp).map(|v| v as $name), - l if l <= mem::size_of::<$name>() => { - if bytes[0] == 0 { - return Err(DecoderError::RlpInvalidIndirection); - } - let mut res = 0 as $name; - for (i, byte) in bytes.iter().enumerate().take(l) { - let shift = (l - 1 - i) * 8; - res += (*byte as $name) << shift; - } - Ok(res) + rlp.decoder().decode_value(|bytes| match bytes.len() { + 0 | 1 => u8::decode(rlp).map(|v| v as $name), + l if l <= mem::size_of::<$name>() => { + if bytes[0] == 0 { + return Err(DecoderError::RlpInvalidIndirection) } - _ => Err(DecoderError::RlpIsTooBig), - } + let mut res = 0 as $name; + for (i, byte) in bytes.iter().enumerate().take(l) { + let shift = (l - 1 - i) * 8; + res += (*byte as $name) << shift; + } + Ok(res) + }, + _ => Err(DecoderError::RlpIsTooBig), }) } } - } + }; } impl_encodable_for_u!(u16); impl_encodable_for_u!(u32); impl_encodable_for_u!(u64); +impl_encodable_for_u!(u128); impl_decodable_for_u!(u16); impl_decodable_for_u!(u32); impl_decodable_for_u!(u64); +impl_decodable_for_u!(u128); impl Encodable for usize { fn rlp_append(&self, s: &mut RlpStream) { diff --git a/rlp/src/lib.rs b/rlp/src/lib.rs index 1baa52c2f..a0bd64ae9 100644 --- a/rlp/src/lib.rs +++ b/rlp/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -10,23 +10,23 @@ //! //! Allows encoding, decoding, and view onto rlp-slice //! -//!# What should you use when? +//! # What should you use when? //! -//!### Use `encode` function when: +//! ### Use `encode` function when: //! * You want to encode something inline. //! * You do not work on big set of data. //! * You want to encode whole data structure at once. //! -//!### Use `decode` function when: +//! ### Use `decode` function when: //! * You want to decode something inline. //! * You do not work on big set of data. //! * You want to decode whole rlp at once. //! -//!### Use `RlpStream` when: +//! ### Use `RlpStream` when: //! * You want to encode something in portions. //! * You encode a big set of data. //! -//!### Use `Rlp` when: +//! ### Use `Rlp` when: //! * You need to handle data corruption errors. //! * You are working on input data. //! * You want to get view onto rlp-slice. @@ -37,20 +37,26 @@ #[cfg(not(feature = "std"))] extern crate alloc; -mod traits; mod error; +mod impls; mod rlpin; mod stream; -mod impls; +mod traits; #[cfg(not(feature = "std"))] use alloc::vec::Vec; +use bytes::BytesMut; use core::borrow::Borrow; -pub use self::error::DecoderError; -pub use self::rlpin::{Rlp, RlpIterator, PayloadInfo, Prototype}; -pub use self::stream::RlpStream; -pub use self::traits::{Decodable, Encodable}; +#[cfg(feature = "derive")] +pub use rlp_derive::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; + +pub use self::{ + error::DecoderError, + rlpin::{PayloadInfo, Prototype, Rlp, RlpIterator}, + stream::RlpStream, + traits::{Decodable, Encodable}, +}; /// The RLP encoded empty data (used to mean "null value"). 
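To make the behavioural changes in rlp/src/impls.rs above concrete, a small illustrative sketch (not part of the diff): booleans now reuse the unsigned-integer encoding and reject values other than 0 and 1, u128 joins the integer macros, and bytes::Bytes round-trips as a plain RLP string.

    use bytes::Bytes;

    #[test]
    fn impls_sketch() {
        // bool shares the uint encoding: false -> 0x80 (empty string), true -> 0x01 ...
        assert_eq!(rlp::encode(&false)[..], [0x80u8][..]);
        assert_eq!(rlp::encode(&true)[..], [0x01u8][..]);
        // ... and decoding rejects any value other than 0 or 1.
        assert!(rlp::decode::<bool>(&rlp::encode(&2u8)).is_err());

        // u128 is now covered by the integer impl macros.
        assert_eq!(rlp::encode(&0x0100_0000_0000_0000_u128)[..], [0x88u8, 1, 0, 0, 0, 0, 0, 0, 0][..]);

        // bytes::Bytes round-trips as a plain RLP string.
        let b = Bytes::from_static(b"cat");
        assert_eq!(rlp::decode::<Bytes>(&rlp::encode(&b)).unwrap(), b);
    }
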
pub const NULL_RLP: [u8; 1] = [0x80; 1]; @@ -59,44 +65,49 @@ pub const EMPTY_LIST_RLP: [u8; 1] = [0xC0; 1]; /// Shortcut function to decode trusted rlp /// -/// ```rust -/// extern crate rlp; -/// -/// fn main () { -/// let data = vec![0x83, b'c', b'a', b't']; -/// let animal: String = rlp::decode(&data).expect("could not decode"); -/// assert_eq!(animal, "cat".to_owned()); -/// } /// ``` -pub fn decode(bytes: &[u8]) -> Result where T: Decodable { +/// let data = vec![0x83, b'c', b'a', b't']; +/// let animal: String = rlp::decode(&data).expect("could not decode"); +/// assert_eq!(animal, "cat".to_owned()); +/// ``` +pub fn decode(bytes: &[u8]) -> Result +where + T: Decodable, +{ let rlp = Rlp::new(bytes); rlp.as_val() } -pub fn decode_list(bytes: &[u8]) -> Vec where T: Decodable { +pub fn decode_list(bytes: &[u8]) -> Vec +where + T: Decodable, +{ let rlp = Rlp::new(bytes); rlp.as_list().expect("trusted rlp should be valid") } /// Shortcut function to encode structure into rlp. /// -/// ```rust -/// extern crate rlp; -/// -/// fn main () { -/// let animal = "cat"; -/// let out = rlp::encode(&animal); -/// assert_eq!(out, vec![0x83, b'c', b'a', b't']); -/// } /// ``` -pub fn encode(object: &E) -> Vec where E: Encodable { +/// let animal = "cat"; +/// let out = rlp::encode(&animal); +/// assert_eq!(out, vec![0x83, b'c', b'a', b't']); +/// ``` +pub fn encode(object: &E) -> BytesMut +where + E: Encodable, +{ let mut stream = RlpStream::new(); stream.append(object); - stream.drain() + stream.out() } -pub fn encode_list(object: &[K]) -> Vec where E: Encodable, K: Borrow { +pub fn encode_list(object: &[K]) -> BytesMut +where + E: Encodable, + K: Borrow, +{ let mut stream = RlpStream::new(); stream.append_list(object); - stream.drain() + stream.out() } diff --git a/rlp/src/rlpin.rs b/rlp/src/rlpin.rs index 0d8c9ad2a..395808fa8 100644 --- a/rlp/src/rlpin.rs +++ b/rlp/src/rlpin.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -8,14 +8,11 @@ #[cfg(not(feature = "std"))] use alloc::{string::String, vec::Vec}; -use core::cell::Cell; -use core::fmt; +use core::{cell::Cell, fmt}; use rustc_hex::ToHex; -use crate::error::DecoderError; -use crate::impls::decode_usize; -use crate::traits::Decodable; +use crate::{error::DecoderError, impls::decode_usize, traits::Decodable}; /// rlp offset #[derive(Copy, Clone, Debug)] @@ -25,7 +22,7 @@ struct OffsetCache { } impl OffsetCache { - fn new(index: usize, offset: usize) -> OffsetCache { + const fn new(index: usize, offset: usize) -> OffsetCache { OffsetCache { index, offset } } } @@ -58,22 +55,24 @@ fn calculate_payload_info(header_bytes: &[u8], len_of_len: usize) -> Result (), } if header_bytes.len() < header_len { - return Err(DecoderError::RlpIsTooShort); + return Err(DecoderError::RlpIsTooShort) } let value_len = decode_usize(&header_bytes[1..header_len])?; if value_len <= 55 { - return Err(DecoderError::RlpInvalidIndirection); + return Err(DecoderError::RlpInvalidIndirection) } Ok(PayloadInfo::new(header_len, value_len)) } impl PayloadInfo { - fn new(header_len: usize, value_len: usize) -> PayloadInfo { + const fn new(header_len: usize, value_len: usize) -> PayloadInfo { PayloadInfo { header_len, value_len } } /// Total size of the RLP. - pub fn total(&self) -> usize { self.header_len + self.value_len } + pub fn total(&self) -> usize { + self.header_len + self.value_len + } /// Create a new object from the given bytes RLP. 
The bytes pub fn from(header_bytes: &[u8]) -> Result { @@ -114,27 +113,26 @@ impl<'a> fmt::Display for Rlp<'a> { Ok(Prototype::Data(_)) => write!(f, "\"0x{}\"", self.data().unwrap().to_hex::()), Ok(Prototype::List(len)) => { write!(f, "[")?; - for i in 0..len-1 { + for i in 0..len - 1 { write!(f, "{}, ", self.at(i).unwrap())?; } write!(f, "{}", self.at(len - 1).unwrap())?; write!(f, "]") }, - Err(err) => write!(f, "{:?}", err) + Err(err) => write!(f, "{:?}", err), } } } impl<'a> Rlp<'a> { - pub fn new(bytes: &'a [u8]) -> Rlp<'a> { - Rlp { - bytes, - offset_cache: Cell::new(None), - count_cache: Cell::new(None) - } + pub const fn new(bytes: &'a [u8]) -> Rlp<'a> { + Rlp { bytes, offset_cache: Cell::new(None), count_cache: Cell::new(None) } } - pub fn as_raw<'view>(&'view self) -> &'a [u8] where 'a: 'view { + pub fn as_raw<'view>(&'view self) -> &'a [u8] + where + 'a: 'view, + { self.bytes } @@ -153,7 +151,10 @@ impl<'a> Rlp<'a> { BasicDecoder::payload_info(self.bytes) } - pub fn data<'view>(&'view self) -> Result<&'a [u8], DecoderError> where 'a: 'view { + pub fn data<'view>(&'view self) -> Result<&'a [u8], DecoderError> + where + 'a: 'view, + { let pi = BasicDecoder::payload_info(self.bytes)?; Ok(&self.bytes[pi.header_len..(pi.header_len + pi.value_len)]) } @@ -166,10 +167,10 @@ impl<'a> Rlp<'a> { let c = self.iter().count(); self.count_cache.set(Some(c)); Ok(c) - } + }, } } else { - Err(DecoderError::RlpExpectedToBeList) + Err(DecoderError::RlpExpectedToBeList) } } @@ -182,33 +183,51 @@ impl<'a> Rlp<'a> { } } - pub fn at<'view>(&'view self, index: usize) -> Result, DecoderError> where 'a: 'view { + /// Returns an Rlp item in a list at the given index. + /// + /// Returns an error if this Rlp is not a list or if the index is out of range. + pub fn at<'view>(&'view self, index: usize) -> Result, DecoderError> + where + 'a: 'view, + { + let (rlp, _offset) = self.at_with_offset(index)?; + Ok(rlp) + } + + /// Returns an Rlp item in a list at the given index along with the byte offset into the + /// raw data slice. + /// + /// Returns an error if this Rlp is not a list or if the index is out of range. 
+ pub fn at_with_offset<'view>(&'view self, index: usize) -> Result<(Rlp<'a>, usize), DecoderError> + where + 'a: 'view, + { if !self.is_list() { - return Err(DecoderError::RlpExpectedToBeList); + return Err(DecoderError::RlpExpectedToBeList) } // move to cached position if its index is less or equal to // current search index, otherwise move to beginning of list let cache = self.offset_cache.get(); let (bytes, indexes_to_skip, bytes_consumed) = match cache { - Some(ref cache) if cache.index <= index => ( - Rlp::consume(self.bytes, cache.offset)?, index - cache.index, cache.offset - ), + Some(ref cache) if cache.index <= index => + (Rlp::consume(self.bytes, cache.offset)?, index - cache.index, cache.offset), _ => { let (bytes, consumed) = self.consume_list_payload()?; (bytes, index, consumed) - } + }, }; // skip up to x items let (bytes, consumed) = Rlp::consume_items(bytes, indexes_to_skip)?; // update the cache - self.offset_cache.set(Some(OffsetCache::new(index, bytes_consumed + consumed))); + let offset = bytes_consumed + consumed; + self.offset_cache.set(Some(OffsetCache::new(index, offset))); // construct new rlp let found = BasicDecoder::payload_info(bytes)?; - Ok(Rlp::new(&bytes[0..found.header_len + found.value_len])) + Ok((Rlp::new(&bytes[0..found.header_len + found.value_len]), offset)) } pub fn is_null(&self) -> bool { @@ -229,7 +248,7 @@ impl<'a> Rlp<'a> { pub fn is_int(&self) -> bool { if self.is_null() { - return false; + return false } match self.bytes[0] { @@ -239,27 +258,42 @@ impl<'a> Rlp<'a> { let payload_idx = 1 + b as usize - 0xb7; payload_idx < self.bytes.len() && self.bytes[payload_idx] != 0 }, - _ => false + _ => false, } } - pub fn iter<'view>(&'view self) -> RlpIterator<'a, 'view> where 'a: 'view { + pub fn iter<'view>(&'view self) -> RlpIterator<'a, 'view> + where + 'a: 'view, + { self.into_iter() } - pub fn as_val(&self) -> Result where T: Decodable { + pub fn as_val(&self) -> Result + where + T: Decodable, + { T::decode(self) } - pub fn as_list(&self) -> Result, DecoderError> where T: Decodable { + pub fn as_list(&self) -> Result, DecoderError> + where + T: Decodable, + { self.iter().map(|rlp| rlp.as_val()).collect() } - pub fn val_at(&self, index: usize) -> Result where T: Decodable { + pub fn val_at(&self, index: usize) -> Result + where + T: Decodable, + { self.at(index)?.as_val() } - pub fn list_at(&self, index: usize) -> Result, DecoderError> where T: Decodable { + pub fn list_at(&self, index: usize) -> Result, DecoderError> + where + T: Decodable, + { self.at(index)?.as_list() } @@ -271,7 +305,7 @@ impl<'a> Rlp<'a> { fn consume_list_payload(&self) -> Result<(&'a [u8], usize), DecoderError> { let item = BasicDecoder::payload_info(self.bytes)?; if self.bytes.len() < (item.header_len + item.value_len) { - return Err(DecoderError::RlpIsTooShort); + return Err(DecoderError::RlpIsTooShort) } Ok((&self.bytes[item.header_len..item.header_len + item.value_len], item.header_len)) } @@ -300,20 +334,23 @@ impl<'a> Rlp<'a> { } /// Iterator over rlp-slice list elements. 
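The offset returned by the new `at_with_offset` is the position of the item within the raw slice backing the `Rlp`, as this short sketch (mirroring the `rlp_at_with_offset` test added further down, and not part of the diff) illustrates:

    #[test]
    fn offset_sketch() {
        // A two-item list: "cat" starts at byte 1, "dog" at byte 5 of the raw input.
        let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'];
        let rlp = rlp::Rlp::new(&data);
        let (dog, offset) = rlp.at_with_offset(1).unwrap();
        assert_eq!(offset, 5);
        // The offset points back into the slice the `Rlp` was built from.
        assert_eq!(dog.as_raw(), &data[5..]);
        assert_eq!(dog.as_val::<String>().unwrap(), "dog");
    }
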
-pub struct RlpIterator<'a, 'view> where 'a: 'view { +pub struct RlpIterator<'a, 'view> +where + 'a: 'view, +{ rlp: &'view Rlp<'a>, index: usize, } -impl<'a, 'view> IntoIterator for &'view Rlp<'a> where 'a: 'view { +impl<'a, 'view> IntoIterator for &'view Rlp<'a> +where + 'a: 'view, +{ type Item = Rlp<'a>; type IntoIter = RlpIterator<'a, 'view>; fn into_iter(self) -> Self::IntoIter { - RlpIterator { - rlp: self, - index: 0, - } + RlpIterator { rlp: self, index: 0 } } } @@ -328,15 +365,19 @@ impl<'a, 'view> Iterator for RlpIterator<'a, 'view> { } } +impl<'a, 'view> ExactSizeIterator for RlpIterator<'a, 'view> { + fn len(&self) -> usize { + self.rlp.item_count().unwrap_or(0).saturating_sub(self.index) + } +} + pub struct BasicDecoder<'a> { rlp: &'a [u8], } impl<'a> BasicDecoder<'a> { - pub fn new(rlp: &'a [u8]) -> BasicDecoder<'a> { - BasicDecoder { - rlp, - } + pub const fn new(rlp: &'a [u8]) -> BasicDecoder<'a> { + BasicDecoder { rlp } } /// Return first item info. @@ -349,8 +390,9 @@ impl<'a> BasicDecoder<'a> { } pub fn decode_value(&self, f: F) -> Result - where F: Fn(&[u8]) -> Result { - + where + F: Fn(&[u8]) -> Result, + { let bytes = self.rlp; let l = *bytes.first().ok_or_else(|| DecoderError::RlpIsTooShort)?; @@ -360,25 +402,24 @@ impl<'a> BasicDecoder<'a> { } else if l <= 0xb7 { let last_index_of = 1 + l as usize - 0x80; if bytes.len() < last_index_of { - return Err(DecoderError::RlpInconsistentLengthAndData); + return Err(DecoderError::RlpInconsistentLengthAndData) } let d = &bytes[1..last_index_of]; if l == 0x81 && d[0] < 0x80 { - return Err(DecoderError::RlpInvalidIndirection); + return Err(DecoderError::RlpInvalidIndirection) } Ok(f(d)?) } else if l <= 0xbf { let len_of_len = l as usize - 0xb7; let begin_of_value = 1 as usize + len_of_len; if bytes.len() < begin_of_value { - return Err(DecoderError::RlpInconsistentLengthAndData); + return Err(DecoderError::RlpInconsistentLengthAndData) } let len = decode_usize(&bytes[1..begin_of_value])?; - let last_index_of_value = begin_of_value.checked_add(len) - .ok_or(DecoderError::RlpInvalidLength)?; + let last_index_of_value = begin_of_value.checked_add(len).ok_or(DecoderError::RlpInvalidLength)?; if bytes.len() < last_index_of_value { - return Err(DecoderError::RlpInconsistentLengthAndData); + return Err(DecoderError::RlpInconsistentLengthAndData) } Ok(f(&bytes[begin_of_value..last_index_of_value])?) } else { diff --git a/rlp/src/stream.rs b/rlp/src/stream.rs index cfddc2d95..e7c2a99e4 100644 --- a/rlp/src/stream.rs +++ b/rlp/src/stream.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -8,6 +8,7 @@ #[cfg(not(feature = "std"))] use alloc::vec::Vec; +use bytes::{BufMut, BytesMut}; use core::borrow::Borrow; use crate::traits::Encodable; @@ -21,18 +22,15 @@ struct ListInfo { impl ListInfo { fn new(position: usize, max: Option) -> ListInfo { - ListInfo { - position, - current: 0, - max, - } + ListInfo { position, current: 0, max } } } /// Appendable rlp encoder. pub struct RlpStream { unfinished_lists: Vec, - buffer: Vec, + start_pos: usize, + buffer: BytesMut, finished_list: bool, } @@ -45,36 +43,42 @@ impl Default for RlpStream { impl RlpStream { /// Initializes instance of empty `Stream`. 
pub fn new() -> Self { - RlpStream { - unfinished_lists: Vec::with_capacity(16), - buffer: Vec::with_capacity(1024), - finished_list: false, - } + Self::new_with_buffer(BytesMut::with_capacity(1024)) } /// Initializes the `Stream` as a list. pub fn new_list(len: usize) -> Self { - let mut stream = RlpStream::new(); + Self::new_list_with_buffer(BytesMut::with_capacity(1024), len) + } + + /// Initializes instance of empty `Stream`. + pub fn new_with_buffer(buffer: BytesMut) -> Self { + RlpStream { unfinished_lists: Vec::with_capacity(16), start_pos: buffer.len(), buffer, finished_list: false } + } + + /// Initializes the `Stream` as a list. + pub fn new_list_with_buffer(buffer: BytesMut, len: usize) -> Self { + let mut stream = RlpStream::new_with_buffer(buffer); stream.begin_list(len); stream } + fn total_written(&self) -> usize { + self.buffer.len() - self.start_pos + } + /// Apends null to the end of stream, chainable. /// - /// ```rust - /// extern crate rlp; - /// use rlp::*; - /// - /// fn main () { - /// let mut stream = RlpStream::new_list(2); - /// stream.append_empty_data().append_empty_data(); - /// let out = stream.out(); - /// assert_eq!(out, vec![0xc2, 0x80, 0x80]); - /// } + /// ``` + /// use rlp::RlpStream; + /// let mut stream = RlpStream::new_list(2); + /// stream.append_empty_data().append_empty_data(); + /// let out = stream.out(); + /// assert_eq!(out, vec![0xc2, 0x80, 0x80]); /// ``` pub fn append_empty_data(&mut self) -> &mut Self { // self push raw item - self.buffer.push(0x80); + self.buffer.put_u8(0x80); // try to finish and prepend the length self.note_appended(1); @@ -83,11 +87,6 @@ impl RlpStream { self } - /// Drain the object and return the underlying ElasticArray. Panics if it is not finished. - pub fn drain(self) -> Vec { - self.out() - } - /// Appends raw (pre-serialised) RLP data. Use with caution. Chainable. pub fn append_raw(&mut self, bytes: &[u8], item_count: usize) -> &mut Self { // push raw items @@ -102,18 +101,17 @@ impl RlpStream { /// Appends value to the end of stream, chainable. /// - /// ```rust - /// extern crate rlp; - /// use rlp::*; - /// - /// fn main () { - /// let mut stream = RlpStream::new_list(2); - /// stream.append(&"cat").append(&"dog"); - /// let out = stream.out(); - /// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']); - /// } /// ``` - pub fn append(&mut self, value: &E) -> &mut Self where E: Encodable { + /// use rlp::RlpStream; + /// let mut stream = RlpStream::new_list(2); + /// stream.append(&"cat").append(&"dog"); + /// let out = stream.out(); + /// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']); + /// ``` + pub fn append(&mut self, value: &E) -> &mut Self + where + E: Encodable, + { self.finished_list = false; value.rlp_append(self); if !self.finished_list { @@ -124,19 +122,16 @@ impl RlpStream { /// Appends iterator to the end of stream, chainable. 
/// - /// ```rust - /// extern crate rlp; - /// use rlp::*; - /// - /// fn main () { - /// let mut stream = RlpStream::new_list(2); - /// stream.append(&"cat").append_iter("dog".as_bytes().iter().cloned()); - /// let out = stream.out(); - /// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']); - /// } + /// ``` + /// use rlp::RlpStream; + /// let mut stream = RlpStream::new_list(2); + /// stream.append(&"cat").append_iter("dog".as_bytes().iter().cloned()); + /// let out = stream.out(); + /// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']); /// ``` pub fn append_iter(&mut self, value: I) -> &mut Self - where I: IntoIterator, + where + I: IntoIterator, { self.finished_list = false; self.encoder().encode_iter(value); @@ -147,7 +142,11 @@ impl RlpStream { } /// Appends list of values to the end of stream, chainable. - pub fn append_list(&mut self, values: &[K]) -> &mut Self where E: Encodable, K: Borrow { + pub fn append_list(&mut self, values: &[K]) -> &mut Self + where + E: Encodable, + K: Borrow, + { self.begin_list(values.len()); for value in values { self.append(value.borrow()); @@ -157,40 +156,42 @@ impl RlpStream { /// Appends value to the end of stream, but do not count it as an appended item. /// It's useful for wrapper types - pub fn append_internal(&mut self, value: &E) -> &mut Self where E: Encodable { + pub fn append_internal(&mut self, value: &E) -> &mut Self + where + E: Encodable, + { value.rlp_append(self); self } /// Declare appending the list of given size, chainable. /// - /// ```rust - /// extern crate rlp; - /// use rlp::*; - /// - /// fn main () { - /// let mut stream = RlpStream::new_list(2); - /// stream.begin_list(2).append(&"cat").append(&"dog"); - /// stream.append(&""); - /// let out = stream.out(); - /// assert_eq!(out, vec![0xca, 0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g', 0x80]); - /// } + /// ``` + /// use rlp::RlpStream; + /// let mut stream = RlpStream::new_list(2); + /// stream.begin_list(2).append(&"cat").append(&"dog"); + /// stream.append(&""); + /// let out = stream.out(); + /// assert_eq!(out, vec![0xca, 0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g', 0x80]); /// ``` pub fn begin_list(&mut self, len: usize) -> &mut RlpStream { self.finished_list = false; match len { 0 => { // we may finish, if the appended list len is equal 0 - self.buffer.push(0xc0u8); + self.buffer.put_u8(0xc0u8); self.note_appended(1); self.finished_list = true; }, _ => { // payload is longer than 1 byte only for lists > 55 bytes // by pushing always this 1 byte we may avoid unnecessary shift of data - self.buffer.push(0); + // both cases will need at least 1 byte header, so we push 1 byte + // and then, when we know the exactly size of data, the value will be updated + // accordingly in `insert_list_payload` method. + self.buffer.put_u8(0); - let position = self.buffer.len(); + let position = self.total_written(); self.unfinished_lists.push(ListInfo::new(position, Some(len))); }, } @@ -204,17 +205,17 @@ impl RlpStream { self.finished_list = false; // payload is longer than 1 byte only for lists > 55 bytes // by pushing always this 1 byte we may avoid unnecessary shift of data - self.buffer.push(0); - let position = self.buffer.len(); + self.buffer.put_u8(0); + let position = self.total_written(); self.unfinished_lists.push(ListInfo::new(position, None)); // return chainable self self } - /// Appends raw (pre-serialised) RLP data. Checks for size oveflow. + /// Appends raw (pre-serialised) RLP data. 
Checks for size overflow. pub fn append_raw_checked(&mut self, bytes: &[u8], item_count: usize, max_size: usize) -> bool { if self.estimate_size(bytes.len()) > max_size { - return false; + return false } self.append_raw(bytes, item_count); true @@ -222,7 +223,7 @@ impl RlpStream { /// Calculate total RLP size for appended payload. pub fn estimate_size(&self, add: usize) -> usize { - let total_size = self.buffer.len() + add; + let total_size = self.total_written() + add; let mut base_size = total_size; for list in &self.unfinished_lists[..] { let len = total_size - list.position; @@ -246,21 +247,18 @@ impl RlpStream { /// Clear the output stream so far. /// - /// ```rust - /// extern crate rlp; - /// use rlp::*; - /// - /// fn main () { - /// let mut stream = RlpStream::new_list(3); - /// stream.append(&"cat"); - /// stream.clear(); - /// stream.append(&"dog"); - /// let out = stream.out(); - /// assert_eq!(out, vec![0x83, b'd', b'o', b'g']); - /// } + /// ``` + /// use rlp::RlpStream; + /// let mut stream = RlpStream::new_list(3); + /// stream.append(&"cat"); + /// stream.clear(); + /// stream.append(&"dog"); + /// let out = stream.out(); + /// assert_eq!(out, vec![0x83, b'd', b'o', b'g']); + /// ``` pub fn clear(&mut self) { // clear bytes - self.buffer.clear(); + self.buffer.truncate(self.start_pos); // clear lists self.unfinished_lists.clear(); @@ -268,19 +266,16 @@ impl RlpStream { /// Returns true if stream doesnt expect any more items. /// - /// ```rust - /// extern crate rlp; - /// use rlp::*; - /// - /// fn main () { - /// let mut stream = RlpStream::new_list(2); - /// stream.append(&"cat"); - /// assert_eq!(stream.is_finished(), false); - /// stream.append(&"dog"); - /// assert_eq!(stream.is_finished(), true); - /// let out = stream.out(); - /// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']); - /// } + /// ``` + /// use rlp::RlpStream; + /// let mut stream = RlpStream::new_list(2); + /// stream.append(&"cat"); + /// assert_eq!(stream.is_finished(), false); + /// stream.append(&"dog"); + /// assert_eq!(stream.is_finished(), true); + /// let out = stream.out(); + /// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']); + /// ``` pub fn is_finished(&self) -> bool { self.unfinished_lists.is_empty() } @@ -294,7 +289,7 @@ impl RlpStream { /// Streams out encoded bytes. /// /// panic! if stream is not finished. - pub fn out(self) -> Vec { + pub fn out(self) -> BytesMut { if self.is_finished() { self.buffer } else { @@ -305,7 +300,7 @@ impl RlpStream { /// Try to finish lists fn note_appended(&mut self, inserted_items: usize) { if self.unfinished_lists.is_empty() { - return; + return } let back = self.unfinished_lists.len() - 1; @@ -314,15 +309,15 @@ impl RlpStream { Some(ref mut x) => { x.current += inserted_items; match x.max { - Some(ref max) if x.current > *max => panic!("You cannot append more items then you expect!"), + Some(ref max) if x.current > *max => panic!("You cannot append more items than you expect!"), Some(ref max) => x.current == *max, _ => false, } - } + }, }; if should_finish { let x = self.unfinished_lists.pop().unwrap(); - let len = self.buffer.len() - x.position; + let len = self.total_written() - x.position; self.encoder().insert_list_payload(len, x.position); self.note_appended(1); } @@ -330,16 +325,16 @@ impl RlpStream { } pub fn encoder(&mut self) -> BasicEncoder { - BasicEncoder::new(self) + BasicEncoder::new(self, self.start_pos) } /// Finalize current unbounded list. 
Panics if no unbounded list has been opened. - pub fn complete_unbounded_list(&mut self) { + pub fn finalize_unbounded_list(&mut self) { let list = self.unfinished_lists.pop().expect("No open list."); if list.max.is_some() { panic!("List type mismatch."); } - let len = self.buffer.len() - list.position; + let len = self.total_written() - list.position; self.encoder().insert_list_payload(len, list.position); self.note_appended(1); self.finished_list = true; @@ -347,14 +342,17 @@ impl RlpStream { } pub struct BasicEncoder<'a> { - buffer: &'a mut Vec, + buffer: &'a mut BytesMut, + start_pos: usize, } impl<'a> BasicEncoder<'a> { - fn new(stream: &'a mut RlpStream) -> Self { - BasicEncoder { - buffer: &mut stream.buffer - } + fn new(stream: &'a mut RlpStream, start_pos: usize) -> Self { + BasicEncoder { buffer: &mut stream.buffer, start_pos } + } + + fn total_written(&self) -> usize { + self.buffer.len() - self.start_pos } fn insert_size(&mut self, size: usize, position: usize) -> u8 { @@ -362,10 +360,10 @@ impl<'a> BasicEncoder<'a> { let leading_empty_bytes = size.leading_zeros() as usize / 8; let size_bytes = 4 - leading_empty_bytes as u8; let buffer: [u8; 4] = size.to_be_bytes(); - assert!(position <= self.buffer.len()); + assert!(position <= self.total_written()); self.buffer.extend_from_slice(&buffer[leading_empty_bytes..]); - self.buffer[position..].rotate_right(size_bytes as usize); + self.buffer[self.start_pos + position..].rotate_right(size_bytes as usize); size_bytes as u8 } @@ -374,12 +372,12 @@ impl<'a> BasicEncoder<'a> { // 1 byte was already reserved for payload earlier match len { 0..=55 => { - self.buffer[pos - 1] = 0xc0u8 + len as u8; + self.buffer[self.start_pos + pos - 1] = 0xc0u8 + len as u8; }, _ => { let inserted_bytes = self.insert_size(len, pos); - self.buffer[pos - 1] = 0xf7u8 + inserted_bytes; - } + self.buffer[self.start_pos + pos - 1] = 0xf7u8 + inserted_bytes; + }, }; } @@ -389,39 +387,40 @@ impl<'a> BasicEncoder<'a> { /// Pushes encoded value to the end of buffer pub fn encode_iter(&mut self, value: I) - where I: IntoIterator, + where + I: IntoIterator, { let mut value = value.into_iter(); let len = match value.size_hint() { (lower, Some(upper)) if lower == upper => lower, _ => { let value = value.collect::>(); - return self.encode_iter(value); - } + return self.encode_iter(value) + }, }; match len { // just 0 - 0 => self.buffer.push(0x80u8), + 0 => self.buffer.put_u8(0x80u8), len @ 1..=55 => { let first = value.next().expect("iterator length is higher than 1"); if len == 1 && first < 0x80 { // byte is its own encoding if < 0x80 - self.buffer.push(first); + self.buffer.put_u8(first); } else { // (prefix + length), followed by the string - self.buffer.push(0x80u8 + len as u8); - self.buffer.push(first); + self.buffer.put_u8(0x80u8 + len as u8); + self.buffer.put_u8(first); self.buffer.extend(value); } - } + }, // (prefix + length of length), followed by the length, followd by the string len => { - self.buffer.push(0); - let position = self.buffer.len(); + self.buffer.put_u8(0); + let position = self.total_written(); let inserted_bytes = self.insert_size(len, position); - self.buffer[position - 1] = 0xb7 + inserted_bytes; + self.buffer[self.start_pos + position - 1] = 0xb7 + inserted_bytes; self.buffer.extend(value); - } + }, } } } diff --git a/rlp/src/traits.rs b/rlp/src/traits.rs index 13531a1b6..b83b2ddcb 100644 --- a/rlp/src/traits.rs +++ b/rlp/src/traits.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // 
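Two stream-level changes in rlp/src/stream.rs above are easy to miss: `RlpStream` can now encode into a caller-supplied `BytesMut` (with `out()` returning `BytesMut` as well), and the unbounded-list finaliser was renamed from `complete_unbounded_list` to `finalize_unbounded_list`. A small sketch, not part of the diff:

    use bytes::BytesMut;

    #[test]
    fn stream_sketch() {
        // Append RLP onto a caller-provided buffer; `out()` hands the BytesMut back,
        // with the pre-existing contents left untouched in front of the encoded data.
        let mut buffer = BytesMut::new();
        buffer.extend_from_slice(b"prefix");
        let mut stream = rlp::RlpStream::new_with_buffer(buffer);
        stream.append(&"cat");
        assert_eq!(&stream.out()[..], &[b'p', b'r', b'e', b'f', b'i', b'x', 0x83, b'c', b'a', b't'][..]);

        // Unbounded lists are closed with the renamed `finalize_unbounded_list`.
        let mut stream = rlp::RlpStream::new();
        stream.begin_unbounded_list().append(&"cat").append(&"dog");
        stream.finalize_unbounded_list();
        assert_eq!(&stream.out()[..], &[0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'][..]);
    }
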
Licensed under the Apache License, Version 2.0 or the MIT license @@ -7,12 +7,9 @@ // except according to those terms. //! Common RLP traits -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; +use bytes::BytesMut; -use crate::error::DecoderError; -use crate::rlpin::Rlp; -use crate::stream::RlpStream; +use crate::{error::DecoderError, rlpin::Rlp, stream::RlpStream}; /// RLP decodable trait pub trait Decodable: Sized { @@ -26,9 +23,9 @@ pub trait Encodable { fn rlp_append(&self, s: &mut RlpStream); /// Get rlp-encoded bytes for this instance - fn rlp_bytes(&self) -> Vec { + fn rlp_bytes(&self) -> BytesMut { let mut s = RlpStream::new(); self.rlp_append(&mut s); - s.drain() + s.out() } } diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index ba5a423b7..c5224fd5d 100644 --- a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -6,11 +6,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use core::{fmt, cmp}; +use core::{cmp, fmt}; +use bytes::{Bytes, BytesMut}; use hex_literal::hex; use primitive_types::{H160, U256}; -use rlp::{Encodable, Decodable, Rlp, RlpStream, DecoderError}; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; #[test] fn test_rlp_display() { @@ -21,7 +22,10 @@ fn test_rlp_display() { #[test] fn length_overflow() { - let bs = [0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe5]; + #[cfg(target_pointer_width = "64")] + let bs = hex!("bfffffffffffffffffffffffe5"); + #[cfg(target_pointer_width = "32")] + let bs = hex!("bbffffffffffffffe5"); let rlp = Rlp::new(&bs); let res: Result = rlp.as_val(); assert_eq!(Err(DecoderError::RlpInvalidLength), res); @@ -53,6 +57,35 @@ fn rlp_at() { } } +#[test] +fn rlp_at_with_offset() { + let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']; + { + let rlp = Rlp::new(&data); + assert!(rlp.is_list()); + let animals: Vec = rlp.as_list().unwrap(); + assert_eq!(animals, vec!["cat".to_owned(), "dog".to_owned()]); + + let (cat, cat_offset) = rlp.at_with_offset(0).unwrap(); + assert!(cat.is_data()); + assert_eq!(cat_offset, 1); + assert_eq!(cat.as_raw(), &[0x83, b'c', b'a', b't']); + assert_eq!(cat.as_val::().unwrap(), "cat".to_owned()); + + let (dog, dog_offset) = rlp.at_with_offset(1).unwrap(); + assert!(dog.is_data()); + assert_eq!(dog_offset, 5); + assert_eq!(dog.as_raw(), &[0x83, b'd', b'o', b'g']); + assert_eq!(dog.as_val::().unwrap(), "dog".to_owned()); + + let (cat_again, cat_offset) = rlp.at_with_offset(0).unwrap(); + assert!(cat_again.is_data()); + assert_eq!(cat_offset, 1); + assert_eq!(cat_again.as_raw(), &[0x83, b'c', b'a', b't']); + assert_eq!(cat_again.as_val::().unwrap(), "cat".to_owned()); + } +} + #[test] fn rlp_at_err() { let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o']; @@ -75,14 +108,20 @@ fn rlp_iter() { let rlp = Rlp::new(&data); let mut iter = rlp.iter(); + assert_eq!(iter.len(), 2); + let cat = iter.next().unwrap(); assert!(cat.is_data()); assert_eq!(cat.as_raw(), &[0x83, b'c', b'a', b't']); + assert_eq!(iter.len(), 1); + let dog = iter.next().unwrap(); assert!(dog.is_data()); assert_eq!(dog.as_raw(), &[0x83, b'd', b'o', b'g']); + assert_eq!(iter.len(), 0); + let none = iter.next(); assert!(none.is_none()); @@ -92,10 +131,13 @@ fn rlp_iter() { } } -struct ETestPair(T, Vec) where T: Encodable; +struct ETestPair(T, Vec) +where + T: 
Encodable; fn run_encode_tests(tests: Vec>) - where T: Encodable +where + T: Encodable, { for t in &tests { let res = rlp::encode(&t.0); @@ -103,10 +145,13 @@ fn run_encode_tests(tests: Vec>) } } -struct VETestPair(Vec, Vec) where T: Encodable; +struct VETestPair(Vec, Vec) +where + T: Encodable; fn run_encode_tests_list(tests: Vec>) - where T: Encodable +where + T: Encodable, { for t in &tests { let res = rlp::encode_list(&t.0); @@ -114,12 +159,32 @@ fn run_encode_tests_list(tests: Vec>) } } +impl From<(T, Repr)> for ETestPair +where + T: Encodable, + Repr: Into>, +{ + fn from((v, repr): (T, Repr)) -> Self { + Self(v, repr.into()) + } +} + +impl From<(Vec, Repr)> for VETestPair +where + T: Encodable, + Repr: Into>, +{ + fn from((v, repr): (Vec, Repr)) -> Self { + Self(v, repr.into()) + } +} + #[test] fn encode_u16() { let tests = vec![ - ETestPair(0u16, vec![0x80u8]), - ETestPair(0x100, vec![0x82, 0x01, 0x00]), - ETestPair(0xffff, vec![0x82, 0xff, 0xff]), + ETestPair::from((0_u16, hex!("80"))), + ETestPair::from((0x100_u16, hex!("820100"))), + ETestPair::from((0xffff_u16, hex!("82ffff"))), ]; run_encode_tests(tests); } @@ -127,9 +192,9 @@ fn encode_u16() { #[test] fn encode_u32() { let tests = vec![ - ETestPair(0u32, vec![0x80u8]), - ETestPair(0x0001_0000, vec![0x83, 0x01, 0x00, 0x00]), - ETestPair(0x00ff_ffff, vec![0x83, 0xff, 0xff, 0xff]), + ETestPair::from((0_u32, hex!("80"))), + ETestPair::from((0x0001_0000_u32, hex!("83010000"))), + ETestPair::from((0x00ff_ffff_u32, hex!("83ffffff"))), ]; run_encode_tests(tests); } @@ -137,53 +202,87 @@ fn encode_u32() { #[test] fn encode_u64() { let tests = vec![ - ETestPair(0u64, vec![0x80u8]), - ETestPair(0x0100_0000, vec![0x84, 0x01, 0x00, 0x00, 0x00]), - ETestPair(0xFFFF_FFFF, vec![0x84, 0xff, 0xff, 0xff, 0xff]), + ETestPair::from((0_u64, hex!("80"))), + ETestPair::from((0x0100_0000_u64, hex!("8401000000"))), + ETestPair::from((0xFFFF_FFFF_u64, hex!("84ffffffff"))), + ]; + run_encode_tests(tests); +} + +#[test] +fn encode_u128() { + let tests = vec![ + ETestPair::from((0_u128, hex!("80"))), + ETestPair::from((0x0100_0000_0000_0000_u128, hex!("880100000000000000"))), + ETestPair::from((0xFFFF_FFFF_FFFF_FFFF_u128, hex!("88ffffffffffffffff"))), ]; run_encode_tests(tests); } #[test] fn encode_u256() { - let tests = vec![ETestPair(U256::from(0u64), vec![0x80u8]), - ETestPair(U256::from(0x0100_0000u64), vec![0x84, 0x01, 0x00, 0x00, 0x00]), - ETestPair(U256::from(0xffff_ffffu64), - vec![0x84, 0xff, 0xff, 0xff, 0xff]), - ETestPair(("8090a0b0c0d0e0f00910203040506077000000000000\ - 000100000000000012f0").into(), - vec![0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, - 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x12, 0xf0])]; + let tests = vec![ + ETestPair::from((U256::from(0_u64), hex!("80"))), + ETestPair::from((U256::from(0x0100_0000_u64), hex!("8401000000"))), + ETestPair::from((U256::from(0xffff_ffff_u64), hex!("84ffffffff"))), + ETestPair::from(( + U256::from_big_endian(&hex!(" 8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0")), + hex!("a08090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0"), + )), + ]; run_encode_tests(tests); } #[test] fn encode_str() { - let tests = vec![ETestPair("cat", vec![0x83, b'c', b'a', b't']), - ETestPair("dog", vec![0x83, b'd', b'o', b'g']), - ETestPair("Marek", vec![0x85, b'M', b'a', b'r', b'e', b'k']), - ETestPair("", vec![0x80]), - ETestPair("Lorem ipsum dolor sit amet, consectetur 
adipisicing elit", - vec![0xb8, 0x38, b'L', b'o', b'r', b'e', b'm', b' ', b'i', - b'p', b's', b'u', b'm', b' ', b'd', b'o', b'l', b'o', - b'r', b' ', b's', b'i', b't', b' ', b'a', b'm', b'e', - b't', b',', b' ', b'c', b'o', b'n', b's', b'e', b'c', - b't', b'e', b't', b'u', b'r', b' ', b'a', b'd', b'i', - b'p', b'i', b's', b'i', b'c', b'i', b'n', b'g', b' ', - b'e', b'l', b'i', b't'])]; + let tests = vec![ + ETestPair::from(("cat", vec![0x83, b'c', b'a', b't'])), + ETestPair::from(("dog", vec![0x83, b'd', b'o', b'g'])), + ETestPair::from(("Marek", vec![0x85, b'M', b'a', b'r', b'e', b'k'])), + ETestPair::from(("", hex!("80"))), + ETestPair::from(( + "Lorem ipsum dolor sit amet, consectetur adipisicing elit", + vec![ + 0xb8, 0x38, b'L', b'o', b'r', b'e', b'm', b' ', b'i', b'p', b's', b'u', b'm', b' ', b'd', b'o', b'l', + b'o', b'r', b' ', b's', b'i', b't', b' ', b'a', b'm', b'e', b't', b',', b' ', b'c', b'o', b'n', b's', + b'e', b'c', b't', b'e', b't', b'u', b'r', b' ', b'a', b'd', b'i', b'p', b'i', b's', b'i', b'c', b'i', + b'n', b'g', b' ', b'e', b'l', b'i', b't', + ], + )), + ]; run_encode_tests(tests); } +#[test] +fn encode_into_existing_buffer() { + let mut buffer = BytesMut::new(); + buffer.extend_from_slice(b"junk"); + + let mut split_buffer = buffer.split_off(buffer.len()); + split_buffer.extend_from_slice(b"!"); + + let mut s = RlpStream::new_with_buffer(split_buffer); + s.append(&"cat"); + buffer.unsplit(s.out()); + + buffer.extend_from_slice(b" and "); + + let mut s = RlpStream::new_with_buffer(buffer); + s.append(&"dog"); + let buffer = s.out(); + + assert_eq!( + &buffer[..], + &[b'j', b'u', b'n', b'k', b'!', 0x83, b'c', b'a', b't', b' ', b'a', b'n', b'd', b' ', 0x83, b'd', b'o', b'g'] + ); +} + #[test] fn encode_address() { - let tests = vec![ - ETestPair(H160::from(hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106")), - vec![0x94, 0xef, 0x2d, 0x6d, 0x19, 0x40, 0x84, 0xc2, 0xde, - 0x36, 0xe0, 0xda, 0xbf, 0xce, 0x45, 0xd0, 0x46, - 0xb3, 0x7d, 0x11, 0x06]) - ]; + let tests = vec![ETestPair::from(( + H160::from(hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106")), + hex!("94ef2d6d194084c2de36e0dabfce45d046b37d1106"), + ))]; run_encode_tests(tests); } @@ -191,10 +290,32 @@ fn encode_address() { #[test] fn encode_vector_u8() { let tests = vec![ - ETestPair(vec![], vec![0x80]), - ETestPair(vec![0u8], vec![0]), - ETestPair(vec![0x15], vec![0x15]), - ETestPair(vec![0x40, 0x00], vec![0x82, 0x40, 0x00]), + ETestPair::from((vec![], hex!("80"))), + ETestPair::from((vec![0u8], hex!("00"))), + ETestPair::from((vec![0x15], hex!("15"))), + ETestPair::from((vec![0x40, 0x00], hex!("824000"))), + ]; + run_encode_tests(tests); +} + +#[test] +fn encode_bytes() { + let tests = vec![ + ETestPair::from((Bytes::from_static(&hex!("")), hex!("80"))), + ETestPair::from((Bytes::from_static(&hex!("00")), hex!("00"))), + ETestPair::from((Bytes::from_static(&hex!("15")), hex!("15"))), + ETestPair::from((Bytes::from_static(&hex!("4000")), hex!("824000"))), + ]; + run_encode_tests(tests); +} + +#[test] +fn encode_bytesmut() { + let tests = vec![ + ETestPair::from((BytesMut::from(&[] as &[u8]), hex!("80"))), + ETestPair::from((BytesMut::from(&hex!("00") as &[u8]), hex!("00"))), + ETestPair::from((BytesMut::from(&hex!("15") as &[u8]), hex!("15"))), + ETestPair::from((BytesMut::from(&hex!("4000") as &[u8]), hex!("824000"))), ]; run_encode_tests(tests); } @@ -202,156 +323,223 @@ fn encode_vector_u8() { #[test] fn encode_vector_u64() { let tests = vec![ - VETestPair(vec![], vec![0xc0]), - VETestPair(vec![15u64], vec![0xc1, 
0x0f]), - VETestPair(vec![1, 2, 3, 7, 0xff], vec![0xc6, 1, 2, 3, 7, 0x81, 0xff]), - VETestPair(vec![0xffff_ffff, 1, 2, 3, 7, 0xff], vec![0xcb, 0x84, 0xff, 0xff, 0xff, 0xff, 1, 2, 3, 7, 0x81, 0xff]), + VETestPair::from((vec![], hex!("c0"))), + VETestPair::from((vec![15_u64], hex!("c10f"))), + VETestPair::from((vec![1, 2, 3, 7, 0xff], hex!("c60102030781ff"))), + VETestPair::from((vec![0xffff_ffff, 1, 2, 3, 7, 0xff], hex!("cb84ffffffff0102030781ff"))), ]; run_encode_tests_list(tests); } #[test] fn encode_vector_str() { - let tests = vec![VETestPair(vec!["cat", "dog"], - vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'])]; + let tests = vec![VETestPair(vec!["cat", "dog"], vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'])]; run_encode_tests_list(tests); } -struct DTestPair(T, Vec) where T: Decodable + fmt::Debug + cmp::Eq; +#[test] +fn clear() { + let mut buffer = BytesMut::new(); + buffer.extend_from_slice(b"junk"); + + let mut s = RlpStream::new_with_buffer(buffer); + s.append(&"parrot"); + s.clear(); + s.append(&"cat"); + + assert_eq!(&s.out()[..], &[b'j', b'u', b'n', b'k', 0x83, b'c', b'a', b't']); +} -struct VDTestPair(Vec, Vec) where T: Decodable + fmt::Debug + cmp::Eq; +struct DTestPair(T, Vec) +where + T: Decodable + fmt::Debug + cmp::Eq; -fn run_decode_tests(tests: Vec>) where T: Decodable + fmt::Debug + cmp::Eq { +struct VDTestPair(Vec, Vec) +where + T: Decodable + fmt::Debug + cmp::Eq; + +fn run_decode_tests(tests: Vec>) +where + T: Decodable + fmt::Debug + cmp::Eq, +{ for t in &tests { - let res : Result = rlp::decode(&t.1); + let res: Result = rlp::decode(&t.1); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(&res, &t.0); } } -fn run_decode_tests_list(tests: Vec>) where T: Decodable + fmt::Debug + cmp::Eq { +fn run_decode_tests_list(tests: Vec>) +where + T: Decodable + fmt::Debug + cmp::Eq, +{ for t in &tests { let res: Vec = rlp::decode_list(&t.1); assert_eq!(res, t.0); } } +impl From<(T, Repr)> for DTestPair +where + T: Decodable + fmt::Debug + cmp::Eq, + Repr: Into>, +{ + fn from((v, repr): (T, Repr)) -> Self { + Self(v, repr.into()) + } +} + +impl From<(Vec, Repr)> for VDTestPair +where + T: Decodable + fmt::Debug + cmp::Eq, + Repr: Into>, +{ + fn from((v, repr): (Vec, Repr)) -> Self { + Self(v, repr.into()) + } +} + /// Vec (Bytes) is treated as a single value #[test] fn decode_vector_u8() { let tests = vec![ - DTestPair(vec![], vec![0x80]), - DTestPair(vec![0u8], vec![0]), - DTestPair(vec![0x15], vec![0x15]), - DTestPair(vec![0x40, 0x00], vec![0x82, 0x40, 0x00]), + DTestPair::from((vec![], hex!("80"))), + DTestPair::from((vec![0_u8], hex!("00"))), + DTestPair::from((vec![0x15], hex!("15"))), + DTestPair::from((vec![0x40, 0x00], hex!("824000"))), ]; run_decode_tests(tests); } #[test] -fn decode_untrusted_u8() { +fn decode_bytes() { let tests = vec![ - DTestPair(0x0u8, vec![0x80]), - DTestPair(0x77u8, vec![0x77]), - DTestPair(0xccu8, vec![0x81, 0xcc]), + DTestPair::from((Bytes::from_static(&hex!("")), hex!("80"))), + DTestPair::from((Bytes::from_static(&hex!("00")), hex!("00"))), + DTestPair::from((Bytes::from_static(&hex!("15")), hex!("15"))), + DTestPair::from((Bytes::from_static(&hex!("4000")), hex!("824000"))), ]; run_decode_tests(tests); } #[test] -fn decode_untrusted_u16() { +fn decode_bytesmut() { let tests = vec![ - DTestPair(0x100u16, vec![0x82, 0x01, 0x00]), - DTestPair(0xffffu16, vec![0x82, 0xff, 0xff]), + DTestPair::from((BytesMut::from(&hex!("") as &[u8]), hex!("80"))), + DTestPair::from((BytesMut::from(&hex!("00") as &[u8]), 
hex!("00"))), + DTestPair::from((BytesMut::from(&hex!("15") as &[u8]), hex!("15"))), + DTestPair::from((BytesMut::from(&hex!("4000") as &[u8]), hex!("824000"))), ]; run_decode_tests(tests); } #[test] -fn decode_untrusted_u32() { +fn decode_untrusted_u8() { let tests = vec![ - DTestPair(0x0001_0000u32, vec![0x83, 0x01, 0x00, 0x00]), - DTestPair(0x00ff_ffffu32, vec![0x83, 0xff, 0xff, 0xff]), + DTestPair::from((0x0_u8, hex!("80"))), + DTestPair::from((0x77_u8, hex!("77"))), + DTestPair::from((0xcc_u8, hex!("81cc"))), ]; run_decode_tests(tests); } +#[test] +fn decode_untrusted_u16() { + let tests = vec![DTestPair::from((0x100u16, hex!("820100"))), DTestPair::from((0xffffu16, hex!("82ffff")))]; + run_decode_tests(tests); +} + +#[test] +fn decode_untrusted_u32() { + let tests = + vec![DTestPair::from((0x0001_0000u32, hex!("83010000"))), DTestPair::from((0x00ff_ffffu32, hex!("83ffffff")))]; + run_decode_tests(tests); +} + #[test] fn decode_untrusted_u64() { let tests = vec![ - DTestPair(0x0100_0000u64, vec![0x84, 0x01, 0x00, 0x00, 0x00]), - DTestPair(0xFFFF_FFFFu64, vec![0x84, 0xff, 0xff, 0xff, 0xff]), + DTestPair::from((0x0100_0000_u64, hex!("8401000000"))), + DTestPair::from((0xFFFF_FFFF_u64, hex!("84ffffffff"))), + ]; + run_decode_tests(tests); +} + +#[test] +fn decode_untrusted_u128() { + let tests = vec![ + DTestPair::from((0x0100_0000_0000_0000_u128, hex!("880100000000000000"))), + DTestPair::from((0xFFFF_FFFF_FFFF_FFFF_u128, hex!("88ffffffffffffffff"))), ]; run_decode_tests(tests); } #[test] fn decode_untrusted_u256() { - let tests = vec![DTestPair(U256::from(0u64), vec![0x80u8]), - DTestPair(U256::from(0x0100_0000u64), vec![0x84, 0x01, 0x00, 0x00, 0x00]), - DTestPair(U256::from(0xffff_ffffu64), - vec![0x84, 0xff, 0xff, 0xff, 0xff]), - DTestPair(("8090a0b0c0d0e0f00910203040506077000000000000\ - 000100000000000012f0").into(), - vec![0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, - 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x12, 0xf0])]; + let tests = vec![ + DTestPair::from((U256::from(0_u64), hex!("80"))), + DTestPair::from((U256::from(0x0100_0000_u64), hex!("8401000000"))), + DTestPair::from((U256::from(0xffff_ffff_u64), hex!("84ffffffff"))), + DTestPair::from(( + U256::from_big_endian(&hex!(" 8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0")), + hex!("a08090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0"), + )), + ]; run_decode_tests(tests); } #[test] fn decode_untrusted_str() { - let tests = vec![DTestPair("cat".to_owned(), vec![0x83, b'c', b'a', b't']), - DTestPair("dog".to_owned(), vec![0x83, b'd', b'o', b'g']), - DTestPair("Marek".to_owned(), - vec![0x85, b'M', b'a', b'r', b'e', b'k']), - DTestPair("".to_owned(), vec![0x80]), - DTestPair("Lorem ipsum dolor sit amet, consectetur adipisicing elit" - .to_owned(), - vec![0xb8, 0x38, b'L', b'o', b'r', b'e', b'm', b' ', b'i', - b'p', b's', b'u', b'm', b' ', b'd', b'o', b'l', b'o', - b'r', b' ', b's', b'i', b't', b' ', b'a', b'm', b'e', - b't', b',', b' ', b'c', b'o', b'n', b's', b'e', b'c', - b't', b'e', b't', b'u', b'r', b' ', b'a', b'd', b'i', - b'p', b'i', b's', b'i', b'c', b'i', b'n', b'g', b' ', - b'e', b'l', b'i', b't'])]; + let tests = vec![ + DTestPair::from(("cat".to_owned(), vec![0x83, b'c', b'a', b't'])), + DTestPair::from(("dog".to_owned(), vec![0x83, b'd', b'o', b'g'])), + DTestPair::from(("Marek".to_owned(), vec![0x85, b'M', b'a', b'r', b'e', b'k'])), + DTestPair::from(("".to_owned(), 
hex!("80"))), + DTestPair::from(( + "Lorem ipsum dolor sit amet, consectetur adipisicing elit".to_owned(), + vec![ + 0xb8, 0x38, b'L', b'o', b'r', b'e', b'm', b' ', b'i', b'p', b's', b'u', b'm', b' ', b'd', b'o', b'l', + b'o', b'r', b' ', b's', b'i', b't', b' ', b'a', b'm', b'e', b't', b',', b' ', b'c', b'o', b'n', b's', + b'e', b'c', b't', b'e', b't', b'u', b'r', b' ', b'a', b'd', b'i', b'p', b'i', b's', b'i', b'c', b'i', + b'n', b'g', b' ', b'e', b'l', b'i', b't', + ], + )), + ]; run_decode_tests(tests); } #[test] fn decode_untrusted_address() { - let tests = vec![ - DTestPair(H160::from(hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106")), - vec![0x94, 0xef, 0x2d, 0x6d, 0x19, 0x40, 0x84, 0xc2, 0xde, - 0x36, 0xe0, 0xda, 0xbf, 0xce, 0x45, 0xd0, 0x46, - 0xb3, 0x7d, 0x11, 0x06]) - ]; + let tests = vec![DTestPair::from(( + H160::from(hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106")), + hex!("94ef2d6d194084c2de36e0dabfce45d046b37d1106"), + ))]; run_decode_tests(tests); } #[test] fn decode_untrusted_vector_u64() { let tests = vec![ - VDTestPair(vec![], vec![0xc0]), - VDTestPair(vec![15u64], vec![0xc1, 0x0f]), - VDTestPair(vec![1, 2, 3, 7, 0xff], vec![0xc6, 1, 2, 3, 7, 0x81, 0xff]), - VDTestPair(vec![0xffff_ffff, 1, 2, 3, 7, 0xff], vec![0xcb, 0x84, 0xff, 0xff, 0xff, 0xff, 1, 2, 3, 7, 0x81, 0xff]), + VDTestPair::from((vec![], hex!("c0"))), + VDTestPair::from((vec![15_u64], hex!("c10f"))), + VDTestPair::from((vec![1, 2, 3, 7, 0xff], hex!("c60102030781ff"))), + VDTestPair::from((vec![0xffff_ffff, 1, 2, 3, 7, 0xff], hex!("cb84ffffffff0102030781ff"))), ]; run_decode_tests_list(tests); } #[test] fn decode_untrusted_vector_str() { - let tests = vec![VDTestPair(vec!["cat".to_owned(), "dog".to_owned()], - vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'])]; + let tests = vec![VDTestPair( + vec!["cat".to_owned(), "dog".to_owned()], + vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'], + )]; run_decode_tests_list(tests); } #[test] -fn test_rlp_data_length_check() -{ +fn test_rlp_data_length_check() { let data = vec![0x84, b'c', b'a', b't']; let rlp = Rlp::new(&data); @@ -360,9 +548,8 @@ fn test_rlp_data_length_check() } #[test] -fn test_rlp_long_data_length_check() -{ - let mut data: Vec = vec![0xb8, 255]; +fn test_rlp_long_data_length_check() { + let mut data = hex!("b8ff").to_vec(); for _ in 0..253 { data.push(b'c'); } @@ -374,9 +561,8 @@ fn test_rlp_long_data_length_check() } #[test] -fn test_the_exact_long_string() -{ - let mut data: Vec = vec![0xb8, 255]; +fn test_the_exact_long_string() { + let mut data = hex!("b8ff").to_vec(); for _ in 0..255 { data.push(b'c'); } @@ -388,9 +574,8 @@ fn test_the_exact_long_string() } #[test] -fn test_rlp_2bytes_data_length_check() -{ - let mut data: Vec = vec![0xb9, 2, 255]; // 512+255 +fn test_rlp_2bytes_data_length_check() { + let mut data = hex!("b902ff").to_vec(); // 512+255 for _ in 0..700 { data.push(b'c'); } @@ -405,13 +590,16 @@ fn test_rlp_2bytes_data_length_check() fn test_rlp_nested_empty_list_encode() { let mut stream = RlpStream::new_list(2); stream.append_list(&(Vec::new() as Vec)); - stream.append(&40u32); - assert_eq!(stream.drain()[..], [0xc2u8, 0xc0u8, 40u8][..]); + stream.append(&0x28_u32); + assert_eq!(stream.out()[..], hex!("c2c028")[..]); } #[test] fn test_rlp_list_length_overflow() { - let data: Vec = vec![0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00]; + #[cfg(target_pointer_width = "64")] + let data = hex!("ffffffffffffffffff000000"); + #[cfg(target_pointer_width = "32")] + let data = 
hex!("fbffffffff000000"); let rlp = Rlp::new(&data); let as_val: Result = rlp.val_at(0); assert_eq!(Err(DecoderError::RlpIsTooShort), as_val); @@ -419,11 +607,11 @@ fn test_rlp_list_length_overflow() { #[test] fn test_rlp_stream_size_limit() { - for limit in 40 .. 270 { + for limit in 40..270 { let item = [0u8; 1]; let mut stream = RlpStream::new(); while stream.append_raw_checked(&item, 1, limit) {} - assert_eq!(stream.drain().len(), limit); + assert_eq!(stream.out().len(), limit); } } @@ -434,7 +622,7 @@ fn test_rlp_stream_unbounded_list() { stream.append(&40u32); stream.append(&41u32); assert!(!stream.is_finished()); - stream.complete_unbounded_list(); + stream.finalize_unbounded_list(); assert!(stream.is_finished()); } @@ -443,10 +631,19 @@ fn test_rlp_is_int() { for b in 0xb8..0xc0 { let data: Vec = vec![b]; let rlp = Rlp::new(&data); - assert_eq!(rlp.is_int(), false); + assert!(!rlp.is_int()); } } +#[test] +fn test_bool_same_as_int() { + assert_eq!(rlp::encode(&false), rlp::encode(&0x00u8)); + assert_eq!(rlp::encode(&true), rlp::encode(&0x01u8)); + let two = rlp::encode(&0x02u8); + let invalid: Result = rlp::decode(&two); + invalid.unwrap_err(); +} + // test described in // // https://github.com/paritytech/parity-common/issues/49 @@ -504,12 +701,12 @@ fn test_nested_list_roundtrip() { s.begin_unbounded_list() .append(&self.0) .append(&self.1) - .complete_unbounded_list(); + .finalize_unbounded_list(); } } impl Decodable for Inner { - fn decode(rlp: &Rlp) -> Result { + fn decode(rlp: &Rlp<'_>) -> Result { Ok(Inner(rlp.val_at(0)?, rlp.val_at(1)?)) } } @@ -519,19 +716,16 @@ fn test_nested_list_roundtrip() { impl Encodable for Nest { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_unbounded_list() - .append_list(&self.0) - .complete_unbounded_list(); + s.begin_unbounded_list().append_list(&self.0).finalize_unbounded_list(); } } impl Decodable for Nest { - fn decode(rlp: &Rlp) -> Result { + fn decode(rlp: &Rlp<'_>) -> Result { Ok(Nest(rlp.list_at(0)?)) } } - let items = (0..4).map(|i| Inner(i, i + 1)).collect(); let nest = Nest(items); diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 000000000..4c2c9e8d8 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,23 @@ +# https://github.com/paritytech/substrate/blob/master/rustfmt.toml +# Basic +hard_tabs = true +max_width = 120 +use_small_heuristics = "Max" +# Imports +imports_granularity = "Crate" +reorder_imports = true +# Consistency +newline_style = "Unix" +normalize_comments = true +normalize_doc_attributes = true +# Misc +chain_width = 80 +spaces_around_ranges = false +binop_separator = "Back" +reorder_impl_items = false +match_arm_leading_pipes = "Preserve" +match_arm_blocks = false +match_block_trailing_comma = true +trailing_comma = "Vertical" +trailing_semicolon = false +use_field_init_shorthand = true diff --git a/trace-time/Cargo.toml b/trace-time/Cargo.toml deleted file mode 100644 index 383d5d3cc..000000000 --- a/trace-time/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "trace-time" -description = "Easily trace time to execute a scope." -version = "0.1.1" -authors = ["Parity Technologies "] -repository = "https://github.com/paritytech/parity-common" -license = "GPL-3.0" - -[dependencies] -log = "0.4" diff --git a/trace-time/src/lib.rs b/trace-time/src/lib.rs deleted file mode 100644 index 4c3b0b274..000000000 --- a/trace-time/src/lib.rs +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. 
- -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Performance timer with logging - -#[macro_use] -extern crate log; - -use std::time::Instant; - -#[macro_export] -macro_rules! trace_time { - ($name: expr) => { - let _timer = $crate::PerfTimer::new($name); - } -} - -/// Performance timer with logging. Starts measuring time in the constructor, prints -/// elapsed time in the destructor or when `stop` is called. -pub struct PerfTimer { - name: &'static str, - start: Instant, -} - -impl PerfTimer { - /// Create an instance with given name. - pub fn new(name: &'static str) -> PerfTimer { - PerfTimer { - name, - start: Instant::now(), - } - } -} - -impl Drop for PerfTimer { - fn drop(&mut self) { - let elapsed = self.start.elapsed(); - let ms = elapsed.subsec_nanos() as f32 / 1_000_000.0 + - elapsed.as_secs() as f32 * 1_000.0; - trace!(target: "perf", "{}: {:.2}ms", self.name, ms); - } -} diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml deleted file mode 100644 index 0a2a213fb..000000000 --- a/transaction-pool/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -description = "Generic transaction pool." -name = "transaction-pool" -version = "2.0.1" -license = "GPL-3.0" -authors = ["Parity Technologies "] -repository = "https://github.com/paritytech/parity-common" -edition = "2018" - -[dependencies] -log = "0.4" -smallvec = "0.6" -trace-time = { path = "../trace-time", version = "0.1" } - -[dev-dependencies] -ethereum-types = { version = "0.7", path = "../ethereum-types" } diff --git a/transaction-pool/src/error.rs b/transaction-pool/src/error.rs deleted file mode 100644 index 851f1f6e7..000000000 --- a/transaction-pool/src/error.rs +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use std::{error, fmt, result}; - -/// Transaction Pool Error -#[derive(Debug)] -pub enum Error { - /// Transaction is already imported - AlreadyImported(Hash), - /// Transaction is too cheap to enter the queue - TooCheapToEnter(Hash, String), - /// Transaction is too cheap to replace existing transaction that occupies the same slot. 
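The removed `trace-time` crate boils down to the RAII guard above: the macro binds a `PerfTimer` that logs the elapsed milliseconds when it goes out of scope. A minimal usage sketch, assuming some `log` backend is installed (env_logger is only an example, and `expensive_work` is a hypothetical caller):

#[macro_use]
extern crate trace_time;

fn expensive_work() -> u64 {
	// `_timer` is dropped at the end of this scope and logs
	// "expensive_work: <elapsed>ms" at trace level under the "perf" target.
	trace_time!("expensive_work");
	(0..1_000_000u64).sum()
}

fn main() {
	// Any `log` backend works; env_logger with RUST_LOG=perf=trace is one option.
	env_logger::init();
	println!("sum = {}", expensive_work());
}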
- TooCheapToReplace(Hash, Hash), -} - -/// Transaction Pool Result -pub type Result = result::Result>; - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Error::AlreadyImported(h) => - write!(f, "[{:?}] already imported", h), - Error::TooCheapToEnter(hash, min_score) => - write!(f, "[{:x}] too cheap to enter the pool. Min score: {}", hash, min_score), - Error::TooCheapToReplace(old_hash, hash) => - write!(f, "[{:x}] too cheap to replace: {:x}", hash, old_hash), - } - } -} - -impl error::Error for Error {} - -#[cfg(test)] -impl PartialEq for Error where H: PartialEq { - fn eq(&self, other: &Self) -> bool { - use self::Error::*; - - match (self, other) { - (&AlreadyImported(ref h1), &AlreadyImported(ref h2)) => h1 == h2, - (&TooCheapToEnter(ref h1, ref s1), &TooCheapToEnter(ref h2, ref s2)) => h1 == h2 && s1 == s2, - (&TooCheapToReplace(ref old1, ref new1), &TooCheapToReplace(ref old2, ref new2)) => old1 == old2 && new1 == new2, - _ => false, - } - } -} diff --git a/transaction-pool/src/lib.rs b/transaction-pool/src/lib.rs deleted file mode 100644 index 8a8e3ffb0..000000000 --- a/transaction-pool/src/lib.rs +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Generic Transaction Pool -//! -//! An extensible and performant implementation of Ethereum Transaction Pool. -//! The pool stores ordered, verified transactions according to some pluggable -//! `Scoring` implementation. -//! The pool also allows you to construct a set of `pending` transactions according -//! to some notion of `Readiness` (pluggable). -//! -//! The pool is generic over transactions and should make no assumptions about them. -//! The only thing we can rely on is the `Scoring` that defines: -//! - the ordering of transactions from a single sender -//! - the priority of the transaction compared to other transactions from different senders -//! -//! NOTE: the transactions from a single sender are not ordered by priority, -//! but still when constructing pending set we always need to maintain the ordering -//! (i.e. `txs[1]` always needs to be included after `txs[0]` even if it has higher priority) -//! -//! ### Design Details -//! -//! Performance assumptions: -//! - Possibility to handle tens of thousands of transactions -//! - Fast insertions and replacements `O(per-sender + log(senders))` -//! - Reasonably fast removal of stalled transactions `O(per-sender)` -//! - Reasonably fast construction of pending set `O(txs * (log(senders) + log(per-sender))` -//! -//! The removal performance could be improved by trading some memory. Currently `SmallVec` is used -//! to store senders transactions, instead we could use `VecDeque` and efficiently `pop_front` -//! the best transactions. -//! -//! The pending set construction and insertion complexity could be reduced by introducing -//! 
a notion of `nonce` - an absolute, numeric ordering of transactions. -//! We don't do that because of possible implications of EIP208 where nonce might not be -//! explicitly available. -//! -//! 1. The pool groups transactions from particular sender together -//! and stores them ordered by `Scoring` within that group -//! i.e. `HashMap>`. -//! 2. Additionaly we maintain the best and the worst transaction from each sender -//! (by `Scoring` not `priority`) ordered by `priority`. -//! It means that we can easily identify the best transaction inside the entire pool -//! and the worst transaction. -//! 3. Whenever new transaction is inserted to the queue: -//! - first check all the limits (overall, memory, per-sender) -//! - retrieve all transactions from a sender -//! - binary search for position to insert the transaction -//! - decide if we are replacing existing transaction (3 outcomes: drop, replace, insert) -//! - update best and worst transaction from that sender if affected -//! 4. Pending List construction: -//! - Take the best transaction (by priority) from all senders to the List -//! - Replace the transaction with next transaction (by ordering) from that sender (if any) -//! - Repeat - -#![warn(missing_docs)] - -#[cfg(test)] -mod tests; - -mod error; -mod listener; -mod options; -mod pool; -mod ready; -mod replace; -mod status; -mod transactions; -mod verifier; - -pub mod scoring; - -pub use self::error::Error; -pub use self::listener::{Listener, NoopListener}; -pub use self::options::Options; -pub use self::pool::{Pool, PendingIterator, UnorderedIterator, Transaction}; -pub use self::ready::{Ready, Readiness}; -pub use self::replace::{ShouldReplace, ReplaceTransaction}; -pub use self::scoring::Scoring; -pub use self::status::{LightStatus, Status}; -pub use self::verifier::Verifier; - -use std::fmt; -use std::hash::Hash; - -/// Already verified transaction that can be safely queued. -pub trait VerifiedTransaction: fmt::Debug { - /// Transaction hash type. - type Hash: fmt::Debug + fmt::LowerHex + Eq + Clone + Hash; - - /// Transaction sender type. - type Sender: fmt::Debug + Eq + Clone + Hash + Send; - - /// Transaction hash - fn hash(&self) -> &Self::Hash; - - /// Memory usage - fn mem_usage(&self) -> usize; - - /// Transaction sender - fn sender(&self) -> &Self::Sender; -} diff --git a/transaction-pool/src/listener.rs b/transaction-pool/src/listener.rs deleted file mode 100644 index 75b59bda6..000000000 --- a/transaction-pool/src/listener.rs +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use std::{fmt::{Debug, LowerHex}, sync::Arc}; -use crate::error::Error; - -/// Transaction pool listener. -/// -/// Listener is being notified about status of every transaction in the pool. -pub trait Listener { - /// The transaction has been successfuly added to the pool. 
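`VerifiedTransaction` above is the only trait the removed pool requires of a transaction type. A minimal implementation sketch, mirroring the test fixture further down and using `ethereum-types` for the hash and sender types (`MyTx` is a made-up name, and the `mem_usage` figure is only a rough stand-in):

use ethereum_types::{Address, H256, U256};
use transaction_pool::VerifiedTransaction;

#[derive(Debug)]
pub struct MyTx {
	pub hash: H256,
	pub sender: Address,
	pub nonce: U256,
	pub gas_price: U256,
}

impl VerifiedTransaction for MyTx {
	type Hash = H256;
	type Sender = Address;

	fn hash(&self) -> &H256 {
		&self.hash
	}

	// Rough in-memory footprint; the pool only uses this figure to enforce
	// `Options::max_mem_usage`.
	fn mem_usage(&self) -> usize {
		std::mem::size_of::<Self>()
	}

	fn sender(&self) -> &Address {
		&self.sender
	}
}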
- /// If second argument is `Some` the transaction has took place of some other transaction - /// which was already in pool. - /// NOTE: You won't be notified about drop of `old` transaction separately. - fn added(&mut self, _tx: &Arc, _old: Option<&Arc>) {} - - /// The transaction was rejected from the pool. - /// It means that it was too cheap to replace any transaction already in the pool. - fn rejected(&mut self, _tx: &Arc, _reason: &Error) {} - - /// The transaction was pushed out from the pool because of the limit. - fn dropped(&mut self, _tx: &Arc, _by: Option<&T>) {} - - /// The transaction was marked as invalid by executor. - fn invalid(&mut self, _tx: &Arc) {} - - /// The transaction has been canceled. - fn canceled(&mut self, _tx: &Arc) {} - - /// The transaction has been culled from the pool. - fn culled(&mut self, _tx: &Arc) {} -} - -/// A no-op implementation of `Listener`. -#[derive(Debug)] -pub struct NoopListener; -impl Listener for NoopListener {} - -impl Listener for (A, B) where - A: Listener, - B: Listener, -{ - fn added(&mut self, tx: &Arc, old: Option<&Arc>) { - self.0.added(tx, old); - self.1.added(tx, old); - } - - fn rejected(&mut self, tx: &Arc, reason: &Error) { - self.0.rejected(tx, reason); - self.1.rejected(tx, reason); - } - - fn dropped(&mut self, tx: &Arc, by: Option<&T>) { - self.0.dropped(tx, by); - self.1.dropped(tx, by); - } - - fn invalid(&mut self, tx: &Arc) { - self.0.invalid(tx); - self.1.invalid(tx); - } - - fn canceled(&mut self, tx: &Arc) { - self.0.canceled(tx); - self.1.canceled(tx); - } - - fn culled(&mut self, tx: &Arc) { - self.0.culled(tx); - self.1.culled(tx); - } -} diff --git a/transaction-pool/src/options.rs b/transaction-pool/src/options.rs deleted file mode 100644 index 291001a20..000000000 --- a/transaction-pool/src/options.rs +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -/// Transaction Pool options. -#[derive(Clone, Debug, PartialEq)] -pub struct Options { - /// Maximal number of transactions in the pool. - pub max_count: usize, - /// Maximal number of transactions from single sender. - pub max_per_sender: usize, - /// Maximal memory usage. - pub max_mem_usage: usize, -} - -impl Default for Options { - fn default() -> Self { - Options { - max_count: 1024, - max_per_sender: 16, - max_mem_usage: 8 * 1024 * 1024, - } - } -} diff --git a/transaction-pool/src/pool.rs b/transaction-pool/src/pool.rs deleted file mode 100644 index 4e9bc35d2..000000000 --- a/transaction-pool/src/pool.rs +++ /dev/null @@ -1,639 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
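`Listener` is purely observational: every method has a no-op default, so an implementation only overrides the notifications it cares about. A small sketch with a hypothetical `DropCounter` that tallies limit-driven evictions:

use std::sync::Arc;
use transaction_pool::Listener;

/// Counts transactions pushed out of the pool by its limits; every other
/// notification keeps its default no-op body.
#[derive(Debug, Default)]
struct DropCounter {
	dropped: usize,
}

impl<T> Listener<T> for DropCounter {
	fn dropped(&mut self, _tx: &Arc<T>, _by: Option<&T>) {
		self.dropped += 1;
	}
}

Such a listener would be wired in through `Pool::new(DropCounter::default(), scoring, options)`, the constructor shown a little further down.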
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use std::sync::Arc; -use std::slice; -use std::collections::{hash_map, HashMap, BTreeSet}; -use log::{trace, warn}; - -use crate::{ - error, - listener::{Listener, NoopListener}, - options::Options, - ready::{Ready, Readiness}, - replace::{ShouldReplace, ReplaceTransaction}, - scoring::{self, Scoring, ScoreWithRef}, - status::{LightStatus, Status}, - transactions::{AddResult, Transactions}, - VerifiedTransaction, -}; - -/// Internal representation of transaction. -/// -/// Includes unique insertion id that can be used for scoring explictly, -/// but internally is used to resolve conflicts in case of equal scoring -/// (newer transactionsa are preferred). -#[derive(Debug)] -pub struct Transaction { - /// Sequential id of the transaction - pub insertion_id: u64, - /// Shared transaction - pub transaction: Arc, -} - -impl Clone for Transaction { - fn clone(&self) -> Self { - Transaction { - insertion_id: self.insertion_id, - transaction: self.transaction.clone(), - } - } -} - -impl ::std::ops::Deref for Transaction { - type Target = Arc; - - fn deref(&self) -> &Self::Target { - &self.transaction - } -} - -/// A transaction pool. -#[derive(Debug)] -pub struct Pool, L = NoopListener> { - listener: L, - scoring: S, - options: Options, - mem_usage: usize, - - transactions: HashMap>, - by_hash: HashMap>, - - best_transactions: BTreeSet>, - worst_transactions: BTreeSet>, - - insertion_id: u64, -} - -impl + Default> Default for Pool { - fn default() -> Self { - Self::with_scoring(S::default(), Options::default()) - } -} - -impl + Default> Pool { - /// Creates a new `Pool` with given options - /// and default `Scoring` and `Listener`. - pub fn with_options(options: Options) -> Self { - Self::with_scoring(S::default(), options) - } -} - -impl> Pool { - /// Creates a new `Pool` with given `Scoring` and options. - pub fn with_scoring(scoring: S, options: Options) -> Self { - Self::new(NoopListener, scoring, options) - } -} - -const INITIAL_NUMBER_OF_SENDERS: usize = 16; - -impl Pool where - T: VerifiedTransaction, - S: Scoring, - L: Listener, -{ - /// Creates new `Pool` with given `Scoring`, `Listener` and options. - pub fn new(listener: L, scoring: S, options: Options) -> Self { - let transactions = HashMap::with_capacity(INITIAL_NUMBER_OF_SENDERS); - let by_hash = HashMap::with_capacity(options.max_count / 16); - - Pool { - listener, - scoring, - options, - mem_usage: 0, - transactions, - by_hash, - best_transactions: Default::default(), - worst_transactions: Default::default(), - insertion_id: 0, - } - - } - - /// Attempts to import new transaction to the pool, returns a `Arc` or an `Error`. - /// - /// NOTE: Since `Ready`ness is separate from the pool it's possible to import stalled transactions. - /// It's the caller responsibility to make sure that's not the case. - /// - /// NOTE: The transaction may push out some other transactions from the pool - /// either because of limits (see `Options`) or because `Scoring` decides that the transaction - /// replaces an existing transaction from that sender. 
- /// - /// If any limit is reached the transaction with the lowest `Score` will be compared with the - /// new transaction via the supplied `ShouldReplace` implementation and may be evicted. - /// - /// The `Listener` will be informed on any drops or rejections. - pub fn import(&mut self, transaction: T, replace: &ShouldReplace) -> error::Result, T::Hash> { - let mem_usage = transaction.mem_usage(); - - if self.by_hash.contains_key(transaction.hash()) { - return Err(error::Error::AlreadyImported(transaction.hash().clone())) - } - - self.insertion_id += 1; - let transaction = Transaction { - insertion_id: self.insertion_id, - transaction: Arc::new(transaction), - }; - - // TODO [ToDr] Most likely move this after the transaction is inserted. - // Avoid using should_replace, but rather use scoring for that. - { - let remove_worst = |s: &mut Self, transaction| { - match s.remove_worst(transaction, replace) { - Err(err) => { - s.listener.rejected(transaction, &err); - Err(err) - }, - Ok(None) => Ok(false), - Ok(Some(removed)) => { - s.listener.dropped(&removed, Some(transaction)); - s.finalize_remove(removed.hash()); - Ok(true) - }, - } - }; - - while self.by_hash.len() + 1 > self.options.max_count { - trace!("Count limit reached: {} > {}", self.by_hash.len() + 1, self.options.max_count); - if !remove_worst(self, &transaction)? { - break; - } - } - - while self.mem_usage + mem_usage > self.options.max_mem_usage { - trace!("Mem limit reached: {} > {}", self.mem_usage + mem_usage, self.options.max_mem_usage); - if !remove_worst(self, &transaction)? { - break; - } - } - } - - let (result, prev_state, current_state) = { - let transactions = self.transactions.entry(transaction.sender().clone()).or_insert_with(Transactions::default); - // get worst and best transactions for comparison - let prev = transactions.worst_and_best(); - let result = transactions.add(transaction, &self.scoring, self.options.max_per_sender); - let current = transactions.worst_and_best(); - (result, prev, current) - }; - - // update best and worst transactions from this sender (if required) - self.update_senders_worst_and_best(prev_state, current_state); - - match result { - AddResult::Ok(tx) => { - self.listener.added(&tx, None); - self.finalize_insert(&tx, None); - Ok(tx.transaction) - }, - AddResult::PushedOut { new, old } | - AddResult::Replaced { new, old } => { - self.listener.added(&new, Some(&old)); - self.finalize_insert(&new, Some(&old)); - Ok(new.transaction) - }, - AddResult::TooCheap { new, old } => { - let error = error::Error::TooCheapToReplace(old.hash().clone(), new.hash().clone()); - self.listener.rejected(&new, &error); - return Err(error) - }, - AddResult::TooCheapToEnter(new, score) => { - let error = error::Error::TooCheapToEnter(new.hash().clone(), format!("{:#x}", score)); - self.listener.rejected(&new, &error); - return Err(error) - } - } - } - - /// Updates state of the pool statistics if the transaction was added to a set. - fn finalize_insert(&mut self, new: &Transaction, old: Option<&Transaction>) { - self.mem_usage += new.mem_usage(); - self.by_hash.insert(new.hash().clone(), new.clone()); - - if let Some(old) = old { - self.finalize_remove(old.hash()); - } - } - - /// Updates the pool statistics if transaction was removed. - fn finalize_remove(&mut self, hash: &T::Hash) -> Option> { - self.by_hash.remove(hash).map(|old| { - self.mem_usage -= old.transaction.mem_usage(); - old.transaction - }) - } - - /// Updates best and worst transactions from a sender. 
- fn update_senders_worst_and_best( - &mut self, - previous: Option<((S::Score, Transaction), (S::Score, Transaction))>, - current: Option<((S::Score, Transaction), (S::Score, Transaction))>, - ) { - let worst_collection = &mut self.worst_transactions; - let best_collection = &mut self.best_transactions; - - let is_same = |a: &(S::Score, Transaction), b: &(S::Score, Transaction)| { - a.0 == b.0 && a.1.hash() == b.1.hash() - }; - - let update = |collection: &mut BTreeSet<_>, (score, tx), remove| if remove { - collection.remove(&ScoreWithRef::new(score, tx)); - } else { - collection.insert(ScoreWithRef::new(score, tx)); - }; - - match (previous, current) { - (None, Some((worst, best))) => { - update(worst_collection, worst, false); - update(best_collection, best, false); - }, - (Some((worst, best)), None) => { - // all transactions from that sender has been removed. - // We can clear a hashmap entry. - self.transactions.remove(worst.1.sender()); - update(worst_collection, worst, true); - update(best_collection, best, true); - }, - (Some((w1, b1)), Some((w2, b2))) => { - if !is_same(&w1, &w2) { - update(worst_collection, w1, true); - update(worst_collection, w2, false); - } - if !is_same(&b1, &b2) { - update(best_collection, b1, true); - update(best_collection, b2, false); - } - }, - (None, None) => {}, - } - } - - /// Attempts to remove the worst transaction from the pool if it's worse than the given one. - /// - /// Returns `None` in case we couldn't decide if the transaction should replace the worst transaction or not. - /// In such case we will accept the transaction even though it is going to exceed the limit. - fn remove_worst(&mut self, transaction: &Transaction, replace: &ShouldReplace) -> error::Result>, T::Hash> { - let to_remove = match self.worst_transactions.iter().next_back() { - // No elements to remove? and the pool is still full? - None => { - warn!("The pool is full but there are no transactions to remove."); - return Err(error::Error::TooCheapToEnter(transaction.hash().clone(), "unknown".into())) - }, - Some(old) => { - let txs = &self.transactions; - let get_replace_tx = |tx| { - let sender_txs = txs.get(transaction.sender()).map(|txs| txs.iter().as_slice()); - ReplaceTransaction::new(tx, sender_txs) - }; - let old_replace = get_replace_tx(&old.transaction); - let new_replace = get_replace_tx(transaction); - - match replace.should_replace(&old_replace, &new_replace) { - // We can't decide which of them should be removed, so accept both. - scoring::Choice::InsertNew => None, - // New transaction is better than the worst one so we can replace it. - scoring::Choice::ReplaceOld => Some(old.clone()), - // otherwise fail - scoring::Choice::RejectNew => { - return Err(error::Error::TooCheapToEnter(transaction.hash().clone(), format!("{:#x}", old.score))) - }, - } - }, - }; - - if let Some(to_remove) = to_remove { - // Remove from transaction set - self.remove_from_set(to_remove.transaction.sender(), |set, scoring| { - set.remove(&to_remove.transaction, scoring) - }); - - Ok(Some(to_remove.transaction)) - } else { - Ok(None) - } - } - - /// Removes transaction from sender's transaction `HashMap`. 
- fn remove_from_set, &S) -> R>(&mut self, sender: &T::Sender, f: F) -> Option { - let (prev, next, result) = if let Some(set) = self.transactions.get_mut(sender) { - let prev = set.worst_and_best(); - let result = f(set, &self.scoring); - (prev, set.worst_and_best(), result) - } else { - return None; - }; - - self.update_senders_worst_and_best(prev, next); - Some(result) - } - - /// Clears pool from all transactions. - /// This causes a listener notification that all transactions were dropped. - /// NOTE: the drop-notification order will be arbitrary. - pub fn clear(&mut self) { - self.mem_usage = 0; - self.transactions.clear(); - self.best_transactions.clear(); - self.worst_transactions.clear(); - - for (_hash, tx) in self.by_hash.drain() { - self.listener.dropped(&tx.transaction, None) - } - } - - /// Removes single transaction from the pool. - /// Depending on the `is_invalid` flag the listener - /// will either get a `cancelled` or `invalid` notification. - pub fn remove(&mut self, hash: &T::Hash, is_invalid: bool) -> Option> { - if let Some(tx) = self.finalize_remove(hash) { - self.remove_from_set(tx.sender(), |set, scoring| { - set.remove(&tx, scoring) - }); - if is_invalid { - self.listener.invalid(&tx); - } else { - self.listener.canceled(&tx); - } - Some(tx) - } else { - None - } - } - - /// Removes all stalled transactions from given sender. - fn remove_stalled>(&mut self, sender: &T::Sender, ready: &mut R) -> usize { - let removed_from_set = self.remove_from_set(sender, |transactions, scoring| { - transactions.cull(ready, scoring) - }); - - match removed_from_set { - Some(removed) => { - let len = removed.len(); - for tx in removed { - self.finalize_remove(tx.hash()); - self.listener.culled(&tx); - } - len - }, - None => 0, - } - } - - /// Removes all stalled transactions from given sender list (or from all senders). - pub fn cull>(&mut self, senders: Option<&[T::Sender]>, mut ready: R) -> usize { - let mut removed = 0; - match senders { - Some(senders) => { - for sender in senders { - removed += self.remove_stalled(sender, &mut ready); - } - }, - None => { - let senders = self.transactions.keys().cloned().collect::>(); - for sender in senders { - removed += self.remove_stalled(&sender, &mut ready); - } - }, - } - - removed - } - - /// Returns a transaction if it's part of the pool or `None` otherwise. - pub fn find(&self, hash: &T::Hash) -> Option> { - self.by_hash.get(hash).map(|t| t.transaction.clone()) - } - - /// Returns worst transaction in the queue (if any). - pub fn worst_transaction(&self) -> Option> { - self.worst_transactions.iter().next_back().map(|x| x.transaction.transaction.clone()) - } - - /// Returns true if the pool is at it's capacity. - pub fn is_full(&self) -> bool { - self.by_hash.len() >= self.options.max_count - || self.mem_usage >= self.options.max_mem_usage - } - - /// Returns senders ordered by priority of their transactions. - pub fn senders(&self) -> impl Iterator { - self.best_transactions.iter().map(|tx| tx.transaction.sender()) - } - - /// Returns an iterator of pending (ready) transactions. - pub fn pending>(&self, ready: R) -> PendingIterator { - PendingIterator { - ready, - best_transactions: self.best_transactions.clone(), - pool: self, - } - } - - /// Returns pending (ready) transactions from given sender. 
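Since `pending` borrows the pool immutably and leaves transactions in place, callers typically collect the ready set first and then `cull` whatever the same readiness check reports as stale. A generic sketch of that pattern; the `Clone` bound on the readiness check is an assumption made for brevity (the check could equally be constructed twice):

use std::sync::Arc;
use transaction_pool::{Pool, Ready, Scoring, VerifiedTransaction};

/// Collect everything that is ready right now, then sweep out stale entries.
fn drain_and_cull<T, S, R>(pool: &mut Pool<T, S>, ready: R) -> (Vec<Arc<T>>, usize)
where
	T: VerifiedTransaction,
	S: Scoring<T>,
	R: Ready<T> + Clone,
{
	// `pending` only borrows the pool; collecting ends the borrow before `cull` mutates it.
	let pending: Vec<Arc<T>> = pool.pending(ready.clone()).collect();
	// `None` means "all senders"; `Some(&[sender])` would restrict the sweep.
	let culled = pool.cull(None, ready);
	(pending, culled)
}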
- pub fn pending_from_sender>(&self, ready: R, sender: &T::Sender) -> PendingIterator { - let best_transactions = self.transactions.get(sender) - .and_then(|transactions| transactions.worst_and_best()) - .map(|(_, best)| ScoreWithRef::new(best.0, best.1)) - .map(|s| { - let mut set = BTreeSet::new(); - set.insert(s); - set - }) - .unwrap_or_default(); - - PendingIterator { - ready, - best_transactions, - pool: self, - } - } - - /// Returns unprioritized list of ready transactions. - pub fn unordered_pending>(&self, ready: R) -> UnorderedIterator { - UnorderedIterator { - ready, - senders: self.transactions.iter(), - transactions: None, - } - } - - /// Update score of transactions of a particular sender. - pub fn update_scores(&mut self, sender: &T::Sender, event: S::Event) { - let res = if let Some(set) = self.transactions.get_mut(sender) { - let prev = set.worst_and_best(); - set.update_scores(&self.scoring, event); - let current = set.worst_and_best(); - Some((prev, current)) - } else { - None - }; - - if let Some((prev, current)) = res { - self.update_senders_worst_and_best(prev, current); - } - } - - /// Computes the full status of the pool (including readiness). - pub fn status>(&self, mut ready: R) -> Status { - let mut status = Status::default(); - - for (_sender, transactions) in &self.transactions { - let len = transactions.len(); - for (idx, tx) in transactions.iter().enumerate() { - match ready.is_ready(tx) { - Readiness::Stale => status.stalled += 1, - Readiness::Ready => status.pending += 1, - Readiness::Future => { - status.future += len - idx; - break; - } - } - } - } - - status - } - - /// Returns light status of the pool. - pub fn light_status(&self) -> LightStatus { - LightStatus { - mem_usage: self.mem_usage, - transaction_count: self.by_hash.len(), - senders: self.transactions.len(), - } - } - - /// Returns current pool options. - pub fn options(&self) -> Options { - self.options.clone() - } - - /// Borrows listener instance. - pub fn listener(&self) -> &L { - &self.listener - } - - /// Borrows scoring instance. - pub fn scoring(&self) -> &S { - &self.scoring - } - - /// Borrows listener mutably. - pub fn listener_mut(&mut self) -> &mut L { - &mut self.listener - } -} - -/// An iterator over all pending (ready) transactions in unoredered fashion. -/// -/// NOTE: Current implementation will iterate over all transactions from particular sender -/// ordered by nonce, but that might change in the future. -/// -/// NOTE: the transactions are not removed from the queue. -/// You might remove them later by calling `cull`. -pub struct UnorderedIterator<'a, T, R, S> where - T: VerifiedTransaction + 'a, - S: Scoring + 'a, -{ - ready: R, - senders: hash_map::Iter<'a, T::Sender, Transactions>, - transactions: Option>>, -} - -impl<'a, T, R, S> Iterator for UnorderedIterator<'a, T, R, S> where - T: VerifiedTransaction, - R: Ready, - S: Scoring, -{ - type Item = Arc; - - fn next(&mut self) -> Option { - loop { - if let Some(transactions) = self.transactions.as_mut() { - if let Some(tx) = transactions.next() { - match self.ready.is_ready(&tx) { - Readiness::Ready => { - return Some(tx.transaction.clone()); - }, - state => trace!("[{:?}] Ignoring {:?} transaction.", tx.hash(), state), - } - } - } - - // otherwise fallback and try next sender - let next_sender = self.senders.next()?; - self.transactions = Some(next_sender.1.iter()); - } - } -} - - -/// An iterator over all pending (ready) transactions. -/// NOTE: the transactions are not removed from the queue. 
-/// You might remove them later by calling `cull`. -pub struct PendingIterator<'a, T, R, S, L> where - T: VerifiedTransaction + 'a, - S: Scoring + 'a, - L: 'a, -{ - ready: R, - best_transactions: BTreeSet>, - pool: &'a Pool, -} - -impl<'a, T, R, S, L> Iterator for PendingIterator<'a, T, R, S, L> where - T: VerifiedTransaction, - R: Ready, - S: Scoring, -{ - type Item = Arc; - - fn next(&mut self) -> Option { - while !self.best_transactions.is_empty() { - let best = { - let best = self.best_transactions.iter().next().expect("current_best is not empty; qed").clone(); - self.best_transactions.take(&best).expect("Just taken from iterator; qed") - }; - - let tx_state = self.ready.is_ready(&best.transaction); - // Add the next best sender's transaction when applicable - match tx_state { - Readiness::Ready | Readiness::Stale => { - // retrieve next one from the same sender. - let next = self.pool.transactions - .get(best.transaction.sender()) - .and_then(|s| s.find_next(&best.transaction, &self.pool.scoring)); - if let Some((score, tx)) = next { - self.best_transactions.insert(ScoreWithRef::new(score, tx)); - } - }, - _ => (), - } - - if tx_state == Readiness::Ready { - return Some(best.transaction.transaction) - } - - trace!("[{:?}] Ignoring {:?} transaction.", best.transaction.hash(), tx_state); - } - - None - } -} - diff --git a/transaction-pool/src/ready.rs b/transaction-pool/src/ready.rs deleted file mode 100644 index 0bee5188d..000000000 --- a/transaction-pool/src/ready.rs +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -/// Transaction readiness. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum Readiness { - /// The transaction is stale (and should/will be removed from the pool). - Stale, - /// The transaction is ready to be included in pending set. - Ready, - /// The transaction is not yet ready. - Future, -} - -/// A readiness indicator. -pub trait Ready { - /// Returns true if transaction is ready to be included in pending block, - /// given all previous transactions that were ready are already included. - /// - /// NOTE: readiness of transactions will be checked according to `Score` ordering, - /// the implementation should maintain a state of already checked transactions. - fn is_ready(&mut self, tx: &T) -> Readiness; -} - -impl Ready for F where F: FnMut(&T) -> Readiness { - fn is_ready(&mut self, tx: &T) -> Readiness { - (*self)(tx) - } -} - -impl Ready for (A, B) where - A: Ready, - B: Ready, -{ - fn is_ready(&mut self, tx: &T) -> Readiness { - match self.0.is_ready(tx) { - Readiness::Ready => self.1.is_ready(tx), - r => r, - } - } -} diff --git a/transaction-pool/src/replace.rs b/transaction-pool/src/replace.rs deleted file mode 100644 index 7a8896995..000000000 --- a/transaction-pool/src/replace.rs +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. 
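Because of the blanket `FnMut(&T) -> Readiness` implementation above, a plain closure is enough to act as a readiness oracle. A sketch reusing the hypothetical `MyTx` type from earlier; the gas-price threshold is an arbitrary example policy, not something the crate prescribes:

use ethereum_types::U256;
use transaction_pool::Readiness;

// Anything paying at least `min_gas_price` is ready; the rest is reported as
// future (not yet includable).
fn by_price(min_gas_price: U256) -> impl FnMut(&MyTx) -> Readiness {
	move |tx: &MyTx| {
		if tx.gas_price >= min_gas_price {
			Readiness::Ready
		} else {
			Readiness::Future
		}
	}
}

The returned closure can be handed straight to `pending`, `unordered_pending`, `status` or `cull`.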
-// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! When queue limits are reached, decide whether to replace an existing transaction from the pool - -use crate::{ - pool::Transaction, - scoring::Choice, -}; - -/// Encapsulates a transaction to be compared, along with pooled transactions from the same sender -pub struct ReplaceTransaction<'a, T> { - /// The transaction to be compared for replacement - pub transaction: &'a Transaction, - /// Other transactions currently in the pool for the same sender - pub pooled_by_sender: Option<&'a [Transaction]>, -} - -impl<'a, T> ReplaceTransaction<'a, T> { - /// Creates a new `ReplaceTransaction` - pub fn new(transaction: &'a Transaction, pooled_by_sender: Option<&'a [Transaction]>) -> Self { - ReplaceTransaction { - transaction, - pooled_by_sender, - } - } -} - -impl<'a, T> ::std::ops::Deref for ReplaceTransaction<'a, T> { - type Target = Transaction; - fn deref(&self) -> &Self::Target { - &self.transaction - } -} - -/// Chooses whether a new transaction should replace an existing transaction if the pool is full. -pub trait ShouldReplace { - /// Decides if `new` should push out `old` transaction from the pool. - /// - /// NOTE returning `InsertNew` here can lead to some transactions being accepted above pool limits. - fn should_replace(&self, old: &ReplaceTransaction, new: &ReplaceTransaction) -> Choice; -} diff --git a/transaction-pool/src/scoring.rs b/transaction-pool/src/scoring.rs deleted file mode 100644 index c3edbb1ac..000000000 --- a/transaction-pool/src/scoring.rs +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! A transactions ordering abstraction. - -use std::{cmp, fmt}; -use crate::pool::Transaction; - -/// Represents a decision what to do with -/// a new transaction that tries to enter the pool. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum Choice { - /// New transaction should be rejected - /// (i.e. the old transaction that occupies the same spot - /// is better). - RejectNew, - /// The old transaction should be dropped - /// in favour of the new one. - ReplaceOld, - /// The new transaction should be inserted - /// and both (old and new) should stay in the pool. - InsertNew, -} - -/// Describes a reason why the `Score` of transactions -/// should be updated. 
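A `ShouldReplace` implementation only has to return one of the three `Choice`s. The simplest useful policy, essentially what the `DummyScoring` test helper further down does, compares gas prices; a sketch for the hypothetical `MyTx`:

use transaction_pool::{scoring::Choice, ReplaceTransaction, ShouldReplace};

/// Replace the worst pooled transaction only when the newcomer pays strictly more.
#[derive(Debug, Default)]
struct ReplaceByGasPrice;

impl ShouldReplace<MyTx> for ReplaceByGasPrice {
	fn should_replace(&self, old: &ReplaceTransaction<'_, MyTx>, new: &ReplaceTransaction<'_, MyTx>) -> Choice {
		// `ReplaceTransaction` derefs to the pooled `Transaction`, which derefs
		// to `Arc<MyTx>`, so the fields are reachable directly.
		if new.gas_price > old.gas_price {
			Choice::ReplaceOld
		} else {
			Choice::RejectNew
		}
	}
}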
-/// The `Scoring` implementations can use this information -/// to update the `Score` table more efficiently. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum Change { - /// New transaction has been inserted at given index. - /// The Score at that index is initialized with default value - /// and needs to be filled in. - InsertedAt(usize), - /// The transaction has been removed at given index and other transactions - /// shifted to it's place. - /// The scores were removed and shifted as well. - /// For simple scoring algorithms no action is required here. - RemovedAt(usize), - /// The transaction at given index has replaced a previous transaction. - /// The score at that index needs to be update (it contains value from previous transaction). - ReplacedAt(usize), - /// Given number of stalled transactions has been culled from the beginning. - /// The scores has been removed from the beginning as well. - /// For simple scoring algorithms no action is required here. - Culled(usize), - /// Custom event to update the score triggered outside of the pool. - /// Handling this event is up to scoring implementation. - Event(T), -} - -/// A transaction ordering. -/// -/// The implementation should decide on order of transactions in the pool. -/// Each transaction should also get assigned a `Score` which is used to later -/// prioritize transactions in the pending set. -/// -/// Implementation notes: -/// - Returned `Score`s should match ordering of `compare` method. -/// - `compare` will be called only within a context of transactions from the same sender. -/// - `choose` may be called even if `compare` returns `Ordering::Equal` -/// - `Score`s and `compare` should align with `Ready` implementation. -/// -/// Example: Natural ordering of Ethereum transactions. -/// - `compare`: compares transaction `nonce` () -/// - `choose`: compares transactions `gasPrice` (decides if old transaction should be replaced) -/// - `update_scores`: score defined as `gasPrice` if `n==0` and `max(scores[n-1], gasPrice)` if `n>0` -/// -pub trait Scoring: fmt::Debug { - /// A score of a transaction. - type Score: cmp::Ord + Clone + Default + fmt::Debug + Send + fmt::LowerHex; - /// Custom scoring update event type. - type Event: fmt::Debug; - - /// Decides on ordering of `T`s from a particular sender. - fn compare(&self, old: &T, other: &T) -> cmp::Ordering; - - /// Decides how to deal with two transactions from a sender that seem to occupy the same slot in the queue. - fn choose(&self, old: &T, new: &T) -> Choice; - - /// Updates the transaction scores given a list of transactions and a change to previous scoring. - /// NOTE: you can safely assume that both slices have the same length. - /// (i.e. score at index `i` represents transaction at the same index) - fn update_scores(&self, txs: &[Transaction], scores: &mut [Self::Score], change: Change); - - /// Decides if the transaction should ignore per-sender limit in the pool. - /// - /// If you return `true` for given transaction it's going to be accepted even though - /// the per-sender limit is exceeded. - fn should_ignore_sender_limit(&self, _new: &T) -> bool { false } -} - -/// A score with a reference to the transaction. 
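The "natural Ethereum ordering" described in the `Scoring` docs can be written down almost verbatim. A sketch for the hypothetical `MyTx`, deliberately simplified to mirror the `DummyScoring` test helper below (plain gas-price scores rather than the cumulative-max rule the docs mention):

use std::cmp;

use ethereum_types::U256;
use transaction_pool::{
	scoring::{Change, Choice, Scoring},
	Transaction,
};

/// Orders a sender's transactions by nonce, resolves same-nonce conflicts by
/// gas price, and scores each slot with its gas price.
#[derive(Debug, Default)]
struct GasPriceScoring;

impl Scoring<MyTx> for GasPriceScoring {
	type Score = U256;
	type Event = ();

	fn compare(&self, old: &MyTx, other: &MyTx) -> cmp::Ordering {
		old.nonce.cmp(&other.nonce)
	}

	fn choose(&self, old: &MyTx, new: &MyTx) -> Choice {
		if old.nonce != new.nonce {
			Choice::InsertNew
		} else if new.gas_price > old.gas_price {
			Choice::ReplaceOld
		} else {
			Choice::RejectNew
		}
	}

	fn update_scores(&self, txs: &[Transaction<MyTx>], scores: &mut [U256], _change: Change<()>) {
		// The pool guarantees both slices have the same length.
		for (score, tx) in scores.iter_mut().zip(txs) {
			*score = tx.gas_price;
		}
	}
}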
-#[derive(Debug)] -pub struct ScoreWithRef { - /// Score - pub score: S, - /// Shared transaction - pub transaction: Transaction, -} - -impl ScoreWithRef { - /// Creates a new `ScoreWithRef` - pub fn new(score: S, transaction: Transaction) -> Self { - ScoreWithRef { score, transaction } - } -} - -impl Clone for ScoreWithRef { - fn clone(&self) -> Self { - ScoreWithRef { - score: self.score.clone(), - transaction: self.transaction.clone(), - } - } -} - -impl Ord for ScoreWithRef { - fn cmp(&self, other: &Self) -> cmp::Ordering { - other.score.cmp(&self.score) - .then(self.transaction.insertion_id.cmp(&other.transaction.insertion_id)) - } -} - -impl PartialOrd for ScoreWithRef { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl PartialEq for ScoreWithRef { - fn eq(&self, other: &Self) -> bool { - self.score == other.score && self.transaction.insertion_id == other.transaction.insertion_id - } -} - -impl Eq for ScoreWithRef {} - - -#[cfg(test)] -mod tests { - use super::*; - - fn score(score: u64, insertion_id: u64) -> ScoreWithRef<(), u64> { - ScoreWithRef { - score, - transaction: Transaction { - insertion_id, - transaction: Default::default(), - }, - } - } - - #[test] - fn scoring_comparison() { - // the higher the score the better - assert_eq!(score(10, 0).cmp(&score(0, 0)), cmp::Ordering::Less); - assert_eq!(score(0, 0).cmp(&score(10, 0)), cmp::Ordering::Greater); - - // equal is equal - assert_eq!(score(0, 0).cmp(&score(0, 0)), cmp::Ordering::Equal); - - // lower insertion id is better - assert_eq!(score(0, 0).cmp(&score(0, 10)), cmp::Ordering::Less); - assert_eq!(score(0, 10).cmp(&score(0, 0)), cmp::Ordering::Greater); - } -} diff --git a/transaction-pool/src/status.rs b/transaction-pool/src/status.rs deleted file mode 100644 index b9e7656d4..000000000 --- a/transaction-pool/src/status.rs +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -/// Light pool status. -/// This status is cheap to compute and can be called frequently. -#[derive(Default, Debug, Clone, PartialEq, Eq)] -pub struct LightStatus { - /// Memory usage in bytes. - pub mem_usage: usize, - /// Total number of transactions in the pool. - pub transaction_count: usize, - /// Number of unique senders in the pool. - pub senders: usize, -} - -/// A full queue status. -/// To compute this status it is required to provide `Ready`. -/// NOTE: To compute the status we need to visit each transaction in the pool. -#[derive(Default, Debug, Clone, PartialEq, Eq)] -pub struct Status { - /// Number of stalled transactions. - pub stalled: usize, - /// Number of pending (ready) transactions. - pub pending: usize, - /// Number of future (not ready) transactions. 
- pub future: usize, -} diff --git a/transaction-pool/src/tests/helpers.rs b/transaction-pool/src/tests/helpers.rs deleted file mode 100644 index f14314c2e..000000000 --- a/transaction-pool/src/tests/helpers.rs +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use std::cmp; -use std::collections::HashMap; - -use ethereum_types::{H160 as Sender, U256}; -use crate::{pool, scoring, Scoring, ShouldReplace, ReplaceTransaction, Ready, Readiness}; -use super::Transaction; - -#[derive(Debug, Default)] -pub struct DummyScoring { - always_insert: bool, -} - -impl DummyScoring { - pub fn always_insert() -> Self { - DummyScoring { - always_insert: true, - } - } -} - -impl Scoring for DummyScoring { - type Score = U256; - type Event = (); - - fn compare(&self, old: &Transaction, new: &Transaction) -> cmp::Ordering { - old.nonce.cmp(&new.nonce) - } - - fn choose(&self, old: &Transaction, new: &Transaction) -> scoring::Choice { - if old.nonce == new.nonce { - if new.gas_price > old.gas_price { - scoring::Choice::ReplaceOld - } else { - scoring::Choice::RejectNew - } - } else { - scoring::Choice::InsertNew - } - } - - fn update_scores(&self, txs: &[pool::Transaction], scores: &mut [Self::Score], change: scoring::Change) { - if let scoring::Change::Event(_) = change { - // In case of event reset all scores to 0 - for i in 0..txs.len() { - scores[i] = 0.into(); - } - } else { - // Set to a gas price otherwise - for i in 0..txs.len() { - scores[i] = txs[i].gas_price; - } - } - } - - fn should_ignore_sender_limit(&self, _new: &Transaction) -> bool { - self.always_insert - } -} - -impl ShouldReplace for DummyScoring { - fn should_replace(&self, old: &ReplaceTransaction, new: &ReplaceTransaction) -> scoring::Choice { - if self.always_insert { - scoring::Choice::InsertNew - } else if new.gas_price > old.gas_price { - scoring::Choice::ReplaceOld - } else { - scoring::Choice::RejectNew - } - } -} - -#[derive(Default)] -pub struct NonceReady(HashMap, U256); - -impl NonceReady { - pub fn new>(min: T) -> Self { - let mut n = NonceReady::default(); - n.1 = min.into(); - n - } -} - -impl Ready for NonceReady { - fn is_ready(&mut self, tx: &Transaction) -> Readiness { - let min = self.1; - let nonce = self.0.entry(tx.sender).or_insert_with(|| min); - match tx.nonce.cmp(nonce) { - cmp::Ordering::Greater => Readiness::Future, - cmp::Ordering::Equal => { - *nonce += 1.into(); - Readiness::Ready - }, - cmp::Ordering::Less => Readiness::Stale, - } - } -} diff --git a/transaction-pool/src/tests/mod.rs b/transaction-pool/src/tests/mod.rs deleted file mode 100644 index 3d1ca4af4..000000000 --- a/transaction-pool/src/tests/mod.rs +++ /dev/null @@ -1,783 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. 
- -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -mod helpers; -mod tx_builder; - -use self::helpers::{DummyScoring, NonceReady}; -use self::tx_builder::TransactionBuilder; - -use std::sync::Arc; - -use ethereum_types::{H256, U256, Address}; -use super::*; - -#[derive(Debug, PartialEq)] -pub struct Transaction { - pub hash: H256, - pub nonce: U256, - pub gas_price: U256, - pub gas: U256, - pub sender: Address, - pub mem_usage: usize, -} - -impl VerifiedTransaction for Transaction { - type Hash = H256; - type Sender = Address; - - fn hash(&self) -> &H256 { &self.hash } - fn mem_usage(&self) -> usize { self.mem_usage } - fn sender(&self) -> &Address { &self.sender } -} - -pub type SharedTransaction = Arc; - -type TestPool = Pool; - -impl TestPool { - pub fn with_limit(max_count: usize) -> Self { - Self::with_options(Options { - max_count, - ..Default::default() - }) - } -} - -fn import, L: Listener>(txq: &mut Pool, tx: Transaction) - -> Result, Error<::Hash>> { - txq.import(tx, &mut DummyScoring::default()) -} - -#[test] -fn should_clear_queue() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - assert_eq!(txq.light_status(), LightStatus { - mem_usage: 0, - transaction_count: 0, - senders: 0, - }); - let tx1 = b.tx().nonce(0).new(); - let tx2 = b.tx().nonce(1).mem_usage(1).new(); - - // add - import(&mut txq, tx1).unwrap(); - import(&mut txq, tx2).unwrap(); - assert_eq!(txq.light_status(), LightStatus { - mem_usage: 1, - transaction_count: 2, - senders: 1, - }); - - // when - txq.clear(); - - // then - assert_eq!(txq.light_status(), LightStatus { - mem_usage: 0, - transaction_count: 0, - senders: 0, - }); -} - -#[test] -fn should_not_allow_same_transaction_twice() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - let tx1 = b.tx().nonce(0).new(); - let tx2 = b.tx().nonce(0).new(); - - // when - import(&mut txq, tx1).unwrap(); - import(&mut txq, tx2).unwrap_err(); - - // then - assert_eq!(txq.light_status().transaction_count, 1); -} - -#[test] -fn should_replace_transaction() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - let tx1 = b.tx().nonce(0).gas_price(1).new(); - let tx2 = b.tx().nonce(0).gas_price(2).new(); - - // when - import(&mut txq, tx1).unwrap(); - import(&mut txq, tx2).unwrap(); - - // then - assert_eq!(txq.light_status().transaction_count, 1); -} - -#[test] -fn should_reject_if_above_count() { - let b = TransactionBuilder::default(); - let mut txq = TestPool::with_options(Options { - max_count: 1, - ..Default::default() - }); - - // Reject second - let tx1 = b.tx().nonce(0).new(); - let tx2 = b.tx().nonce(1).new(); - let hash = tx2.hash.clone(); - import(&mut txq, tx1).unwrap(); - assert_eq!(import(&mut txq, tx2).unwrap_err(), error::Error::TooCheapToEnter(hash, "0x0".into())); - assert_eq!(txq.light_status().transaction_count, 1); - - txq.clear(); - - // Replace first - let tx1 = 
b.tx().nonce(0).new(); - let tx2 = b.tx().nonce(0).sender(1).gas_price(2).new(); - import(&mut txq, tx1).unwrap(); - import(&mut txq, tx2).unwrap(); - assert_eq!(txq.light_status().transaction_count, 1); -} - -#[test] -fn should_reject_if_above_mem_usage() { - let b = TransactionBuilder::default(); - let mut txq = TestPool::with_options(Options { - max_mem_usage: 1, - ..Default::default() - }); - - // Reject second - let tx1 = b.tx().nonce(1).mem_usage(1).new(); - let tx2 = b.tx().nonce(2).mem_usage(2).new(); - let hash = tx2.hash.clone(); - import(&mut txq, tx1).unwrap(); - assert_eq!(import(&mut txq, tx2).unwrap_err(), error::Error::TooCheapToEnter(hash, "0x0".into())); - assert_eq!(txq.light_status().transaction_count, 1); - - txq.clear(); - - // Replace first - let tx1 = b.tx().nonce(1).mem_usage(1).new(); - let tx2 = b.tx().nonce(1).sender(1).gas_price(2).mem_usage(1).new(); - import(&mut txq, tx1).unwrap(); - import(&mut txq, tx2).unwrap(); - assert_eq!(txq.light_status().transaction_count, 1); -} - -#[test] -fn should_reject_if_above_sender_count() { - let b = TransactionBuilder::default(); - let mut txq = TestPool::with_options(Options { - max_per_sender: 1, - ..Default::default() - }); - - // Reject second - let tx1 = b.tx().nonce(1).new(); - let tx2 = b.tx().nonce(2).new(); - let hash = tx2.hash.clone(); - import(&mut txq, tx1).unwrap(); - assert_eq!(import(&mut txq, tx2).unwrap_err(), error::Error::TooCheapToEnter(hash, "0x0".into())); - assert_eq!(txq.light_status().transaction_count, 1); - - txq.clear(); - - // Replace first - let tx1 = b.tx().nonce(1).new(); - let tx2 = b.tx().nonce(2).gas_price(2).new(); - let hash = tx2.hash.clone(); - import(&mut txq, tx1).unwrap(); - // This results in error because we also compare nonces - assert_eq!(import(&mut txq, tx2).unwrap_err(), error::Error::TooCheapToEnter(hash, "0x0".into())); - assert_eq!(txq.light_status().transaction_count, 1); -} - -#[test] -fn should_construct_pending() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - let tx0 = import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - let tx1 = import(&mut txq, b.tx().nonce(1).gas_price(5).new()).unwrap(); - - let tx9 = import(&mut txq, b.tx().sender(2).nonce(0).new()).unwrap(); - - let tx5 = import(&mut txq, b.tx().sender(1).nonce(0).new()).unwrap(); - let tx6 = import(&mut txq, b.tx().sender(1).nonce(1).new()).unwrap(); - let tx7 = import(&mut txq, b.tx().sender(1).nonce(2).new()).unwrap(); - let tx8 = import(&mut txq, b.tx().sender(1).nonce(3).gas_price(4).new()).unwrap(); - - let tx2 = import(&mut txq, b.tx().nonce(2).new()).unwrap(); - // this transaction doesn't get to the block despite high gas price - // because of block gas limit and simplistic ordering algorithm. 
- import(&mut txq, b.tx().nonce(3).gas_price(4).new()).unwrap(); - //gap - import(&mut txq, b.tx().nonce(5).new()).unwrap(); - - // gap - import(&mut txq, b.tx().sender(1).nonce(5).new()).unwrap(); - - assert_eq!(txq.light_status().transaction_count, 11); - assert_eq!(txq.status(NonceReady::default()), Status { - stalled: 0, - pending: 9, - future: 2, - }); - assert_eq!(txq.status(NonceReady::new(1)), Status { - stalled: 3, - pending: 6, - future: 2, - }); - - // when - let mut current_gas = U256::zero(); - let limit = (21_000 * 8).into(); - let mut pending = txq.pending(NonceReady::default()).take_while(|tx| { - let should_take = tx.gas + current_gas <= limit; - if should_take { - current_gas = current_gas + tx.gas - } - should_take - }); - - assert_eq!(pending.next(), Some(tx0)); - assert_eq!(pending.next(), Some(tx1)); - assert_eq!(pending.next(), Some(tx9)); - assert_eq!(pending.next(), Some(tx5)); - assert_eq!(pending.next(), Some(tx6)); - assert_eq!(pending.next(), Some(tx7)); - assert_eq!(pending.next(), Some(tx8)); - assert_eq!(pending.next(), Some(tx2)); - assert_eq!(pending.next(), None); -} - -#[test] -fn should_skip_staled_pending_transactions() { - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - let tx0 = import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - let tx2 = import(&mut txq, b.tx().nonce(2).gas_price(5).new()).unwrap(); - let tx1 = import(&mut txq, b.tx().nonce(1).gas_price(5).new()).unwrap(); - - // tx0 and tx1 are Stale, tx2 is Ready - let mut pending = txq.pending(NonceReady::new(2)); - - // tx0 and tx1 should be skipped, tx2 should be the next Ready - assert_eq!(pending.next(), Some(tx2)); - assert_eq!(pending.next(), None); -} - -#[test] -fn should_return_unordered_iterator() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - let tx0 = import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - let tx1 = import(&mut txq, b.tx().nonce(1).gas_price(5).new()).unwrap(); - let tx2 = import(&mut txq, b.tx().nonce(2).new()).unwrap(); - let tx3 = import(&mut txq, b.tx().nonce(3).gas_price(4).new()).unwrap(); - //gap - import(&mut txq, b.tx().nonce(5).new()).unwrap(); - - let tx5 = import(&mut txq, b.tx().sender(1).nonce(0).new()).unwrap(); - let tx6 = import(&mut txq, b.tx().sender(1).nonce(1).new()).unwrap(); - let tx7 = import(&mut txq, b.tx().sender(1).nonce(2).new()).unwrap(); - let tx8 = import(&mut txq, b.tx().sender(1).nonce(3).gas_price(4).new()).unwrap(); - // gap - import(&mut txq, b.tx().sender(1).nonce(5).new()).unwrap(); - - let tx9 = import(&mut txq, b.tx().sender(2).nonce(0).new()).unwrap(); - assert_eq!(txq.light_status().transaction_count, 11); - assert_eq!(txq.status(NonceReady::default()), Status { - stalled: 0, - pending: 9, - future: 2, - }); - assert_eq!(txq.status(NonceReady::new(1)), Status { - stalled: 3, - pending: 6, - future: 2, - }); - - // when - let all: Vec<_> = txq.unordered_pending(NonceReady::default()).collect(); - - let chain1 = vec![tx0, tx1, tx2, tx3]; - let chain2 = vec![tx5, tx6, tx7, tx8]; - let chain3 = vec![tx9]; - - assert_eq!(all.len(), chain1.len() + chain2.len() + chain3.len()); - - let mut options = vec![ - vec![chain1.clone(), chain2.clone(), chain3.clone()], - vec![chain2.clone(), chain1.clone(), chain3.clone()], - vec![chain2.clone(), chain3.clone(), chain1.clone()], - vec![chain3.clone(), chain2.clone(), chain1.clone()], - vec![chain3.clone(), chain1.clone(), chain2.clone()], - vec![chain1.clone(), chain3.clone(), 
chain2.clone()], - ].into_iter().map(|mut v| { - let mut first = v.pop().unwrap(); - for mut x in v { - first.append(&mut x); - } - first - }); - - assert!(options.any(|opt| all == opt)); -} - -#[test] -fn should_update_scoring_correctly() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - let tx9 = import(&mut txq, b.tx().sender(2).nonce(0).new()).unwrap(); - - let tx5 = import(&mut txq, b.tx().sender(1).nonce(0).new()).unwrap(); - let tx6 = import(&mut txq, b.tx().sender(1).nonce(1).new()).unwrap(); - let tx7 = import(&mut txq, b.tx().sender(1).nonce(2).new()).unwrap(); - let tx8 = import(&mut txq, b.tx().sender(1).nonce(3).gas_price(4).new()).unwrap(); - - let tx0 = import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - let tx1 = import(&mut txq, b.tx().nonce(1).gas_price(5).new()).unwrap(); - let tx2 = import(&mut txq, b.tx().nonce(2).new()).unwrap(); - // this transaction doesn't get to the block despite high gas price - // because of block gas limit and simplistic ordering algorithm. - import(&mut txq, b.tx().nonce(3).gas_price(4).new()).unwrap(); - //gap - import(&mut txq, b.tx().nonce(5).new()).unwrap(); - - // gap - import(&mut txq, b.tx().sender(1).nonce(5).new()).unwrap(); - - assert_eq!(txq.light_status().transaction_count, 11); - assert_eq!(txq.status(NonceReady::default()), Status { - stalled: 0, - pending: 9, - future: 2, - }); - assert_eq!(txq.status(NonceReady::new(1)), Status { - stalled: 3, - pending: 6, - future: 2, - }); - - txq.update_scores(&Address::zero(), ()); - - // when - let mut current_gas = U256::zero(); - let limit = (21_000 * 8).into(); - let mut pending = txq.pending(NonceReady::default()).take_while(|tx| { - let should_take = tx.gas + current_gas <= limit; - if should_take { - current_gas = current_gas + tx.gas - } - should_take - }); - - assert_eq!(pending.next(), Some(tx9)); - assert_eq!(pending.next(), Some(tx5)); - assert_eq!(pending.next(), Some(tx6)); - assert_eq!(pending.next(), Some(tx7)); - assert_eq!(pending.next(), Some(tx8)); - // penalized transactions - assert_eq!(pending.next(), Some(tx0)); - assert_eq!(pending.next(), Some(tx1)); - assert_eq!(pending.next(), Some(tx2)); - assert_eq!(pending.next(), None); -} - -#[test] -fn should_remove_transaction() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - let tx1 = import(&mut txq, b.tx().nonce(0).new()).unwrap(); - let tx2 = import(&mut txq, b.tx().nonce(1).new()).unwrap(); - import(&mut txq, b.tx().nonce(2).new()).unwrap(); - assert_eq!(txq.light_status().transaction_count, 3); - - // when - assert!(txq.remove(&tx2.hash(), false).is_some()); - - // then - assert_eq!(txq.light_status().transaction_count, 2); - let mut pending = txq.pending(NonceReady::default()); - assert_eq!(pending.next(), Some(tx1)); - assert_eq!(pending.next(), None); -} - -#[test] -fn should_cull_stalled_transactions() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - import(&mut txq, b.tx().nonce(1).new()).unwrap(); - import(&mut txq, b.tx().nonce(3).new()).unwrap(); - - import(&mut txq, b.tx().sender(1).nonce(0).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(1).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(5).new()).unwrap(); - - assert_eq!(txq.status(NonceReady::new(1)), Status { - stalled: 2, - pending: 2, - future: 2, - }); - - // when - assert_eq!(txq.cull(None, NonceReady::new(1)), 2); 
- - // then - assert_eq!(txq.status(NonceReady::new(1)), Status { - stalled: 0, - pending: 2, - future: 2, - }); - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 4, - senders: 2, - mem_usage: 0, - }); -} - -#[test] -fn should_cull_stalled_transactions_from_a_sender() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - import(&mut txq, b.tx().nonce(1).new()).unwrap(); - - import(&mut txq, b.tx().sender(1).nonce(0).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(1).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(2).new()).unwrap(); - - assert_eq!(txq.status(NonceReady::new(2)), Status { - stalled: 4, - pending: 1, - future: 0, - }); - - // when - let sender = Address::zero(); - assert_eq!(txq.cull(Some(&[sender]), NonceReady::new(2)), 2); - - // then - assert_eq!(txq.status(NonceReady::new(2)), Status { - stalled: 2, - pending: 1, - future: 0, - }); - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 3, - senders: 1, - mem_usage: 0, - }); -} - -#[test] -fn should_re_insert_after_cull() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - import(&mut txq, b.tx().nonce(1).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(0).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(1).new()).unwrap(); - assert_eq!(txq.status(NonceReady::new(1)), Status { - stalled: 2, - pending: 2, - future: 0, - }); - - // when - assert_eq!(txq.cull(None, NonceReady::new(1)), 2); - assert_eq!(txq.status(NonceReady::new(1)), Status { - stalled: 0, - pending: 2, - future: 0, - }); - import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(0).new()).unwrap(); - - assert_eq!(txq.status(NonceReady::new(1)), Status { - stalled: 2, - pending: 2, - future: 0, - }); -} - -#[test] -fn should_return_worst_transaction() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - assert!(txq.worst_transaction().is_none()); - - // when - import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(0).gas_price(4).new()).unwrap(); - - // then - assert_eq!(txq.worst_transaction().unwrap().gas_price, 4.into()); -} - -#[test] -fn should_return_is_full() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::with_limit(2); - assert!(!txq.is_full()); - - // when - import(&mut txq, b.tx().nonce(0).gas_price(110).new()).unwrap(); - assert!(!txq.is_full()); - - import(&mut txq, b.tx().sender(1).nonce(0).gas_price(100).new()).unwrap(); - - // then - assert!(txq.is_full()); -} - -#[test] -fn should_import_even_if_limit_is_reached_and_should_replace_returns_insert_new() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::with_scoring(DummyScoring::always_insert(), Options { - max_count: 1, - ..Default::default() - }); - txq.import(b.tx().nonce(0).gas_price(5).new(), &mut DummyScoring::always_insert()).unwrap(); - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 1, - senders: 1, - mem_usage: 0, - }); - - // when - txq.import(b.tx().nonce(1).gas_price(5).new(), &mut DummyScoring::always_insert()).unwrap(); - - // then - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 2, - senders: 1, - mem_usage: 0, - }); -} - -#[test] -fn 
should_not_import_even_if_limit_is_reached_and_should_replace_returns_false() { - use std::str::FromStr; - - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::with_scoring(DummyScoring::default(), Options { - max_count: 1, - ..Default::default() - }); - import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 1, - senders: 1, - mem_usage: 0, - }); - - // when - let err = import(&mut txq, b.tx().nonce(1).gas_price(5).new()).unwrap_err(); - - // then - assert_eq!( - err, - error::Error::TooCheapToEnter( - H256::from_str("00000000000000000000000000000000000000000000000000000000000001f5").unwrap(), - "0x5".into() - ) - ); - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 1, - senders: 1, - mem_usage: 0, - }); -} - -#[test] -fn should_import_even_if_sender_limit_is_reached() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::with_scoring(DummyScoring::always_insert(), Options { - max_count: 1, - max_per_sender: 1, - ..Default::default() - }); - txq.import(b.tx().nonce(0).gas_price(5).new(), &mut DummyScoring::always_insert()).unwrap(); - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 1, - senders: 1, - mem_usage: 0, - }); - - // when - txq.import(b.tx().nonce(1).gas_price(5).new(), &mut DummyScoring::always_insert()).unwrap(); - - // then - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 2, - senders: 1, - mem_usage: 0, - }); -} - -mod listener { - use std::cell::RefCell; - use std::rc::Rc; - use std::fmt; - - use super::*; - - #[derive(Default)] - struct MyListener(pub Rc>>); - - impl Listener for MyListener { - fn added(&mut self, _tx: &SharedTransaction, old: Option<&SharedTransaction>) { - self.0.borrow_mut().push(if old.is_some() { "replaced" } else { "added" }); - } - - fn rejected(&mut self, _tx: &SharedTransaction, _reason: &error::Error) { - self.0.borrow_mut().push("rejected".into()); - } - - fn dropped(&mut self, _tx: &SharedTransaction, _new: Option<&Transaction>) { - self.0.borrow_mut().push("dropped".into()); - } - - fn invalid(&mut self, _tx: &SharedTransaction) { - self.0.borrow_mut().push("invalid".into()); - } - - fn canceled(&mut self, _tx: &SharedTransaction) { - self.0.borrow_mut().push("canceled".into()); - } - - fn culled(&mut self, _tx: &SharedTransaction) { - self.0.borrow_mut().push("culled".into()); - } - } - - #[test] - fn insert_transaction() { - let b = TransactionBuilder::default(); - let listener = MyListener::default(); - let results = listener.0.clone(); - let mut txq = Pool::new(listener, DummyScoring::default(), Options { - max_per_sender: 1, - max_count: 2, - ..Default::default() - }); - assert!(results.borrow().is_empty()); - - // Regular import - import(&mut txq, b.tx().nonce(1).new()).unwrap(); - assert_eq!(*results.borrow(), &["added"]); - // Already present (no notification) - import(&mut txq, b.tx().nonce(1).new()).unwrap_err(); - assert_eq!(*results.borrow(), &["added"]); - // Push out the first one - import(&mut txq, b.tx().nonce(1).gas_price(1).new()).unwrap(); - assert_eq!(*results.borrow(), &["added", "replaced"]); - // Reject - import(&mut txq, b.tx().nonce(1).new()).unwrap_err(); - assert_eq!(*results.borrow(), &["added", "replaced", "rejected"]); - results.borrow_mut().clear(); - // Different sender (accept) - import(&mut txq, b.tx().sender(1).nonce(1).gas_price(2).new()).unwrap(); - assert_eq!(*results.borrow(), &["added"]); - // Third sender push out low gas 
price - import(&mut txq, b.tx().sender(2).nonce(1).gas_price(4).new()).unwrap(); - assert_eq!(*results.borrow(), &["added", "dropped", "added"]); - // Reject (too cheap) - import(&mut txq, b.tx().sender(2).nonce(1).gas_price(2).new()).unwrap_err(); - assert_eq!(*results.borrow(), &["added", "dropped", "added", "rejected"]); - - assert_eq!(txq.light_status().transaction_count, 2); - } - - #[test] - fn remove_transaction() { - let b = TransactionBuilder::default(); - let listener = MyListener::default(); - let results = listener.0.clone(); - let mut txq = Pool::new(listener, DummyScoring::default(), Options::default()); - - // insert - let tx1 = import(&mut txq, b.tx().nonce(1).new()).unwrap(); - let tx2 = import(&mut txq, b.tx().nonce(2).new()).unwrap(); - - // then - txq.remove(&tx1.hash(), false); - assert_eq!(*results.borrow(), &["added", "added", "canceled"]); - txq.remove(&tx2.hash(), true); - assert_eq!(*results.borrow(), &["added", "added", "canceled", "invalid"]); - assert_eq!(txq.light_status().transaction_count, 0); - } - - #[test] - fn clear_queue() { - let b = TransactionBuilder::default(); - let listener = MyListener::default(); - let results = listener.0.clone(); - let mut txq = Pool::new(listener, DummyScoring::default(), Options::default()); - - // insert - import(&mut txq, b.tx().nonce(1).new()).unwrap(); - import(&mut txq, b.tx().nonce(2).new()).unwrap(); - - // when - txq.clear(); - - // then - assert_eq!(*results.borrow(), &["added", "added", "dropped", "dropped"]); - } - - #[test] - fn cull_stalled() { - let b = TransactionBuilder::default(); - let listener = MyListener::default(); - let results = listener.0.clone(); - let mut txq = Pool::new(listener, DummyScoring::default(), Options::default()); - - // insert - import(&mut txq, b.tx().nonce(1).new()).unwrap(); - import(&mut txq, b.tx().nonce(2).new()).unwrap(); - - // when - txq.cull(None, NonceReady::new(3)); - - // then - assert_eq!(*results.borrow(), &["added", "added", "culled", "culled"]); - } -} diff --git a/transaction-pool/src/tests/tx_builder.rs b/transaction-pool/src/tests/tx_builder.rs deleted file mode 100644 index dae2bb248..000000000 --- a/transaction-pool/src/tests/tx_builder.rs +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- -use super::{Transaction, U256, H256, Address}; -use ethereum_types::BigEndianHash; - -#[derive(Debug, Default, Clone)] -pub struct TransactionBuilder { - nonce: U256, - gas_price: U256, - gas: U256, - sender: Address, - mem_usage: usize, -} - -impl TransactionBuilder { - pub fn tx(&self) -> Self { - self.clone() - } - - pub fn nonce(mut self, nonce: usize) -> Self { - self.nonce = U256::from(nonce); - self - } - - pub fn gas_price(mut self, gas_price: usize) -> Self { - self.gas_price = U256::from(gas_price); - self - } - - pub fn sender(mut self, sender: u64) -> Self { - self.sender = Address::from_low_u64_be(sender); - self - } - - pub fn mem_usage(mut self, mem_usage: usize) -> Self { - self.mem_usage = mem_usage; - self - } - - pub fn new(self) -> Transaction { - let hash: U256 = self.nonce ^ (U256::from(100) * self.gas_price) ^ (U256::from(100_000) * U256::from(self.sender.to_low_u64_be())); - Transaction { - hash: H256::from_uint(&hash), - nonce: self.nonce, - gas_price: self.gas_price, - gas: 21_000.into(), - sender: self.sender, - mem_usage: self.mem_usage, - } - } -} diff --git a/transaction-pool/src/transactions.rs b/transaction-pool/src/transactions.rs deleted file mode 100644 index eb6151131..000000000 --- a/transaction-pool/src/transactions.rs +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use std::{fmt, mem}; - -use smallvec::SmallVec; -use log::warn; - -use crate::{ - ready::{Ready, Readiness}, - scoring::{self, Scoring}, - pool::Transaction, -}; - -#[derive(Debug)] -pub enum AddResult { - Ok(T), - TooCheapToEnter(T, S), - TooCheap { - old: T, - new: T, - }, - Replaced { - old: T, - new: T, - }, - PushedOut { - old: T, - new: T, - }, -} - -/// Represents all transactions from a particular sender ordered by nonce. -const PER_SENDER: usize = 8; -#[derive(Debug)] -pub struct Transactions> { - // TODO [ToDr] Consider using something that doesn't require shifting all records. 
- transactions: SmallVec<[Transaction; PER_SENDER]>, - scores: SmallVec<[S::Score; PER_SENDER]>, -} - -impl> Default for Transactions { - fn default() -> Self { - Transactions { - transactions: Default::default(), - scores: Default::default(), - } - } -} - -impl> Transactions { - pub fn is_empty(&self) -> bool { - self.transactions.is_empty() - } - - pub fn len(&self) -> usize { - self.transactions.len() - } - - pub fn iter(&self) -> ::std::slice::Iter> { - self.transactions.iter() - } - - pub fn worst_and_best(&self) -> Option<((S::Score, Transaction), (S::Score, Transaction))> { - let len = self.scores.len(); - self.scores.get(0).cloned().map(|best| { - let worst = self.scores[len - 1].clone(); - let best_tx = self.transactions[0].clone(); - let worst_tx = self.transactions[len - 1].clone(); - - ((worst, worst_tx), (best, best_tx)) - }) - } - - pub fn find_next(&self, tx: &T, scoring: &S) -> Option<(S::Score, Transaction)> { - self.transactions.binary_search_by(|old| scoring.compare(old, &tx)).ok().and_then(|index| { - let index = index + 1; - if index < self.scores.len() { - Some((self.scores[index].clone(), self.transactions[index].clone())) - } else { - None - } - }) - } - - fn push_cheapest_transaction(&mut self, tx: Transaction, scoring: &S, max_count: usize) -> AddResult, S::Score> { - let index = self.transactions.len(); - if index == max_count && !scoring.should_ignore_sender_limit(&tx) { - let min_score = self.scores[index - 1].clone(); - AddResult::TooCheapToEnter(tx, min_score) - } else { - self.transactions.push(tx.clone()); - self.scores.push(Default::default()); - scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::InsertedAt(index)); - - AddResult::Ok(tx) - } - } - - pub fn update_scores(&mut self, scoring: &S, event: S::Event) { - scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::Event(event)); - } - - pub fn add(&mut self, new: Transaction, scoring: &S, max_count: usize) -> AddResult, S::Score> { - let index = match self.transactions.binary_search_by(|old| scoring.compare(old, &new)) { - Ok(index) => index, - Err(index) => index, - }; - - // Insert at the end. - if index == self.transactions.len() { - return self.push_cheapest_transaction(new, scoring, max_count) - } - - // Decide if the transaction should replace some other. - match scoring.choose(&self.transactions[index], &new) { - // New transaction should be rejected - scoring::Choice::RejectNew => AddResult::TooCheap { - old: self.transactions[index].clone(), - new, - }, - // New transaction should be kept along with old ones. - scoring::Choice::InsertNew => { - self.transactions.insert(index, new.clone()); - self.scores.insert(index, Default::default()); - scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::InsertedAt(index)); - - if self.transactions.len() > max_count { - let old = self.transactions.pop().expect("len is non-zero"); - self.scores.pop(); - scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::RemovedAt(self.transactions.len())); - - AddResult::PushedOut { - old, - new, - } - } else { - AddResult::Ok(new) - } - }, - // New transaction is replacing some other transaction already in the queue. 
- scoring::Choice::ReplaceOld => { - let old = mem::replace(&mut self.transactions[index], new.clone()); - scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::ReplacedAt(index)); - - AddResult::Replaced { - old, - new, - } - }, - } - } - - pub fn remove(&mut self, tx: &T, scoring: &S) -> bool { - let index = match self.transactions.binary_search_by(|old| scoring.compare(old, tx)) { - Ok(index) => index, - Err(_) => { - warn!("Attempting to remove non-existent transaction {:?}", tx); - return false; - }, - }; - - self.transactions.remove(index); - self.scores.remove(index); - // Update scoring - scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::RemovedAt(index)); - return true; - } - - pub fn cull>(&mut self, ready: &mut R, scoring: &S) -> SmallVec<[Transaction; PER_SENDER]> { - let mut result = SmallVec::new(); - if self.is_empty() { - return result; - } - - let mut first_non_stalled = 0; - for tx in &self.transactions { - match ready.is_ready(tx) { - Readiness::Stale => { - first_non_stalled += 1; - }, - Readiness::Ready | Readiness::Future => break, - } - } - - if first_non_stalled == 0 { - return result; - } - - // reverse the vectors to easily remove first elements. - self.transactions.reverse(); - self.scores.reverse(); - - for _ in 0..first_non_stalled { - self.scores.pop(); - result.push( - self.transactions.pop().expect("first_non_stalled is never greater than transactions.len(); qed") - ); - } - - self.transactions.reverse(); - self.scores.reverse(); - - // update scoring - scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::Culled(result.len())); - - // reverse the result to maintain correct order. - result.reverse(); - result - } -} diff --git a/transaction-pool/src/verifier.rs b/transaction-pool/src/verifier.rs deleted file mode 100644 index 991b78ebd..000000000 --- a/transaction-pool/src/verifier.rs +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use crate::VerifiedTransaction; - -/// Transaction verification. -/// -/// Verifier is responsible to decide if the transaction should even be considered for pool inclusion. -pub trait Verifier { - /// Verification error. - type Error; - - /// Verified transaction. - type VerifiedTransaction: VerifiedTransaction; - - /// Verifies a `UnverifiedTransaction` and produces `VerifiedTransaction` instance. 
- fn verify_transaction(&self, tx: U) -> Result; -} diff --git a/triehash/Cargo.toml b/triehash/Cargo.toml deleted file mode 100644 index 453815c6c..000000000 --- a/triehash/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "triehash" -version = "0.8.0" -authors = ["Parity Technologies "] -description = "In-memory patricia trie operations" -repository = "https://github.com/paritytech/parity-common" -license = "GPL-3.0" -edition = "2018" - -[dependencies] -hash-db = "0.15" -rlp = { version = "0.4", path = "../rlp" } - -[dev-dependencies] -criterion = "0.3" -keccak-hasher = "0.15" -tiny-keccak = "1.5" -trie-standardmap = "0.15" -hex-literal = "0.2" -ethereum-types = { version = "0.7", path = "../ethereum-types" } - -[[bench]] -name = "triehash" -path = "benches/triehash.rs" -harness = false diff --git a/triehash/README.md b/triehash/README.md deleted file mode 100644 index 99c5ce459..000000000 --- a/triehash/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This crate provides utility functions to validate and initialize tries using flexible input. -It is used extensively in `parity-ethereum` to validate blocks (mostly transactions and receipt roots). \ No newline at end of file diff --git a/triehash/benches/triehash.rs b/triehash/benches/triehash.rs deleted file mode 100644 index 8930f473e..000000000 --- a/triehash/benches/triehash.rs +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- -use criterion::{criterion_group, criterion_main, Criterion}; -use ethereum_types::H256; -use keccak_hasher::KeccakHasher; -use tiny_keccak::keccak256; -use trie_standardmap::{Alphabet, StandardMap, ValueMode}; -use triehash::trie_root; - -fn random_word(alphabet: &[u8], min_count: usize, diff_count: usize, seed: &mut H256) -> Vec { - assert!(min_count + diff_count <= 32); - *seed = H256(keccak256(seed.as_bytes())); - let r = min_count + (seed[31] as usize % (diff_count + 1)); - let mut ret: Vec = Vec::with_capacity(r); - for i in 0..r { - ret.push(alphabet[seed[i] as usize % alphabet.len()]); - } - ret -} - -fn random_bytes(min_count: usize, diff_count: usize, seed: &mut H256) -> Vec { - assert!(min_count + diff_count <= 32); - *seed = H256(keccak256(seed.as_bytes())); - let r = min_count + (seed[31] as usize % (diff_count + 1)); - seed[0..r].to_vec() -} - -fn random_value(seed: &mut H256) -> Vec { - *seed = H256(keccak256(seed.as_bytes())); - match seed[0] % 2 { - 1 => vec![seed[31];1], - _ => seed.as_bytes().to_vec(), - } -} - -fn bench_insertions(c: &mut Criterion) { - c.bench_function("32_mir_1k", |b| { - let st = StandardMap { - alphabet: Alphabet::All, - min_key: 32, - journal_key: 0, - value_mode: ValueMode::Mirror, - count: 1000, - }; - let d = st.make(); - b.iter(|| trie_root::(d.clone())); - }); - - c.bench_function("32_ran_1k", |b| { - let st = StandardMap { - alphabet: Alphabet::All, - min_key: 32, - journal_key: 0, - value_mode: ValueMode::Random, - count: 1000, - }; - let d = st.make(); - b.iter(|| trie_root::(d.clone())); - }); - - c.bench_function("six_high", |b| { - let mut d: Vec<(Vec, Vec)> = Vec::new(); - let mut seed = H256::default(); - for _ in 0..1000 { - let k = random_bytes(6, 0, &mut seed); - let v = random_value(&mut seed); - d.push((k, v)) - } - b.iter(|| trie_root::(d.clone())); - }); - - c.bench_function("six_mid", |b| { - let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_"; - let mut d: Vec<(Vec, Vec)> = Vec::new(); - let mut seed = H256::default(); - for _ in 0..1000 { - let k = random_word(alphabet, 6, 0, &mut seed); - let v = random_value(&mut seed); - d.push((k, v)) - } - b.iter(|| trie_root::(d.clone())); - }); - - c.bench_function("random_mid", |b| { - let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_"; - let mut d: Vec<(Vec, Vec)> = Vec::new(); - let mut seed = H256::default(); - for _ in 0..1000 { - let k = random_word(alphabet, 1, 5, &mut seed); - let v = random_value(&mut seed); - d.push((k, v)) - } - b.iter(|| trie_root::(d.clone())); - }); - - c.bench_function("six_low", |b| { - let alphabet = b"abcdef"; - let mut d: Vec<(Vec, Vec)> = Vec::new(); - let mut seed = H256::default(); - for _ in 0..1000 { - let k = random_word(alphabet, 6, 0, &mut seed); - let v = random_value(&mut seed); - d.push((k, v)) - } - b.iter(|| trie_root::(d.clone())); - }); -} - -criterion_group!(benches, bench_insertions); -criterion_main!(benches); diff --git a/triehash/src/lib.rs b/triehash/src/lib.rs deleted file mode 100644 index 94bcbfaed..000000000 --- a/triehash/src/lib.rs +++ /dev/null @@ -1,364 +0,0 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Generetes trie root. -//! -//! This module should be used to generate trie root hash. - -use std::cmp; -use std::collections::BTreeMap; -use std::iter::once; - -use hash_db::Hasher; -use rlp::RlpStream; - -fn shared_prefix_len(first: &[T], second: &[T]) -> usize { - first.iter() - .zip(second.iter()) - .position(|(f, s)| f != s) - .unwrap_or_else(|| cmp::min(first.len(), second.len())) -} - -/// Generates a trie root hash for a vector of values -/// -/// ```rust -/// extern crate triehash; -/// extern crate keccak_hasher; -/// extern crate ethereum_types; -/// #[macro_use] extern crate hex_literal; -/// use ethereum_types::H256; -/// use triehash::ordered_trie_root; -/// use keccak_hasher::KeccakHasher; -/// -/// fn main() { -/// let v = &["doe", "reindeer"]; -/// let root = H256::from(hex!("e766d5d51b89dc39d981b41bda63248d7abce4f0225eefd023792a540bcffee3")); -/// assert_eq!(ordered_trie_root::(v), root.as_ref()); -/// } -/// ``` -pub fn ordered_trie_root(input: I) -> H::Out -where - I: IntoIterator, - I::Item: AsRef<[u8]>, - H: Hasher, - ::Out: cmp::Ord, -{ - trie_root::(input.into_iter().enumerate().map(|(i, v)| (rlp::encode(&i), v))) -} - -/// Generates a trie root hash for a vector of key-value tuples -/// -/// ```rust -/// extern crate triehash; -/// extern crate ethereum_types; -/// extern crate keccak_hasher; -/// #[macro_use] extern crate hex_literal; -/// use triehash::trie_root; -/// use ethereum_types::H256; -/// use keccak_hasher::KeccakHasher; -/// -/// fn main() { -/// let v = vec![ -/// ("doe", "reindeer"), -/// ("dog", "puppy"), -/// ("dogglesworth", "cat"), -/// ]; -/// -/// let root = H256::from(hex!("8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3")); -/// assert_eq!(trie_root::(v), root.as_ref()); -/// } -/// ``` -pub fn trie_root(input: I) -> H::Out -where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, - H: Hasher, - ::Out: cmp::Ord, -{ - // first put elements into btree to sort them and to remove duplicates - let input = input - .into_iter() - .collect::>(); - - let mut nibbles = Vec::with_capacity(input.keys().map(|k| k.as_ref().len()).sum::() * 2); - let mut lens = Vec::with_capacity(input.len() + 1); - lens.push(0); - for k in input.keys() { - for &b in k.as_ref() { - nibbles.push(b >> 4); - nibbles.push(b & 0x0F); - } - lens.push(nibbles.len()); - } - - // then move them to a vector - let input = input.into_iter().zip(lens.windows(2)) - .map(|((_, v), w)| (&nibbles[w[0]..w[1]], v)) - .collect::>(); - - let mut stream = RlpStream::new(); - hash256rlp::(&input, 0, &mut stream); - H::hash(&stream.out()) -} - -/// Generates a key-hashed (secure) trie root hash for a vector of key-value tuples. 
-/// -/// ```rust -/// extern crate triehash; -/// extern crate keccak_hasher; -/// extern crate ethereum_types; -/// #[macro_use] extern crate hex_literal; -/// use ethereum_types::H256; -/// use triehash::sec_trie_root; -/// use keccak_hasher::KeccakHasher; -/// -/// fn main() { -/// let v = vec![ -/// ("doe", "reindeer"), -/// ("dog", "puppy"), -/// ("dogglesworth", "cat"), -/// ]; -/// -/// let root = H256::from(hex!("d4cd937e4a4368d7931a9cf51686b7e10abb3dce38a39000fd7902a092b64585")); -/// assert_eq!(sec_trie_root::(v), root.as_ref()); -/// } -/// ``` -pub fn sec_trie_root(input: I) -> H::Out -where - I: IntoIterator, - A: AsRef<[u8]>, - B: AsRef<[u8]>, - H: Hasher, - ::Out: cmp::Ord, -{ - trie_root::(input.into_iter().map(|(k, v)| (H::hash(k.as_ref()), v))) -} - -/// Hex-prefix Notation. First nibble has flags: oddness = 2^0 & termination = 2^1. -/// -/// The "termination marker" and "leaf-node" specifier are completely equivalent. -/// -/// Input values are in range `[0, 0xf]`. -/// -/// ```markdown -/// [0,0,1,2,3,4,5] 0x10012345 // 7 > 4 -/// [0,1,2,3,4,5] 0x00012345 // 6 > 4 -/// [1,2,3,4,5] 0x112345 // 5 > 3 -/// [0,0,1,2,3,4] 0x00001234 // 6 > 3 -/// [0,1,2,3,4] 0x101234 // 5 > 3 -/// [1,2,3,4] 0x001234 // 4 > 3 -/// [0,0,1,2,3,4,5,T] 0x30012345 // 7 > 4 -/// [0,0,1,2,3,4,T] 0x20001234 // 6 > 4 -/// [0,1,2,3,4,5,T] 0x20012345 // 6 > 4 -/// [1,2,3,4,5,T] 0x312345 // 5 > 3 -/// [1,2,3,4,T] 0x201234 // 4 > 3 -/// ``` -fn hex_prefix_encode<'a>(nibbles: &'a [u8], leaf: bool) -> impl Iterator + 'a { - let inlen = nibbles.len(); - let oddness_factor = inlen % 2; - - let first_byte = { - let mut bits = ((inlen as u8 & 1) + (2 * leaf as u8)) << 4; - if oddness_factor == 1 { - bits += nibbles[0]; - } - bits - }; - once(first_byte).chain(nibbles[oddness_factor..].chunks(2).map(|ch| ch[0] << 4 | ch[1])) -} - -fn hash256rlp(input: &[(A, B)], pre_len: usize, stream: &mut RlpStream) -where - A: AsRef<[u8]>, - B: AsRef<[u8]>, - H: Hasher, -{ - let inlen = input.len(); - - // in case of empty slice, just append empty data - if inlen == 0 { - stream.append_empty_data(); - return; - } - - // take slices - let key: &[u8] = &input[0].0.as_ref(); - let value: &[u8] = &input[0].1.as_ref(); - - // if the slice contains just one item, append the suffix of the key - // and then append value - if inlen == 1 { - stream.begin_list(2); - stream.append_iter(hex_prefix_encode(&key[pre_len..], true)); - stream.append(&value); - return; - } - - // get length of the longest shared prefix in slice keys - let shared_prefix = input.iter() - // skip first tuple - .skip(1) - // get minimum number of shared nibbles between first and each successive - .fold(key.len(), | acc, &(ref k, _) | { - cmp::min(shared_prefix_len(key, k.as_ref()), acc) - }); - - // if shared prefix is higher than current prefix append its - // new part of the key to the stream - // then recursively append suffixes of all items who had this key - if shared_prefix > pre_len { - stream.begin_list(2); - stream.append_iter(hex_prefix_encode(&key[pre_len..shared_prefix], false)); - hash256aux::(input, shared_prefix, stream); - return; - } - - // an item for every possible nibble/suffix - // + 1 for data - stream.begin_list(17); - - // if first key len is equal to prefix_len, move to next element - let mut begin = if pre_len == key.len() { 1 } else { 0 }; - - // iterate over all possible nibbles - for i in 0..16 { - // count how many successive elements have same next nibble - let len = input - .iter() - .skip(begin) - .take_while(|pair| 
pair.0.as_ref()[pre_len] == i) - .count(); - - // if at least 1 successive element has the same nibble - // append their suffixes - match len { - 0 => { stream.append_empty_data(); }, - _ => hash256aux::(&input[begin..(begin + len)], pre_len + 1, stream) - } - begin += len; - } - - // if fist key len is equal prefix, append its value - if pre_len == key.len() { - stream.append(&value); - } else { - stream.append_empty_data(); - } -} - -fn hash256aux(input: &[(A, B)], pre_len: usize, stream: &mut RlpStream) -where - A: AsRef<[u8]>, - B: AsRef<[u8]>, - H: Hasher, -{ - let mut s = RlpStream::new(); - hash256rlp::(input, pre_len, &mut s); - let out = s.out(); - match out.len() { - 0..=31 => stream.append_raw(&out, 1), - _ => stream.append(&H::hash(&out).as_ref()) - }; -} - -#[cfg(test)] -mod tests { - use super::{trie_root, shared_prefix_len, hex_prefix_encode}; - use keccak_hasher::KeccakHasher; - use ethereum_types::H256; - use hex_literal::hex; - - #[test] - fn test_hex_prefix_encode() { - let v = vec![0, 0, 1, 2, 3, 4, 5]; - let e = vec![0x10, 0x01, 0x23, 0x45]; - let h = hex_prefix_encode(&v, false).collect::>(); - assert_eq!(h, e); - - let v = vec![0, 1, 2, 3, 4, 5]; - let e = vec![0x00, 0x01, 0x23, 0x45]; - let h = hex_prefix_encode(&v, false).collect::>(); - assert_eq!(h, e); - - let v = vec![0, 1, 2, 3, 4, 5]; - let e = vec![0x20, 0x01, 0x23, 0x45]; - let h = hex_prefix_encode(&v, true).collect::>(); - assert_eq!(h, e); - - let v = vec![1, 2, 3, 4, 5]; - let e = vec![0x31, 0x23, 0x45]; - let h = hex_prefix_encode(&v, true).collect::>(); - assert_eq!(h, e); - - let v = vec![1, 2, 3, 4]; - let e = vec![0x00, 0x12, 0x34]; - let h = hex_prefix_encode(&v, false).collect::>(); - assert_eq!(h, e); - - let v = vec![4, 1]; - let e = vec![0x20, 0x41]; - let h = hex_prefix_encode(&v, true).collect::>(); - assert_eq!(h, e); - } - - #[test] - fn simple_test() { - assert_eq!( - trie_root::(vec![ - (b"A", b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" as &[u8]) - ]), - H256::from(hex!("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab")).as_ref(), - ); - } - - #[test] - fn test_triehash_out_of_order() { - assert_eq!( - trie_root::(vec![ - (vec![0x01u8, 0x23], vec![0x01u8, 0x23]), - (vec![0x81u8, 0x23], vec![0x81u8, 0x23]), - (vec![0xf1u8, 0x23], vec![0xf1u8, 0x23]), - ]), - trie_root::(vec![ - (vec![0x01u8, 0x23], vec![0x01u8, 0x23]), - (vec![0xf1u8, 0x23], vec![0xf1u8, 0x23]), // last two tuples are swapped - (vec![0x81u8, 0x23], vec![0x81u8, 0x23]), - ]), - ); - } - - #[test] - fn test_shared_prefix() { - let a = vec![1,2,3,4,5,6]; - let b = vec![4,2,3,4,5,6]; - assert_eq!(shared_prefix_len(&a, &b), 0); - } - - #[test] - fn test_shared_prefix2() { - let a = vec![1,2,3,3,5]; - let b = vec![1,2,3]; - assert_eq!(shared_prefix_len(&a, &b), 3); - } - - #[test] - fn test_shared_prefix3() { - let a = vec![1,2,3,4,5,6]; - let b = vec![1,2,3,4,5,6]; - assert_eq!(shared_prefix_len(&a, &b), 6); - } -} diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md new file mode 100644 index 000000000..99e30f197 --- /dev/null +++ b/uint/CHANGELOG.md @@ -0,0 +1,56 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.10.0] - 2024-09-11 +- Removed From<[u8; n]> conversions, renamed `to_big_endian` / `to_little_endian` to write_as_*, and made them return byte arrays. [#859](https://github.com/paritytech/parity-common/pull/859) + +## [0.9.5] - 2022-11-29 +- Implemented bitwise assign traits. 
[#690](https://github.com/paritytech/parity-common/pull/690) + +## [0.9.4] - 2022-09-20 +- Made `one` const. [#650](https://github.com/paritytech/parity-common/pull/650) +- Made `max_value` const. [#652](https://github.com/paritytech/parity-common/pull/652) +- Made `is_zero` const. [#639](https://github.com/paritytech/parity-common/pull/639) +- Added `abs_diff`. [#665](https://github.com/paritytech/parity-common/pull/665) + +## [0.9.3] - 2022-02-04 +- Simplified and faster `div_mod`. [#478](https://github.com/paritytech/parity-common/pull/478) +- Fixed `overflowing_neg`. [#611](https://github.com/paritytech/parity-common/pull/611) + +## [0.9.2] - 2022-01-28 +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Display formatting support. [#603](https://github.com/paritytech/parity-common/pull/603) + +## [0.9.1] - 2021-06-30 +- Added `integer_sqrt` method. [#554](https://github.com/paritytech/parity-common/pull/554) + +## [0.9.0] - 2021-01-05 +- Allow `0x` prefix in `from_str`. [#487](https://github.com/paritytech/parity-common/pull/487) +### Breaking +- Optimized FromStr, made it no_std-compatible. [#468](https://github.com/paritytech/parity-common/pull/468) + +## [0.8.5] - 2020-08-12 +- Make const matching work again. [#421](https://github.com/paritytech/parity-common/pull/421) + +## [0.8.4] - 2020-08-03 +- Added a manual impl of `Eq` and `Hash`. [#390](https://github.com/paritytech/parity-common/pull/390) +- Removed some unsafe code and added big-endian support. [#407](https://github.com/paritytech/parity-common/pull/407) +- Added `checked_pow`. [#417](https://github.com/paritytech/parity-common/pull/417) + +## [0.8.3] - 2020-04-27 +- Added `arbitrary` feature. [#378](https://github.com/paritytech/parity-common/pull/378) +- Fixed UB in `from_big_endian`. [#381](https://github.com/paritytech/parity-common/pull/381) + +## [0.8.2] - 2019-10-24 +### Fixed +- Fixed 2018 edition imports. [#237](https://github.com/paritytech/parity-common/pull/237) +- Removed `uninitialized` usage. [#238](https://github.com/paritytech/parity-common/pull/238) +### Dependencies +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) +### Changed +- Modified AsRef impl. 
[#196](https://github.com/paritytech/parity-common/pull/196) diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 2a151f87e..dbb84d18a 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -1,23 +1,26 @@ [package] -description = "Large fixed-size integers arithmetics" +description = "Large fixed-size integer arithmetic" homepage = "http://parity.io" repository = "https://github.com/paritytech/parity-common" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" name = "uint" -version = "0.8.1" +version = "0.10.0" authors = ["Parity Technologies "] readme = "README.md" -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [dependencies] -byteorder = { version = "1", default-features = false } -rustc-hex = { version = "2.0", default-features = false } -quickcheck = { version = "0.6", optional = true } -crunchy = { version = "0.2", default-features = true } +byteorder = { version = "1.4.2", default-features = false } +crunchy = { version = "0.2.2", default-features = false } +quickcheck = { version = "1", optional = true } +hex = { version = "0.4", default-features = false } +static_assertions = "1.0.0" +arbitrary = { version = "1.0", optional = true } [features] default = ["std"] -std = ["byteorder/std", "rustc-hex/std", "crunchy/std"] +std = ["byteorder/std", "crunchy/std", "hex/std"] [[example]] name = "modular" @@ -27,11 +30,14 @@ name = "uint_tests" required-features = ["std"] [dev-dependencies] -criterion = "0.2.11" -num-bigint = "0.2" +criterion = "0.5.1" +num-bigint = "0.4.0" -[target.'cfg(unix)'.dev-dependencies] -rug = { version = "1.4", default-features = false, features = ["integer"] } +[target.'cfg(all(unix, target_arch = "x86_64"))'.dev-dependencies] +rug = { version = "1.6.0", default-features = false, features = [ + "integer", + "std", +] } [[bench]] name = "bigint" diff --git a/uint/README.md b/uint/README.md index e10638919..34006f83d 100644 --- a/uint/README.md +++ b/uint/README.md @@ -2,7 +2,7 @@ ## Description -Provides facilities to construct big unsigned integer types. +Provides facilities to construct big unsigned integer types which use no allocations (stack-based, fixed bit length). If you want to use a predefined `U128`, `U256` or `U512` type, take a look at the [`primitive-types`](https://github.com/paritytech/parity-common/tree/master/primitive-types) or [`ethereum-types`](https://github.com/paritytech/parity-common/tree/master/ethereum-types) crate. The focus on the provided big unsigned integer types is performance and cross-platform availability. @@ -16,11 +16,26 @@ In your `Cargo.toml` paste uint = "0.8" ``` +Import the macro + +``` +use uint::construct_uint; +``` + +If you're using pre-edition Rust in your main file + +``` +#[macro_use] +extern crate uint; +``` + Construct your own big unsigned integer type as follows. ``` // U1024 with 1024 bits consisting of 16 x 64-bit words -construct_uint!(U1024; 16); +construct_uint! { + pub struct U1024(16); +} ``` ## Tests @@ -54,3 +69,5 @@ see fuzz [README.md](fuzz/README.md) - Enabled by default. - `quickcheck`: Enable quickcheck-style property testing - Use with `cargo test --release --features=quickcheck`. +- `arbitrary`: Allow for creation of an `uint` object from random unstructured input for use with fuzzers that use the `arbitrary` crate. + - Disabled by default. 
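For readers of the updated README above, here is a minimal usage sketch of a `construct_uint!`-generated type. The `U1024` definition mirrors the README example; the method calls (`from_dec_str`, `overflowing_add`, `checked_pow`, and `to_little_endian` returning a byte array) follow the uint 0.10 API described in the changelog and exercised in the benchmarks below. The values are illustrative only, not taken from this PR.

```
use uint::construct_uint;

construct_uint! {
    // 1024-bit unsigned integer built from 16 x 64-bit words, as in the README example.
    pub struct U1024(16);
}

fn main() {
    // Parsing from a decimal string returns a Result.
    let a = U1024::from_dec_str("1000000000000000000000000000000").expect("valid decimal");
    let b = U1024::from(7u64);

    // Plain operators panic on overflow; the overflowing_/checked_ variants report it instead.
    let sum = a + b;
    let (doubled, overflowed) = a.overflowing_add(a);
    assert!(!overflowed);

    // checked_pow takes the exponent as the same uint type and returns None on overflow.
    let power = b.checked_pow(U1024::from(10u64)).expect("7^10 fits in 1024 bits");

    // Since uint 0.10, to_little_endian returns the byte array directly (128 bytes for U1024).
    let bytes = sum.to_little_endian();
    assert_eq!(bytes.len(), 128);

    println!("{} {} {:x}", sum, doubled, power);
}
```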
diff --git a/uint/benches/bigint.rs b/uint/benches/bigint.rs index e1d45adc9..d338ccb43 100644 --- a/uint/benches/bigint.rs +++ b/uint/benches/bigint.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -12,13 +12,8 @@ //! rustup run cargo bench //! ``` -#[macro_use] -extern crate criterion; -extern crate core; -#[macro_use] -extern crate uint; -extern crate num_bigint; -extern crate rug; +use criterion::{criterion_group, criterion_main}; +use uint::{construct_uint, uint_full_mul_reg}; construct_uint! { pub struct U256(4); @@ -35,7 +30,7 @@ impl U256 { } } -use criterion::{black_box, Bencher, Criterion, ParameterizedBenchmark}; +use criterion::{black_box, Bencher, BenchmarkId, Criterion}; use num_bigint::BigUint; use rug::{integer::Order, Integer}; use std::str::FromStr; @@ -49,6 +44,7 @@ criterion_group!( u256_div, u512_div_mod, u256_rem, + u256_integer_sqrt, u256_bit_and, u256_bit_or, u256_bit_xor, @@ -63,6 +59,7 @@ criterion_group!( u512_mul, u512_div, u512_rem, + u512_integer_sqrt, u512_mul_u32_vs_u64, mulmod_u512_vs_biguint_vs_gmp, conversions, @@ -76,12 +73,12 @@ criterion_group!( u128_mul, u128_div, from_fixed_array, + from_str, ); criterion_main!(bigint); fn to_biguint(x: U256) -> BigUint { - let mut bytes = [0u8; 32]; - x.to_little_endian(&mut bytes); + let bytes = x.to_little_endian(); BigUint::from_bytes_le(&bytes) } @@ -101,135 +98,113 @@ fn from_gmp(x: Integer) -> U512 { } fn u128_div(c: &mut Criterion) { - c.bench( - "u128_div", - ParameterizedBenchmark::new( - "", - |b, (x, y, z)| { - b.iter(|| { - let x = black_box(u128::from(*x) << 64 + u128::from(*y)); - black_box(x / u128::from(*z)) - }) - }, - vec![(0u64, u64::max_value(), 100u64), (u64::max_value(), u64::max_value(), 99), (42, 42, 100500)], - ), - ); + let mut group = c.benchmark_group("u128_div"); + for input in [(0u64, u64::max_value(), 100u64), (u64::max_value(), u64::max_value(), 99), (42, 42, 100500)] { + group.bench_with_input(BenchmarkId::from_parameter(input.2), &input, |b, (x, y, z)| { + b.iter(|| { + let x = black_box(u128::from(*x) << 64 + u128::from(*y)); + black_box(x / u128::from(*z)) + }) + }); + } + group.finish(); } fn u256_add(c: &mut Criterion) { - c.bench( - "u256_add", - ParameterizedBenchmark::new( - "", - |b, (x, y)| { - b.iter(|| { - let x = U256::from(*x); - let y = U256::from(*y); - black_box(x.overflowing_add(y).0) - }) - }, - vec![(0u64, 1u64), (u64::max_value(), 1), (42, 100500)], - ), - ); + let mut group = c.benchmark_group("u256_add"); + for input in [(0u64, 1u64), (u64::max_value(), 1), (42, 100500)] { + group.bench_with_input(BenchmarkId::from_parameter(input.0), &input, |b, (x, y)| { + b.iter(|| { + let x = U256::from(*x); + let y = U256::from(*y); + black_box(x.overflowing_add(y).0) + }) + }); + } + group.finish(); } fn u256_sub(c: &mut Criterion) { - c.bench( - "u256_sub", - ParameterizedBenchmark::new( - "", - |b, (x, y)| { - b.iter(|| { - let y = U256::from(*y); - black_box(x.overflowing_sub(y).0) - }) - }, - vec![(U256::max_value(), 1u64), (U256::from(3), 2)], - ), - ); + let mut group = c.benchmark_group("hex_to_bytes"); + for input in [(U256::MAX, 1u64), (U256::from(3), 2)] { + group.bench_with_input(BenchmarkId::from_parameter(input.0), &input, |b, (x, y)| { + b.iter(|| { + let y = U256::from(*y); + black_box(x.overflowing_sub(y).0) + }) + }); + } + group.finish(); } fn u256_mul(c: &mut Criterion) { - c.bench( - "u256_mul", - ParameterizedBenchmark::new( 
- "", - |b, (x, y)| { - b.iter(|| { - let y = U256::from(*y); - black_box(x.overflowing_mul(y).0) - }) - }, - vec![ - (U256::max_value(), 1u64), - (U256::from(3), u64::max_value()), - ( - U256::from_dec_str("21674844646682989462120101885968193938394323990565507610662749").unwrap(), - 173, - ), - ], - ), - ); + let mut group = c.benchmark_group("u256_mul"); + for input in [ + (U256::MAX, 1u64), + (U256::from(3), u64::max_value()), + (U256::from_dec_str("21674844646682989462120101885968193938394323990565507610662749").unwrap(), 173), + ] { + group.bench_with_input(BenchmarkId::from_parameter(input.1), &input, |b, (x, y)| { + b.iter(|| { + let y = U256::from(*y); + black_box(x.overflowing_mul(y).0) + }) + }); + } + group.finish(); } fn u512_div_mod(c: &mut Criterion) { - c.bench( - "u512_div_mod", - ParameterizedBenchmark::new( - "", - |b, (x, y)| { - b.iter(|| { - let (q, r) = x.div_mod(*y); - black_box((q, r)) - }) - }, - vec![ - (U512::max_value(), U512::from(1u64)), - (U512::from(u64::max_value()), U512::from(u32::max_value())), - (U512::from(u64::max_value()), U512::from(u64::max_value() - 1)), - (U512::from(u64::max_value()), U512::from(u64::max_value() - 1)), - ( - U512::from_dec_str("3759751734479964094783137206182536765532905409829204647089173492").unwrap(), - U512::from_dec_str("21674844646682989462120101885968193938394323990565507610662749").unwrap(), - ), - ( - U512::from_str( - "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", - ) - .unwrap(), - U512::from_str( - "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0", - ) - .unwrap(), - ), - ( - U512::from_dec_str( - "204586912993508866875824356051724947013540127877691549342705710506008362274387533983037847993622361501550043477868832682875761627559574690771211649025" - ).unwrap(), - U512::from_dec_str( - "452312848583266388373324160190187140051835877600158453279131187530910662640" - ).unwrap(), - ), - ], + let mut group = c.benchmark_group("u512_div_mod"); + for input in [ + (U512::MAX, U512::from(1u64)), + (U512::from(u64::max_value()), U512::from(u32::max_value())), + (U512::from(u64::max_value()), U512::from(u64::max_value() - 1)), + ( + U512::from_dec_str("3759751734479964094783137206182536765532905409829204647089173492").unwrap(), + U512::from_dec_str("21674844646682989462120101885968193938394323990565507610662749").unwrap(), ), - ); + ( + U512::from_str( + "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + ) + .unwrap(), + U512::from_str( + "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0", + ) + .unwrap(), + ), + ( + U512::from_dec_str( + "204586912993508866875824356051724947013540127877691549342705710506008362274387533983037847993622361501550043477868832682875761627559574690771211649025" + ).unwrap(), + U512::from_dec_str( + "452312848583266388373324160190187140051835877600158453279131187530910662640" + ).unwrap(), + ), + ] { + group.bench_with_input(BenchmarkId::from_parameter(input.1), &input, |b, (x, y)| { + b.iter(|| { + let (q, r) = x.div_mod(*y); + black_box((q, r)) + }) + }); + } + group.finish(); } fn u256_mul_full(c: &mut Criterion) { - c.bench( - "u256_mul_full", - ParameterizedBenchmark::new( - "", - |b, (x, y)| { - b.iter(|| { - let y = *y; - let U512(ref u512words) = x.full_mul(U256([y, y, y, y])); - black_box(U256([u512words[0], u512words[2], u512words[2], u512words[3]])) - }) - }, - vec![(U256::from(42), 1u64), (U256::from(3), u64::max_value())], - ), - ); + let mut group = c.benchmark_group("hex_to_bytes"); + for 
input in [(U256::from(42), 1u64), (U256::from(3), u64::max_value())] { + group.bench_with_input(BenchmarkId::from_parameter(input.1), &input, |b, (x, y)| { + b.iter(|| { + let y = *y; + let U512(ref u512words) = x.full_mul(U256([y, y, y, y])); + black_box(U256([u512words[0], u512words[2], u512words[2], u512words[3]])) + }) + }); + } + group.finish(); } fn u256_div(c: &mut Criterion) { @@ -239,31 +214,37 @@ fn u256_div(c: &mut Criterion) { } fn u256_rem(c: &mut Criterion) { - c.bench( - "u256_rem", - ParameterizedBenchmark::new( - "", - |b, (x, y)| b.iter(|| black_box(x % y)), - vec![ - (U256::max_value(), U256::from(1u64)), - (U256::from(u64::max_value()), U256::from(u64::from(u32::max_value()) + 1)), - ( - U256([12767554894655550452, 16333049135534778834, 140317443000293558, 598963]), - U256([2096410819092764509, 8483673822214032535, 36306297304129857, 3453]), - ), - ( - U256::from_str( - "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", - ) - .unwrap(), - U256::from_str( - "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0", - ) - .unwrap(), - ), - ], + let mut group = c.benchmark_group("u256_rem"); + for input in [ + (U256::MAX, U256::from(1u64)), + (U256::from(u64::max_value()), U256::from(u64::from(u32::max_value()) + 1)), + ( + U256([12767554894655550452, 16333049135534778834, 140317443000293558, 598963]), + U256([2096410819092764509, 8483673822214032535, 36306297304129857, 3453]), + ), + ( + U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), + U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0").unwrap(), ), - ); + ] { + group.bench_with_input(BenchmarkId::from_parameter(input.0), &input, |b, (x, y)| b.iter(|| black_box(x % y))); + } + group.finish(); +} + +fn u256_integer_sqrt(c: &mut Criterion) { + let mut group = c.benchmark_group("u256_integer_sqrt"); + for input in [ + U256::from(u64::MAX), + U256::from(u128::MAX) + 1, + U256::from(u128::MAX - 1) * U256::from(u128::MAX - 1) - 1, + U256::MAX, + ] { + group.bench_with_input(BenchmarkId::from_parameter(input), &input, |b, x| { + b.iter(|| black_box(x.integer_sqrt().0)) + }); + } + group.finish(); } fn u512_pairs() -> Vec<(U512, U512)> { @@ -271,54 +252,58 @@ fn u512_pairs() -> Vec<(U512, U512)> { (U512::from(1u64), U512::from(0u64)), (U512::from(u64::max_value()), U512::from(u64::from(u32::max_value()) + 1)), ( - U512([ - 12767554894655550452, - 16333049135534778834, - 140317443000293558, - 598963, - 0, - 0, - 0, - 0, - ]), + U512([12767554894655550452, 16333049135534778834, 140317443000293558, 598963, 0, 0, 0, 0]), U512([0, 0, 0, 0, 2096410819092764509, 8483673822214032535, 36306297304129857, 3453]), ), ( - U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") - .unwrap(), - U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0") - .unwrap(), + U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), + U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0").unwrap(), ), ] } fn u512_add(c: &mut Criterion) { - c.bench( - "u512_add", - ParameterizedBenchmark::new("", |b, (x, y)| b.iter(|| black_box(x + y)), u512_pairs()), - ); + let mut group = c.benchmark_group("u512_add"); + for input in u512_pairs() { + group.bench_with_input(BenchmarkId::from_parameter(input.1), &input, |b, (x, y)| b.iter(|| black_box(x + y))); + } + group.finish(); } fn u512_sub(c: &mut Criterion) { - c.bench( - "u512_sub", - ParameterizedBenchmark::new( - 
"", - |b, (x, y)| b.iter(|| black_box(x.overflowing_sub(*y).0)), - u512_pairs(), - ), - ); + let mut group = c.benchmark_group("u512_sub"); + for input in u512_pairs() { + group.bench_with_input(BenchmarkId::from_parameter(input.1), &input, |b, (x, y)| { + b.iter(|| black_box(x.overflowing_sub(*y).0)) + }); + } + group.finish(); } fn u512_mul(c: &mut Criterion) { - c.bench( - "u512_mul", - ParameterizedBenchmark::new( - "", - |b, (x, y)| b.iter(|| black_box(x.overflowing_mul(*y).0)), - u512_pairs(), - ), - ); + let mut group = c.benchmark_group("u512_mul"); + for input in u512_pairs() { + group.bench_with_input(BenchmarkId::from_parameter(input.1), &input, |b, (x, y)| { + b.iter(|| black_box(x.overflowing_mul(*y).0)) + }); + } + group.finish(); +} + +fn u512_integer_sqrt(c: &mut Criterion) { + let mut group = c.benchmark_group("u512_integer_sqrt"); + for input in [ + U512::from(u32::MAX) + 1, + U512::from(u64::MAX), + (U512::from(u128::MAX) + 1) * (U512::from(u128::MAX) + 1), + U256::MAX.full_mul(U256::MAX) - 1, + U512::MAX, + ] { + group.bench_with_input(BenchmarkId::from_parameter(input), &input, |b, x| { + b.iter(|| black_box(x.integer_sqrt().0)) + }); + } + group.finish(); } fn u512_div(c: &mut Criterion) { @@ -370,15 +355,12 @@ fn u512_rem(c: &mut Criterion) { } fn conversions(c: &mut Criterion) { - c.bench( - "conversions biguint vs gmp", - ParameterizedBenchmark::new( - "BigUint", - |b, i| bench_convert_to_biguit(b, *i), - vec![0, 42, u64::max_value()], - ) - .with_function("gmp", |b, i| bench_convert_to_gmp(b, *i)), - ); + let mut group = c.benchmark_group("conversions biguint vs gmp"); + for input in [0, 42, u64::MAX] { + group.bench_with_input(BenchmarkId::new("BigUint", input), &input, |b, i| bench_convert_to_biguit(b, *i)); + group.bench_with_input(BenchmarkId::new("GMP", input), &input, |b, i| bench_convert_to_gmp(b, *i)); + } + group.finish(); } fn bench_convert_to_biguit(b: &mut Bencher, i: u64) { @@ -400,28 +382,23 @@ fn bench_convert_to_gmp(b: &mut Bencher, i: u64) { } fn u512_mul_u32_vs_u64(c: &mut Criterion) { - let ms = vec![1u32, 42, 10_000_001, u32::max_value()]; - c.bench( - "multiply u512 by u32 vs u64", - ParameterizedBenchmark::new("u32", |b, i| bench_u512_mul_u32(b, *i), ms) - .with_function("u64", |b, i| bench_u512_mul_u64(b, u64::from(*i))), - ); + let ms = vec![1u32, 42, 10_000_001, u32::MAX]; + let mut group = c.benchmark_group("multiply u512 by u32 vs u64"); + for input in ms { + group.bench_with_input(BenchmarkId::new("u32", input), &input, |b, i| bench_u512_mul_u32(b, *i)); + group.bench_with_input(BenchmarkId::new("u64", input), &input, |b, i| bench_u512_mul_u64(b, u64::from(*i))); + } + group.finish(); } fn bench_u512_mul_u32(b: &mut Bencher, i: u32) { - let x = - U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - b.iter(|| { - black_box(x * i) - }); + let x = U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + b.iter(|| black_box(x * i)); } fn bench_u512_mul_u64(b: &mut Bencher, i: u64) { - let x = - U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - b.iter(|| { - black_box(x * i) - }); + let x = U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + b.iter(|| black_box(x * i)); } fn mulmod_u512_vs_biguint_vs_gmp(c: &mut Criterion) { @@ -431,19 +408,18 @@ fn mulmod_u512_vs_biguint_vs_gmp(c: &mut Criterion) { U256::from(u64::max_value()), 
U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1").unwrap(), ]; - c.bench( - "mulmod u512 vs biguint vs gmp", - ParameterizedBenchmark::new("u512", |b, i| bench_u512_mulmod(b, *i), mods) - .with_function("BigUint", |b, i| bench_biguint_mulmod(b, *i)) - .with_function("gmp", |b, i| bench_gmp_mulmod(b, *i)), - ); + let mut group = c.benchmark_group("mulmod u512 vs biguint vs gmp"); + for input in mods { + group.bench_with_input(BenchmarkId::new("u512", input), &input, |b, i| bench_u512_mulmod(b, *i)); + group.bench_with_input(BenchmarkId::new("BigUint", input), &input, |b, i| bench_biguint_mulmod(b, *i)); + group.bench_with_input(BenchmarkId::new("GMP", input), &input, |b, i| bench_gmp_mulmod(b, *i)); + } + group.finish(); } fn bench_biguint_mulmod(b: &mut Bencher, z: U256) { - let x = - U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - let y = - U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + let x = U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + let y = U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); b.iter(|| { let w = to_biguint(x) * to_biguint(y); black_box(from_biguint(w % to_biguint(z))) @@ -451,10 +427,8 @@ fn bench_biguint_mulmod(b: &mut Bencher, z: U256) { } fn bench_gmp_mulmod(b: &mut Bencher, z: U256) { - let x = - U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - let y = - U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + let x = U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + let y = U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); b.iter(|| { let w = to_gmp(x) * to_gmp(y); black_box(from_gmp(w % to_gmp(z))) @@ -462,10 +436,8 @@ fn bench_gmp_mulmod(b: &mut Bencher, z: U256) { } fn bench_u512_mulmod(b: &mut Bencher, z: U256) { - let x = - U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - let y = - U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + let x = U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + let y = U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); let z = U512([z.0[0], z.0[1], z.0[2], z.0[3], 0, 0, 0, 0]); b.iter(|| { let w = x.overflowing_mul(y).0; @@ -476,9 +448,7 @@ fn bench_u512_mulmod(b: &mut Bencher, z: U256) { // NOTE: uses native `u128` and does not measure this crates performance, // but might be interesting as a comparison. 
fn u128_mul(c: &mut Criterion) { - c.bench_function("u128_mul", |b| { - b.iter(|| black_box(12345u128 * u128::from(u64::max_value()))) - }); + c.bench_function("u128_mul", |b| b.iter(|| black_box(12345u128 * u128::from(u64::max_value())))); } fn u256_bit_and(c: &mut Criterion) { @@ -631,7 +601,7 @@ fn u512_shr(c: &mut Criterion) { fn u256_ord(c: &mut Criterion) { let one = U256([12767554894655550452, 16333049135534778834, 140317443000293558, 598963]); let two = U256([2096410819092764509, 8483673822214032535, 36306297304129857, 3453]); - c.bench_function("u256_ord", move |b| b.iter(|| black_box(one < two))); + c.bench_function("u256_ord", move |b| b.iter(|| black_box(one) < black_box(two))); } fn u512_ord(c: &mut Criterion) { @@ -655,15 +625,15 @@ fn u512_ord(c: &mut Criterion) { 36306297304129857, 3453, ]); - c.bench_function("u512_ord", move |b| b.iter(|| black_box(one < two))); + c.bench_function("u512_ord", move |b| b.iter(|| black_box(one) < black_box(two))); } fn u256_from_le(c: &mut Criterion) { c.bench_function("u256_from_le", |b| { b.iter(|| { let raw = black_box([ - 1u8, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, - 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, + 1u8, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, + 101, 103, 107, 109, 113, 127, ]); black_box(U256::from_little_endian(&raw[..])) }) @@ -674,8 +644,8 @@ fn u256_from_be(c: &mut Criterion) { c.bench_function("u256_from_be", |b| { b.iter(|| { let raw = black_box([ - 1u8, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, - 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, + 1u8, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, + 101, 103, 107, 109, 113, 127, ]); black_box(U256::from_big_endian(&raw[..])) }) @@ -684,18 +654,25 @@ fn u256_from_be(c: &mut Criterion) { fn from_fixed_array(c: &mut Criterion) { let ary512: [u8; 64] = [ - 255, 0, 0, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 121, 0, 0, 0, 0, 0, 213, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45, 0, 0, 67, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 123, - ]; - let ary256: [u8; 32] = [ - 255, 0, 0, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 121, 0, 0, 0, 0, 0, 213, 0, 0, - 0, 0, 0, 0, + 255, 0, 0, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 121, 0, 0, 0, 0, 0, 213, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45, 0, 0, 67, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 123, ]; + let ary256: [u8; 32] = + [255, 0, 0, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 121, 0, 0, 0, 0, 0, 213, 0, 0, 0, 0, 0, 0]; c.bench_function("from_fixed_array", move |b| { b.iter(|| { - let _: U512 = black_box(ary512.into()); - let _: U256 = black_box(ary256.into()); + let _: U512 = black_box(U512::from_big_endian(black_box(&ary512))); + let _: U256 = black_box(U256::from_big_endian(black_box(&ary256))); + }) + }); +} + +fn from_str(c: &mut Criterion) { + c.bench_function("from_str", move |b| { + b.iter(|| { + black_box(U512::from_str(black_box("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")).unwrap()); + black_box(U512::from_str(black_box("0FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")).unwrap()); + 
black_box(U512::from_str(black_box("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")).unwrap()); }) }); } diff --git a/uint/examples/modular.rs b/uint/examples/modular.rs index 6cd9f7409..30b236992 100644 --- a/uint/examples/modular.rs +++ b/uint/examples/modular.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -6,9 +6,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#[cfg(feature="std")] -extern crate core; - #[macro_use] extern crate uint; @@ -22,9 +19,8 @@ fn main() { // imagine the field 0..p // where the p is defined below // (it's a prime!) - let p = U256::from_dec_str( - "38873241744847760218045702002058062581688990428170398542849190507947196700873" - ).expect("p to be a good number in the example"); + let p = U256::from_dec_str("38873241744847760218045702002058062581688990428170398542849190507947196700873") + .expect("p to be a good number in the example"); // then, on this field, // (p-1) + (p+1) = 0 @@ -51,7 +47,7 @@ fn main() { let multiplicator = 3; let mul = { let mut result = p_minus_1; - for _ in 0..multiplicator-1 { + for _ in 0..multiplicator - 1 { result = (p_minus_1 + result) % p; } result diff --git a/uint/fuzz/Cargo.toml b/uint/fuzz/Cargo.toml index 151c851cb..78119034f 100644 --- a/uint/fuzz/Cargo.toml +++ b/uint/fuzz/Cargo.toml @@ -4,7 +4,8 @@ description = "Fuzzers for uint algorithms" publish = false version = "0.1.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [package.metadata] cargo-fuzz = true @@ -24,3 +25,7 @@ path = "fuzz_targets/div_mod.rs" [[bin]] name = "div_mod_word" path = "fuzz_targets/div_mod_word.rs" + +[[bin]] +name = "isqrt" +path = "fuzz_targets/isqrt.rs" diff --git a/uint/fuzz/fuzz_targets/div_mod.rs b/uint/fuzz/fuzz_targets/div_mod.rs index 7bcf751a8..fdeaefa86 100644 --- a/uint/fuzz/fuzz_targets/div_mod.rs +++ b/uint/fuzz/fuzz_targets/div_mod.rs @@ -1,3 +1,11 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + #![no_main] use libfuzzer_sys::fuzz_target; @@ -15,7 +23,7 @@ fn from_gmp(x: Integer) -> U512 { } fuzz_target!(|data: &[u8]| { - if data.len() == 128 { + if data.len() == 128 { let x = U512::from_little_endian(&data[..64]); let y = U512::from_little_endian(&data[64..]); let x_gmp = Integer::from_digits(&data[..64], Order::LsfLe); @@ -24,5 +32,5 @@ fuzz_target!(|data: &[u8]| { let (a, b) = x_gmp.div_rem(y_gmp); assert_eq!((from_gmp(a), from_gmp(b)), x.div_mod(y)); } - } + } }); diff --git a/uint/fuzz/fuzz_targets/div_mod_word.rs b/uint/fuzz/fuzz_targets/div_mod_word.rs index 890774c08..d1a04ee19 100644 --- a/uint/fuzz/fuzz_targets/div_mod_word.rs +++ b/uint/fuzz/fuzz_targets/div_mod_word.rs @@ -1,3 +1,11 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ #![no_main] use libfuzzer_sys::fuzz_target; @@ -49,7 +57,7 @@ fn div_mod_word(hi: u64, lo: u64, y: u64) -> (u64, u64) { } fuzz_target!(|data: &[u8]| { - if data.len() == 24 { + if data.len() == 24 { let mut buf = [0u8; 8]; buf.copy_from_slice(&data[..8]); let x = u64::from_ne_bytes(buf); @@ -60,5 +68,5 @@ fuzz_target!(|data: &[u8]| { if x < z { assert_eq!(div_mod_word(x, y, z), div_mod_word_u128(x, y, z)); } - } + } }); diff --git a/uint/fuzz/fuzz_targets/isqrt.rs b/uint/fuzz/fuzz_targets/isqrt.rs new file mode 100644 index 000000000..63b28e8e2 --- /dev/null +++ b/uint/fuzz/fuzz_targets/isqrt.rs @@ -0,0 +1,50 @@ +// Copyright 2021 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![no_main] + +use libfuzzer_sys::fuzz_target; +use uint::*; + +construct_uint! { + pub struct U256(4); +} + +fn isqrt(mut me: U256) -> U256 { + let one = U256::one(); + if me <= one { + return me; + } + // the implementation is based on: + // https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Binary_numeral_system_(base_2) + + // "bit" starts at the highest power of four <= self. + let max_shift = 4 * 64 as u32 - 1; + let shift: u32 = (max_shift - me.leading_zeros()) & !1; + let mut bit = one << shift; + let mut result = U256::zero(); + while !bit.is_zero() { + let x = result + bit; + result >>= 1; + if me >= x { + me -= x; + result += bit; + } + bit >>= 2; + } + result +} + +fuzz_target!(|data: &[u8]| { + if data.len() == 32 { + let x = U256::from_little_endian(data); + let expected = isqrt(x); + let got = x.integer_sqrt(); + assert_eq!(got, expected); + } +}); diff --git a/uint/src/lib.rs b/uint/src/lib.rs index 352ccf70d..e259c79d7 100644 --- a/uint/src/lib.rs +++ b/uint/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -11,23 +11,30 @@ #![cfg_attr(not(feature = "std"), no_std)] #[doc(hidden)] -pub extern crate byteorder; +pub use byteorder; // Re-export libcore using an alias so that the macros can work without // requiring `extern crate core` downstream. #[doc(hidden)] -pub extern crate core as core_; +pub use core as core_; #[doc(hidden)] -pub extern crate rustc_hex; +pub use hex; -#[cfg(feature="quickcheck")] +#[cfg(feature = "quickcheck")] #[doc(hidden)] -pub extern crate quickcheck; +pub use quickcheck; + +#[cfg(feature = "arbitrary")] +#[doc(hidden)] +pub use arbitrary; + +#[doc(hidden)] +pub use static_assertions; -extern crate crunchy; pub use crunchy::unroll; #[macro_use] +#[rustfmt::skip] mod uint; pub use crate::uint::*; diff --git a/uint/src/uint.rs b/uint/src/uint.rs index c748d5a3f..3482aa6db 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -29,8 +29,106 @@ //! implementations for even more speed, hidden behind the `x64_arithmetic` //! feature flag. +use core::fmt; + +/// A list of error categories encountered when parsing numbers. +#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)] +#[non_exhaustive] +pub enum FromStrRadixErrKind { + /// A character in the input string is not valid for the given radix. + InvalidCharacter, + + /// The input length is not valid for the given radix. 
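+	/// For example, a string with more digits than the target type can hold.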
+ InvalidLength, + + /// The given radix is not supported. + UnsupportedRadix, +} + +#[derive(Debug)] +enum FromStrRadixErrSrc { + Hex(FromHexError), + Dec(FromDecStrErr), +} + +impl fmt::Display for FromStrRadixErrSrc { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + FromStrRadixErrSrc::Dec(d) => write!(f, "{}", d), + FromStrRadixErrSrc::Hex(h) => write!(f, "{}", h), + } + } +} + +/// The error type for parsing numbers from strings. +#[derive(Debug)] +pub struct FromStrRadixErr { + kind: FromStrRadixErrKind, + source: Option, +} + +impl FromStrRadixErr { + #[doc(hidden)] + pub fn unsupported() -> Self { + Self { kind: FromStrRadixErrKind::UnsupportedRadix, source: None } + } + + /// Returns the corresponding `FromStrRadixErrKind` for this error. + pub fn kind(&self) -> FromStrRadixErrKind { + self.kind + } +} + +impl fmt::Display for FromStrRadixErr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(ref src) = self.source { + return write!(f, "{}", src); + } + + match self.kind { + FromStrRadixErrKind::UnsupportedRadix => write!(f, "the given radix is not supported"), + FromStrRadixErrKind::InvalidCharacter => write!(f, "input contains an invalid character"), + FromStrRadixErrKind::InvalidLength => write!(f, "length not supported for radix or type"), + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for FromStrRadixErr { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self.source { + Some(FromStrRadixErrSrc::Dec(ref d)) => Some(d), + Some(FromStrRadixErrSrc::Hex(ref h)) => Some(h), + None => None, + } + } +} + +impl From for FromStrRadixErr { + fn from(e: FromDecStrErr) -> Self { + let kind = match e { + FromDecStrErr::InvalidCharacter => FromStrRadixErrKind::InvalidCharacter, + FromDecStrErr::InvalidLength => FromStrRadixErrKind::InvalidLength, + }; + + Self { kind, source: Some(FromStrRadixErrSrc::Dec(e)) } + } +} + +impl From for FromStrRadixErr { + fn from(e: FromHexError) -> Self { + let kind = match e.inner { + hex::FromHexError::InvalidHexCharacter { .. } => FromStrRadixErrKind::InvalidCharacter, + hex::FromHexError::InvalidStringLength => FromStrRadixErrKind::InvalidLength, + hex::FromHexError::OddLength => FromStrRadixErrKind::InvalidLength, + }; + + Self { kind, source: Some(FromStrRadixErrSrc::Hex(e)) } + } +} + /// Conversion from decimal string error -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Eq)] pub enum FromDecStrErr { /// Char not from range 0-9 InvalidCharacter, @@ -38,6 +136,47 @@ pub enum FromDecStrErr { InvalidLength, } +impl fmt::Display for FromDecStrErr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{}", + match self { + FromDecStrErr::InvalidCharacter => "a character is not in the range 0-9", + FromDecStrErr::InvalidLength => "the number is too large for the type", + } + ) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for FromDecStrErr {} + +#[derive(Debug)] +pub struct FromHexError { + inner: hex::FromHexError, +} + +impl fmt::Display for FromHexError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.inner) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for FromHexError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + Some(&self.inner) + } +} + +#[doc(hidden)] +impl From for FromHexError { + fn from(inner: hex::FromHexError) -> Self { + Self { inner } + } +} + #[macro_export] #[doc(hidden)] macro_rules! 
impl_map_from { @@ -47,7 +186,7 @@ macro_rules! impl_map_from { From::from(value as $to) } } - } + }; } #[macro_export] @@ -67,113 +206,109 @@ macro_rules! impl_try_from_for_primitive { } } } - } + }; } #[macro_export] #[doc(hidden)] macro_rules! uint_overflowing_binop { - ($name:ident, $n_words: tt, $self_expr: expr, $other: expr, $fn:expr) => ({ + ($name:ident, $n_words: tt, $self_expr: expr, $other: expr, $fn:expr) => {{ + use $crate::core_ as core; let $name(ref me) = $self_expr; let $name(ref you) = $other; - let mut ret = unsafe { $crate::core_::mem::uninitialized() }; - let ret_ptr = &mut ret as *mut [u64; $n_words] as *mut u64; + let mut ret = [0u64; $n_words]; let mut carry = 0u64; + $crate::static_assertions::const_assert!(core::isize::MAX as usize / core::mem::size_of::() > $n_words); + // `unroll!` is recursive, but doesn’t use `$crate::unroll`, so we need to ensure that it + // is in scope unqualified. + use $crate::unroll; unroll! { for i in 0..$n_words { - use $crate::core_::ptr; + use core::ptr; if carry != 0 { let (res1, overflow1) = ($fn)(me[i], you[i]); let (res2, overflow2) = ($fn)(res1, carry); - unsafe { - ptr::write( - ret_ptr.offset(i as _), - res2 - ); - } + ret[i] = res2; carry = (overflow1 as u8 + overflow2 as u8) as u64; } else { let (res, overflow) = ($fn)(me[i], you[i]); - unsafe { - ptr::write( - ret_ptr.offset(i as _), - res - ); - } - + ret[i] = res; carry = overflow as u64; } } } ($name(ret), carry > 0) - }) + }}; } #[macro_export] #[doc(hidden)] macro_rules! uint_full_mul_reg { ($name:ident, 8, $self_expr:expr, $other:expr) => { - uint_full_mul_reg!($name, 8, $self_expr, $other, |a, b| a != 0 || b != 0); + $crate::uint_full_mul_reg!($name, 8, $self_expr, $other, |a, b| a != 0 || b != 0); }; ($name:ident, $n_words:tt, $self_expr:expr, $other:expr) => { - uint_full_mul_reg!($name, $n_words, $self_expr, $other, |_, _| true); + $crate::uint_full_mul_reg!($name, $n_words, $self_expr, $other, |_, _| true); }; - ($name:ident, $n_words:tt, $self_expr:expr, $other:expr, $check:expr) => ({{ - #![allow(unused_assignments)] - - let $name(ref me) = $self_expr; - let $name(ref you) = $other; - let mut ret = [0u64; $n_words * 2]; - - unroll! { - for i in 0..$n_words { - let mut carry = 0u64; - let b = you[i]; - - unroll! { - for j in 0..$n_words { - if $check(me[j], carry) { - let a = me[j]; - - let (hi, low) = Self::split_u128(a as u128 * b as u128); - - let overflow = { - let existing_low = &mut ret[i + j]; - let (low, o) = low.overflowing_add(*existing_low); - *existing_low = low; - o - }; + ($name:ident, $n_words:tt, $self_expr:expr, $other:expr, $check:expr) => {{ + { + #![allow(unused_assignments)] - carry = { - let existing_hi = &mut ret[i + j + 1]; - let hi = hi + overflow as u64; - let (hi, o0) = hi.overflowing_add(carry); - let (hi, o1) = hi.overflowing_add(*existing_hi); - *existing_hi = hi; + let $name(ref me) = $self_expr; + let $name(ref you) = $other; + let mut ret = [0u64; $n_words * 2]; - (o0 | o1) as u64 + use $crate::unroll; + unroll! { + for i in 0..$n_words { + let mut carry = 0u64; + let b = you[i]; + + unroll! 
{ + for j in 0..$n_words { + if $check(me[j], carry) { + let a = me[j]; + + let (hi, low) = Self::split_u128(a as u128 * b as u128); + + let overflow = { + let existing_low = &mut ret[i + j]; + let (low, o) = low.overflowing_add(*existing_low); + *existing_low = low; + o + }; + + carry = { + let existing_hi = &mut ret[i + j + 1]; + let hi = hi + overflow as u64; + let (hi, o0) = hi.overflowing_add(carry); + let (hi, o1) = hi.overflowing_add(*existing_hi); + *existing_hi = hi; + + (o0 | o1) as u64 + } } } } } } - } - ret - }}); + ret + } + }}; } #[macro_export] #[doc(hidden)] macro_rules! uint_overflowing_mul { - ($name:ident, $n_words: tt, $self_expr: expr, $other: expr) => ({ - let ret: [u64; $n_words * 2] = uint_full_mul_reg!($name, $n_words, $self_expr, $other); + ($name:ident, $n_words: tt, $self_expr: expr, $other: expr) => {{ + let ret: [u64; $n_words * 2] = $crate::uint_full_mul_reg!($name, $n_words, $self_expr, $other); // The safety of this is enforced by the compiler let ret: [[u64; $n_words]; 2] = unsafe { $crate::core_::mem::transmute(ret) }; @@ -181,6 +316,7 @@ macro_rules! uint_overflowing_mul { // The compiler WILL NOT inline this if you remove this annotation. #[inline(always)] fn any_nonzero(arr: &[u64; $n_words]) -> bool { + use $crate::unroll; unroll! { for i in 0..$n_words { if arr[i] != 0 { @@ -193,25 +329,21 @@ macro_rules! uint_overflowing_mul { } ($name(ret[0]), any_nonzero(&ret[1])) - }) + }}; } #[macro_export] #[doc(hidden)] macro_rules! overflowing { - ($op: expr, $overflow: expr) => ( - { - let (overflow_x, overflow_overflow) = $op; - $overflow |= overflow_overflow; - overflow_x - } - ); - ($op: expr) => ( - { - let (overflow_x, _overflow_overflow) = $op; - overflow_x - } - ); + ($op: expr, $overflow: expr) => {{ + let (overflow_x, overflow_overflow) = $op; + $overflow |= overflow_overflow; + overflow_x + }}; + ($op: expr) => {{ + let (overflow_x, _overflow_overflow) = $op; + overflow_x + }}; } #[macro_export] @@ -221,7 +353,7 @@ macro_rules! panic_on_overflow { if $name { panic!("arithmetic operation overflow") } - } + }; } #[macro_export] @@ -234,7 +366,7 @@ macro_rules! impl_mul_from { fn mul(self, other: $other) -> $name { let bignum: $name = other.into(); let (result, overflow) = self.overflowing_mul(bignum); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); result } } @@ -245,7 +377,7 @@ macro_rules! impl_mul_from { fn mul(self, other: &'a $other) -> $name { let bignum: $name = (*other).into(); let (result, overflow) = self.overflowing_mul(bignum); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); result } } @@ -256,7 +388,7 @@ macro_rules! impl_mul_from { fn mul(self, other: &'a $other) -> $name { let bignum: $name = (*other).into(); let (result, overflow) = self.overflowing_mul(bignum); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); result } } @@ -267,7 +399,7 @@ macro_rules! impl_mul_from { fn mul(self, other: $other) -> $name { let bignum: $name = other.into(); let (result, overflow) = self.overflowing_mul(bignum); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); result } } @@ -278,7 +410,7 @@ macro_rules! impl_mul_from { *self = result } } - } + }; } #[macro_export] @@ -290,7 +422,7 @@ macro_rules! impl_mul_for_primitive { fn mul(self, other: $other) -> $name { let (result, carry) = self.overflowing_mul_u64(other as u64); - panic_on_overflow!(carry > 0); + $crate::panic_on_overflow!(carry > 0); result } } @@ -300,7 +432,7 @@ macro_rules! 
impl_mul_for_primitive { fn mul(self, other: &'a $other) -> $name { let (result, carry) = self.overflowing_mul_u64(*other as u64); - panic_on_overflow!(carry > 0); + $crate::panic_on_overflow!(carry > 0); result } } @@ -310,7 +442,7 @@ macro_rules! impl_mul_for_primitive { fn mul(self, other: &'a $other) -> $name { let (result, carry) = self.overflowing_mul_u64(*other as u64); - panic_on_overflow!(carry > 0); + $crate::panic_on_overflow!(carry > 0); result } } @@ -320,7 +452,7 @@ macro_rules! impl_mul_for_primitive { fn mul(self, other: $other) -> $name { let (result, carry) = self.overflowing_mul_u64(other as u64); - panic_on_overflow!(carry > 0); + $crate::panic_on_overflow!(carry > 0); result } } @@ -331,17 +463,17 @@ macro_rules! impl_mul_for_primitive { *self = result } } - } + }; } #[macro_export] macro_rules! construct_uint { ( $(#[$attr:meta])* $visibility:vis struct $name:ident (1); ) => { - construct_uint!{ @construct $(#[$attr])* $visibility struct $name (1); } + $crate::construct_uint!{ @construct $(#[$attr])* $visibility struct $name (1); } }; ( $(#[$attr:meta])* $visibility:vis struct $name:ident ( $n_words:tt ); ) => { - construct_uint! { @construct $(#[$attr])* $visibility struct $name ($n_words); } + $crate::construct_uint! { @construct $(#[$attr])* $visibility struct $name ($n_words); } impl $crate::core_::convert::From for $name { fn from(value: u128) -> $name { @@ -364,7 +496,7 @@ macro_rules! construct_uint { impl $name { /// Low 2 words (u128) #[inline] - pub fn low_u128(&self) -> u128 { + pub const fn low_u128(&self) -> u128 { let &$name(ref arr) = self; ((arr[1] as u128) << 64) + arr[0] as u128 } @@ -424,7 +556,7 @@ macro_rules! construct_uint { #[derive(Copy, Clone, Eq, PartialEq, Hash)] $visibility struct $name (pub [u64; $n_words]); - /// Get a reference to the underlying little-endian words. + /// Get a reference to the underlying little-endian words. impl AsRef<[u64]> for $name { #[inline] fn as_ref(&self) -> &[u64] { @@ -443,14 +575,25 @@ macro_rules! construct_uint { /// Maximum value. pub const MAX: $name = $name([u64::max_value(); $n_words]); + /// Converts a string slice in a given base to an integer. Only supports radixes of 10 + /// and 16. + pub fn from_str_radix(txt: &str, radix: u32) -> Result { + let parsed = match radix { + 10 => Self::from_dec_str(txt)?, + 16 => core::str::FromStr::from_str(txt)?, + _ => return Err($crate::FromStrRadixErr::unsupported()), + }; + + Ok(parsed) + } + /// Convert from a decimal string. pub fn from_dec_str(value: &str) -> $crate::core_::result::Result { - if !value.bytes().all(|b| b >= 48 && b <= 57) { - return Err($crate::FromDecStrErr::InvalidCharacter) - } - let mut res = Self::default(); - for b in value.bytes().map(|b| b - 48) { + for b in value.bytes().map(|b| b.wrapping_sub(b'0')) { + if b > 9 { + return Err($crate::FromDecStrErr::InvalidCharacter) + } let (r, overflow) = res.overflowing_mul_u64(10); if overflow > 0 { return Err($crate::FromDecStrErr::InvalidLength); @@ -466,14 +609,14 @@ macro_rules! construct_uint { /// Conversion to u32 #[inline] - pub fn low_u32(&self) -> u32 { + pub const fn low_u32(&self) -> u32 { let &$name(ref arr) = self; arr[0] as u32 } /// Low word (u64) #[inline] - pub fn low_u64(&self) -> u64 { + pub const fn low_u64(&self) -> u64 { let &$name(ref arr) = self; arr[0] } @@ -522,9 +665,10 @@ macro_rules! construct_uint { /// Whether this is zero. 
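+			/// This is a `const fn`, so it can also be evaluated in constant expressions.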
#[inline] - pub fn is_zero(&self) -> bool { + pub const fn is_zero(&self) -> bool { let &$name(ref arr) = self; - for i in 0..$n_words { if arr[i] != 0 { return false; } } + let mut i = 0; + while i < $n_words { if arr[i] != 0 { return false; } else { i += 1; } } return true; } @@ -553,7 +697,7 @@ macro_rules! construct_uint { /// /// Panics if `index` exceeds the bit width of the number. #[inline] - pub fn bit(&self, index: usize) -> bool { + pub const fn bit(&self, index: usize) -> bool { let &$name(ref arr) = self; arr[index / 64] & (1 << (index % 64)) != 0 } @@ -573,7 +717,7 @@ macro_rules! construct_uint { r } - /// Returns the number of leading zeros in the binary representation of self. + /// Returns the number of trailing zeros in the binary representation of self. pub fn trailing_zeros(&self) -> u32 { let mut r = 0; for i in 0..$n_words { @@ -588,20 +732,28 @@ macro_rules! construct_uint { r } - /// Return specific byte. + /// Return specific byte. Byte 0 is the least significant value (ie~ little endian). /// /// # Panics /// /// Panics if `index` exceeds the byte width of the number. #[inline] - pub fn byte(&self, index: usize) -> u8 { + pub const fn byte(&self, index: usize) -> u8 { let &$name(ref arr) = self; (arr[index / 8] >> (((index % 8)) * 8)) as u8 } + /// Convert to big-endian bytes. + #[inline] + pub fn to_big_endian(&self) -> [u8; $n_words * 8] { + let mut bytes = [0u8; $n_words * 8]; + self.write_as_big_endian(&mut bytes); + bytes + } + /// Write to the slice in big-endian format. #[inline] - pub fn to_big_endian(&self, bytes: &mut [u8]) { + pub fn write_as_big_endian(&self, bytes: &mut [u8]) { use $crate::byteorder::{ByteOrder, BigEndian}; debug_assert!($n_words * 8 == bytes.len()); for i in 0..$n_words { @@ -609,9 +761,16 @@ macro_rules! construct_uint { } } - /// Write to the slice in little-endian format. + /// Convert to little-endian bytes. + #[inline] + pub fn to_little_endian(&self) -> [u8; $n_words * 8] { + let mut bytes = [0u8; $n_words * 8]; + self.write_as_little_endian(&mut bytes); + bytes + } + #[inline] - pub fn to_little_endian(&self, bytes: &mut [u8]) { + pub fn write_as_little_endian(&self, bytes: &mut [u8]) { use $crate::byteorder::{ByteOrder, LittleEndian}; debug_assert!($n_words * 8 == bytes.len()); for i in 0..$n_words { @@ -635,29 +794,27 @@ macro_rules! construct_uint { /// Zero (additive identity) of this type. #[inline] - pub fn zero() -> Self { - From::from(0u64) + pub const fn zero() -> Self { + Self([0; $n_words]) } /// One (multiplicative identity) of this type. #[inline] - pub fn one() -> Self { - From::from(1u64) + pub const fn one() -> Self { + let mut words = [0; $n_words]; + words[0] = 1u64; + Self(words) } /// The maximum value which can be inhabited by this type. #[inline] - pub fn max_value() -> Self { - let mut result = [0; $n_words]; - for i in 0..$n_words { - result[i] = u64::max_value(); - } - $name(result) + pub const fn max_value() -> Self { + Self::MAX } fn full_shl(self, shift: u32) -> [u64; $n_words + 1] { debug_assert!(shift < Self::WORD_BITS as u32); - let mut u = [064; $n_words + 1]; + let mut u = [0u64; $n_words + 1]; let u_lo = self.0[0] << shift; let u_hi = self >> (Self::WORD_BITS as u32 - shift); u[0] = u_lo; @@ -818,7 +975,29 @@ macro_rules! construct_uint { self.div_mod_knuth(other, n, m) } - /// Fast exponentation by squaring + /// Compute the highest `n` such that `n * n <= self`. 
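+			///
+			/// For example, the integer square root of 17 is 4, since 4 * 4 <= 17
+			/// while 5 * 5 > 17.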
+ pub fn integer_sqrt(&self) -> Self { + let one = Self::one(); + if self <= &one { + return *self; + } + + // the implementation is based on: + // https://en.wikipedia.org/wiki/Integer_square_root#Using_only_integer_division + + // Set the initial guess to something higher than √self. + let shift: u32 = (self.bits() as u32 + 1) / 2; + let mut x_prev = one << shift; + loop { + let x = (x_prev + self / x_prev) >> 1; + if x >= x_prev { + return x_prev; + } + x_prev = x; + } + } + + /// Fast exponentiation by squaring /// https://en.wikipedia.org/wiki/Exponentiation_by_squaring /// /// # Panics @@ -837,19 +1016,19 @@ macro_rules! construct_uint { while n > u_one { if is_even(&n) { x = x * x; - n = n >> 1usize; + n >>= 1usize; } else { y = x * y; x = x * x; // to reduce odd number by 1 we should just clear the last bit - n.0[$n_words-1] = n.0[$n_words-1] & ((!0u64)>>1); - n = n >> 1usize; + n.0[$n_words-1] &= (!0u64)>>1; + n >>= 1usize; } } x * y } - /// Fast exponentation by squaring. Returns result and overflow flag. + /// Fast exponentiation by squaring. Returns result and overflow flag. pub fn overflowing_pow(self, expon: Self) -> (Self, bool) { if expon.is_zero() { return (Self::one(), false) } @@ -863,22 +1042,30 @@ macro_rules! construct_uint { while n > u_one { if is_even(&n) { - x = overflowing!(x.overflowing_mul(x), overflow); - n = n >> 1usize; + x = $crate::overflowing!(x.overflowing_mul(x), overflow); + n >>= 1usize; } else { - y = overflowing!(x.overflowing_mul(y), overflow); - x = overflowing!(x.overflowing_mul(x), overflow); + y = $crate::overflowing!(x.overflowing_mul(y), overflow); + x = $crate::overflowing!(x.overflowing_mul(x), overflow); n = (n - u_one) >> 1usize; } } - let res = overflowing!(x.overflowing_mul(y), overflow); + let res = $crate::overflowing!(x.overflowing_mul(y), overflow); (res, overflow) } - /// Add with overflow. + /// Checked exponentiation. Returns `None` if overflow occurred. + pub fn checked_pow(self, expon: $name) -> Option<$name> { + match self.overflowing_pow(expon) { + (_, true) => None, + (val, _) => Some(val), + } + } + + /// Addition which overflows and returns a flag if it does. #[inline(always)] pub fn overflowing_add(self, other: $name) -> ($name, bool) { - uint_overflowing_binop!( + $crate::uint_overflowing_binop!( $name, $n_words, self, @@ -887,10 +1074,10 @@ macro_rules! construct_uint { ) } - /// Addition which saturates at the maximum value (Self::max_value()). + /// Addition which saturates at the maximum value (Self::MAX). pub fn saturating_add(self, other: $name) -> $name { match self.overflowing_add(other) { - (_, true) => $name::max_value(), + (_, true) => $name::MAX, (val, false) => val, } } @@ -906,7 +1093,7 @@ macro_rules! construct_uint { /// Subtraction which underflows and returns a flag if it does. #[inline(always)] pub fn overflowing_sub(self, other: $name) -> ($name, bool) { - uint_overflowing_binop!( + $crate::uint_overflowing_binop!( $name, $n_words, self, @@ -931,16 +1118,25 @@ macro_rules! construct_uint { } } + /// Computes the absolute difference between self and other. + pub fn abs_diff(self, other: $name) -> $name { + if self > other { + self.overflowing_sub(other).0 + } else { + other.overflowing_sub(self).0 + } + } + /// Multiply with overflow, returning a flag if it does. 
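+			/// The returned flag is `true` when the full product does not fit into this type.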
#[inline(always)] pub fn overflowing_mul(self, other: $name) -> ($name, bool) { - uint_overflowing_mul!($name, $n_words, self, other) + $crate::uint_overflowing_mul!($name, $n_words, self, other) } /// Multiplication which saturates at the maximum value.. pub fn saturating_mul(self, other: $name) -> $name { match self.overflowing_mul(other) { - (_, true) => $name::max_value(), + (_, true) => $name::MAX, (val, false) => val, } } @@ -976,7 +1172,7 @@ macro_rules! construct_uint { if self.is_zero() { (self, false) } else { - (!self, true) + (!self + 1, true) } } @@ -991,43 +1187,9 @@ macro_rules! construct_uint { #[inline(always)] fn div_mod_word(hi: u64, lo: u64, y: u64) -> (u64, u64) { debug_assert!(hi < y); - // NOTE: this is slow (__udivti3) - // let x = (u128::from(hi) << 64) + u128::from(lo); - // let d = u128::from(d); - // ((x / d) as u64, (x % d) as u64) - // TODO: look at https://gmplib.org/~tege/division-paper.pdf - const TWO32: u64 = 1 << 32; - let s = y.leading_zeros(); - let y = y << s; - let (yn1, yn0) = Self::split(y); - let un32 = (hi << s) | lo.checked_shr(64 - s).unwrap_or(0); - let un10 = lo << s; - let (un1, un0) = Self::split(un10); - let mut q1 = un32 / yn1; - let mut rhat = un32 - q1 * yn1; - - while q1 >= TWO32 || q1 * yn0 > TWO32 * rhat + un1 { - q1 -= 1; - rhat += yn1; - if rhat >= TWO32 { - break; - } - } - - let un21 = un32.wrapping_mul(TWO32).wrapping_add(un1).wrapping_sub(q1.wrapping_mul(y)); - let mut q0 = un21 / yn1; - rhat = un21.wrapping_sub(q0.wrapping_mul(yn1)); - - while q0 >= TWO32 || q0 * yn0 > TWO32 * rhat + un0 { - q0 -= 1; - rhat += yn1; - if rhat >= TWO32 { - break; - } - } - - let rem = un21.wrapping_mul(TWO32).wrapping_add(un0).wrapping_sub(y.wrapping_mul(q0)); - (q1 * TWO32 + q0, rem >> s) + let x = (u128::from(hi) << 64) + u128::from(lo); + let y = u128::from(y); + ((x / y) as u64, (x % y) as u64) } #[inline(always)] @@ -1059,18 +1221,18 @@ macro_rules! construct_uint { } #[inline(always)] - fn mul_u64(a: u64, b: u64, carry: u64) -> (u64, u64) { - let (hi, lo) = Self::split_u128(u128::from(a) * u128::from(b) + u128::from(carry)); + const fn mul_u64(a: u64, b: u64, carry: u64) -> (u64, u64) { + let (hi, lo) = Self::split_u128(a as u128 * b as u128 + carry as u128); (lo, hi) } #[inline(always)] - fn split(a: u64) -> (u64, u64) { + const fn split(a: u64) -> (u64, u64) { (a >> 32, a & 0xFFFF_FFFF) } #[inline(always)] - fn split_u128(a: u128) -> (u64, u64) { + const fn split_u128(a: u128) -> (u64, u64) { ((a >> 64) as _, (a & 0xFFFFFFFFFFFFFFFF) as _) } @@ -1091,18 +1253,15 @@ macro_rules! construct_uint { /// Converts from big endian representation bytes in memory. pub fn from_big_endian(slice: &[u8]) -> Self { + use $crate::byteorder::{ByteOrder, BigEndian}; assert!($n_words * 8 >= slice.len()); + let mut padded = [0u8; $n_words * 8]; + padded[$n_words * 8 - slice.len() .. $n_words * 8].copy_from_slice(&slice); + let mut ret = [0; $n_words]; - unsafe { - let ret_u8: &mut [u8; $n_words * 8] = $crate::core_::mem::transmute(&mut ret); - let mut ret_ptr = ret_u8.as_mut_ptr(); - let mut slice_ptr = slice.as_ptr().offset(slice.len() as isize - 1); - for _ in 0..slice.len() { - *ret_ptr = *slice_ptr; - ret_ptr = ret_ptr.offset(1); - slice_ptr = slice_ptr.offset(-1); - } + for i in 0..$n_words { + ret[$n_words - i - 1] = BigEndian::read_u64(&padded[8 * i..]); } $name(ret) @@ -1110,35 +1269,56 @@ macro_rules! construct_uint { /// Converts from little endian representation bytes in memory. 
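+			/// Slices shorter than the full width are zero-extended: the missing
+			/// most-significant bytes are treated as zero.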
pub fn from_little_endian(slice: &[u8]) -> Self { + use $crate::byteorder::{ByteOrder, LittleEndian}; assert!($n_words * 8 >= slice.len()); + let mut padded = [0u8; $n_words * 8]; + padded[0..slice.len()].copy_from_slice(&slice); + let mut ret = [0; $n_words]; - unsafe { - let ret_u8: &mut [u8; $n_words * 8] = $crate::core_::mem::transmute(&mut ret); - ret_u8[0..slice.len()].copy_from_slice(&slice); + for i in 0..$n_words { + ret[i] = LittleEndian::read_u64(&padded[8 * i..]); } $name(ret) } - } - impl $crate::core_::convert::From<$name> for [u8; $n_words * 8] { - fn from(number: $name) -> Self { - let mut arr = [0u8; $n_words * 8]; - number.to_big_endian(&mut arr); - arr - } - } + fn fmt_hex(&self, f: &mut $crate::core_::fmt::Formatter, is_lower: bool) -> $crate::core_::fmt::Result { + let &$name(ref data) = self; + // special case. + if self.is_zero() { + return f.pad_integral(true, "0x", "0"); + } - impl $crate::core_::convert::From<[u8; $n_words * 8]> for $name { - fn from(bytes: [u8; $n_words * 8]) -> Self { - Self::from(&bytes) - } - } + let mut latch = false; + let mut buf = [0_u8; $n_words * 16]; + let mut i = 0; + for ch in data.iter().rev() { + for x in 0..16 { + // nibble < 16 + let nibble = (ch & (15u64 << ((15 - x) * 4) as u64)) >> (((15 - x) * 4) as u64); + if !latch { + latch = nibble != 0; + } + + if latch { + // nibble is `'0'..'9' 'a'..'f' 'A'..'F'` because nibble < 16 + let nibble = match nibble { + 0..=9 => nibble as u8 + b'0', + _ if is_lower => nibble as u8 - 10 + b'a', + _ => nibble as u8 - 10 + b'A', + }; + buf[i] = nibble; + i += 1; + } + } + } - impl<'a> $crate::core_::convert::From<&'a [u8; $n_words * 8]> for $name { - fn from(bytes: &[u8; $n_words * 8]) -> Self { - Self::from(&bytes[..]) + // sequence of `'0'..'9' 'a'..'f' 'A'..'F'` chars is guaranteed to be a valid UTF8 string + let s = unsafe { + $crate::core_::str::from_utf8_unchecked(&buf[0..i]) + }; + f.pad_integral(true, "0x", s) } } @@ -1156,10 +1336,10 @@ macro_rules! construct_uint { } } - impl_map_from!($name, u8, u64); - impl_map_from!($name, u16, u64); - impl_map_from!($name, u32, u64); - impl_map_from!($name, usize, u64); + $crate::impl_map_from!($name, u8, u64); + $crate::impl_map_from!($name, u16, u64); + $crate::impl_map_from!($name, u32, u64); + $crate::impl_map_from!($name, usize, u64); impl $crate::core_::convert::From for $name { fn from(value: i64) -> $name { @@ -1170,35 +1350,28 @@ macro_rules! construct_uint { } } - impl_map_from!($name, i8, i64); - impl_map_from!($name, i16, i64); - impl_map_from!($name, i32, i64); - impl_map_from!($name, isize, i64); - - // Converts from big endian representation. 
- impl<'a> $crate::core_::convert::From<&'a [u8]> for $name { - fn from(bytes: &[u8]) -> $name { - Self::from_big_endian(bytes) - } - } - - impl_try_from_for_primitive!($name, u8); - impl_try_from_for_primitive!($name, u16); - impl_try_from_for_primitive!($name, u32); - impl_try_from_for_primitive!($name, usize); - impl_try_from_for_primitive!($name, u64); - impl_try_from_for_primitive!($name, i8); - impl_try_from_for_primitive!($name, i16); - impl_try_from_for_primitive!($name, i32); - impl_try_from_for_primitive!($name, isize); - impl_try_from_for_primitive!($name, i64); + $crate::impl_map_from!($name, i8, i64); + $crate::impl_map_from!($name, i16, i64); + $crate::impl_map_from!($name, i32, i64); + $crate::impl_map_from!($name, isize, i64); + + $crate::impl_try_from_for_primitive!($name, u8); + $crate::impl_try_from_for_primitive!($name, u16); + $crate::impl_try_from_for_primitive!($name, u32); + $crate::impl_try_from_for_primitive!($name, usize); + $crate::impl_try_from_for_primitive!($name, u64); + $crate::impl_try_from_for_primitive!($name, i8); + $crate::impl_try_from_for_primitive!($name, i16); + $crate::impl_try_from_for_primitive!($name, i32); + $crate::impl_try_from_for_primitive!($name, isize); + $crate::impl_try_from_for_primitive!($name, i64); impl $crate::core_::ops::Add for $name where T: Into<$name> { type Output = $name; fn add(self, other: T) -> $name { let (result, overflow) = self.overflowing_add(other.into()); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); result } } @@ -1214,7 +1387,7 @@ macro_rules! construct_uint { impl $crate::core_::ops::AddAssign<$name> for $name { fn add_assign(&mut self, other: $name) { let (result, overflow) = self.overflowing_add(other); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); *self = result } } @@ -1225,7 +1398,7 @@ macro_rules! construct_uint { #[inline] fn sub(self, other: T) -> $name { let (result, overflow) = self.overflowing_sub(other.into()); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); result } } @@ -1241,23 +1414,23 @@ macro_rules! construct_uint { impl $crate::core_::ops::SubAssign<$name> for $name { fn sub_assign(&mut self, other: $name) { let (result, overflow) = self.overflowing_sub(other); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); *self = result } } // all other impls - impl_mul_from!($name, $name); - impl_mul_for_primitive!($name, u8); - impl_mul_for_primitive!($name, u16); - impl_mul_for_primitive!($name, u32); - impl_mul_for_primitive!($name, u64); - impl_mul_for_primitive!($name, usize); - impl_mul_for_primitive!($name, i8); - impl_mul_for_primitive!($name, i16); - impl_mul_for_primitive!($name, i32); - impl_mul_for_primitive!($name, i64); - impl_mul_for_primitive!($name, isize); + $crate::impl_mul_from!($name, $name); + $crate::impl_mul_for_primitive!($name, u8); + $crate::impl_mul_for_primitive!($name, u16); + $crate::impl_mul_for_primitive!($name, u32); + $crate::impl_mul_for_primitive!($name, u64); + $crate::impl_mul_for_primitive!($name, usize); + $crate::impl_mul_for_primitive!($name, i8); + $crate::impl_mul_for_primitive!($name, i16); + $crate::impl_mul_for_primitive!($name, i32); + $crate::impl_mul_for_primitive!($name, i64); + $crate::impl_mul_for_primitive!($name, isize); impl $crate::core_::ops::Div for $name where T: Into<$name> { type Output = $name; @@ -1323,6 +1496,12 @@ macro_rules! 
construct_uint { } } + impl $crate::core_::ops::BitAndAssign<$name> for $name { + fn bitand_assign(&mut self, rhs: $name) { + *self = *self & rhs; + } + } + impl $crate::core_::ops::BitXor<$name> for $name { type Output = $name; @@ -1338,6 +1517,12 @@ macro_rules! construct_uint { } } + impl $crate::core_::ops::BitXorAssign<$name> for $name { + fn bitxor_assign(&mut self, rhs: $name) { + *self = *self ^ rhs; + } + } + impl $crate::core_::ops::BitOr<$name> for $name { type Output = $name; @@ -1353,6 +1538,12 @@ macro_rules! construct_uint { } } + impl $crate::core_::ops::BitOrAssign<$name> for $name { + fn bitor_assign(&mut self, rhs: $name) { + *self = *self | rhs; + } + } + impl $crate::core_::ops::Not for $name { type Output = $name; @@ -1445,15 +1636,7 @@ macro_rules! construct_uint { impl $crate::core_::cmp::Ord for $name { fn cmp(&self, other: &$name) -> $crate::core_::cmp::Ordering { - let &$name(ref me) = self; - let &$name(ref you) = other; - let mut i = $n_words; - while i > 0 { - i -= 1; - if me[i] < you[i] { return $crate::core_::cmp::Ordering::Less; } - if me[i] > you[i] { return $crate::core_::cmp::Ordering::Greater; } - } - $crate::core_::cmp::Ordering::Equal + self.as_ref().iter().rev().cmp(other.as_ref().iter().rev()) } } @@ -1483,7 +1666,7 @@ macro_rules! construct_uint { loop { let digit = (current % ten).low_u64() as u8; buf[i] = digit + b'0'; - current = current / ten; + current /= ten; if current.is_zero() { break; } @@ -1494,62 +1677,54 @@ macro_rules! construct_uint { let s = unsafe { $crate::core_::str::from_utf8_unchecked(&buf[i..]) }; - f.write_str(s) + f.pad_integral(true, "", s) } } impl $crate::core_::fmt::LowerHex for $name { fn fmt(&self, f: &mut $crate::core_::fmt::Formatter) -> $crate::core_::fmt::Result { - let &$name(ref data) = self; - if f.alternate() { - $crate::core_::write!(f, "0x")?; - } - // special case. - if self.is_zero() { - return $crate::core_::write!(f, "0"); - } - - let mut latch = false; - for ch in data.iter().rev() { - for x in 0..16 { - let nibble = (ch & (15u64 << ((15 - x) * 4) as u64)) >> (((15 - x) * 4) as u64); - if !latch { - latch = nibble != 0; - } - - if latch { - $crate::core_::write!(f, "{:x}", nibble)?; - } - } - } - Ok(()) + self.fmt_hex(f, true) } } - impl_std_for_uint!($name, $n_words); - // `$n_words * 8` because macro expects bytes and - // uints use 64 bit (8 byte) words - impl_quickcheck_arbitrary_for_uint!($name, ($n_words * 8)); - } -} + impl $crate::core_::fmt::UpperHex for $name { + fn fmt(&self, f: &mut $crate::core_::fmt::Formatter) -> $crate::core_::fmt::Result { + self.fmt_hex(f, false) + } + } -#[cfg(feature = "std")] -#[macro_export] -#[doc(hidden)] -macro_rules! 
impl_std_for_uint { - ($name: ident, $n_words: tt) => { impl $crate::core_::str::FromStr for $name { - type Err = $crate::rustc_hex::FromHexError; + type Err = $crate::FromHexError; fn from_str(value: &str) -> $crate::core_::result::Result<$name, Self::Err> { - use $crate::rustc_hex::FromHex; - let bytes: Vec = match value.len() % 2 == 0 { - true => value.from_hex()?, - false => ("0".to_owned() + value).from_hex()?, - }; + let value = value.strip_prefix("0x").unwrap_or(value); + const BYTES_LEN: usize = $n_words * 8; + const MAX_ENCODED_LEN: usize = BYTES_LEN * 2; + + let mut bytes = [0_u8; BYTES_LEN]; + + let encoded = value.as_bytes(); + + if encoded.len() > MAX_ENCODED_LEN { + return Err($crate::hex::FromHexError::InvalidStringLength.into()); + } + + if encoded.len() % 2 == 0 { + let out = &mut bytes[BYTES_LEN - encoded.len() / 2..]; + + $crate::hex::decode_to_slice(encoded, out).map_err(Self::Err::from)?; + } else { + // Prepend '0' by overlaying our value on a scratch buffer filled with '0' characters. + let mut s = [b'0'; MAX_ENCODED_LEN]; + s[MAX_ENCODED_LEN - encoded.len()..].copy_from_slice(encoded); + let encoded = &s[MAX_ENCODED_LEN - encoded.len() - 1..]; + + let out = &mut bytes[BYTES_LEN - encoded.len() / 2..]; - let bytes_ref: &[u8] = &bytes; - Ok(From::from(bytes_ref)) + $crate::hex::decode_to_slice(encoded, out).map_err(Self::Err::from)?; + } + + Ok(Self::from_big_endian(&bytes)) } } @@ -1558,14 +1733,12 @@ macro_rules! impl_std_for_uint { s.parse().unwrap() } } - } -} -#[cfg(not(feature = "std"))] -#[macro_export] -#[doc(hidden)] -macro_rules! impl_std_for_uint { - ($name: ident, $n_words: tt) => {} + // `$n_words * 8` because macro expects bytes and + // uints use 64 bit (8 byte) words + $crate::impl_quickcheck_arbitrary_for_uint!($name, ($n_words * 8)); + $crate::impl_arbitrary_for_uint!($name, ($n_words * 8)); + } } #[cfg(feature = "quickcheck")] @@ -1574,36 +1747,64 @@ macro_rules! impl_std_for_uint { macro_rules! impl_quickcheck_arbitrary_for_uint { ($uint: ty, $n_bytes: tt) => { impl $crate::quickcheck::Arbitrary for $uint { - fn arbitrary(g: &mut G) -> Self { - let mut res = [0u8; $n_bytes]; - - let p = g.next_f64(); + fn arbitrary(g: &mut $crate::quickcheck::Gen) -> Self { + let p = usize::arbitrary(g) % 100; // make it more likely to generate smaller numbers that // don't use up the full $n_bytes let range = // 10% chance to generate number that uses up to $n_bytes - if p < 0.1 { + if p < 10 { $n_bytes // 10% chance to generate number that uses up to $n_bytes / 2 - } else if p < 0.2 { + } else if p < 20 { $n_bytes / 2 // 80% chance to generate number that uses up to $n_bytes / 5 } else { $n_bytes / 5 }; - let size = g.gen_range(0, range); - g.fill_bytes(&mut res[..size]); + let range = $crate::core_::cmp::max(range, 1); + let size: usize = usize::arbitrary(g) % range; + + let res: [u8; $n_bytes] = $crate::core_::array::from_fn(|i| { + if i > size { + 0 + } else { + u8::arbitrary(g) + } + }); - res.as_ref().into() + Self::from_big_endian(res.as_ref()) } } - } + }; } #[cfg(not(feature = "quickcheck"))] #[macro_export] #[doc(hidden)] macro_rules! impl_quickcheck_arbitrary_for_uint { - ($uint: ty, $n_bytes: tt) => {} + ($uint: ty, $n_bytes: tt) => {}; +} + +#[cfg(feature = "arbitrary")] +#[macro_export] +#[doc(hidden)] +macro_rules! 
impl_arbitrary_for_uint { + ($uint: ty, $n_bytes: tt) => { + impl $crate::arbitrary::Arbitrary<'_> for $uint { + fn arbitrary(u: &mut $crate::arbitrary::Unstructured<'_>) -> $crate::arbitrary::Result { + let mut res = [0u8; $n_bytes]; + u.fill_buffer(&mut res)?; + Ok(Self::from_big_endian(&res)) + } + } + }; +} + +#[cfg(not(feature = "arbitrary"))] +#[macro_export] +#[doc(hidden)] +macro_rules! impl_arbitrary_for_uint { + ($uint: ty, $n_bytes: tt) => {}; } diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index de77b828e..61c03c86d 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -1,19 +1,14 @@ -extern crate core; - -#[macro_use] -extern crate uint; - -#[cfg(feature = "quickcheck")] -#[macro_use] -extern crate quickcheck; - -#[cfg_attr(all(test, feature = "quickcheck"), macro_use(unroll))] -extern crate crunchy; - -use core::u64::MAX; -use core::str::FromStr; -use core::convert::TryInto; -use uint::{FromDecStrErr}; +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use core::{convert::TryInto, str::FromStr, u64::MAX}; +use crunchy::unroll; +use uint::{construct_uint, overflowing, FromDecStrErr}; construct_uint! { pub struct U256(4); @@ -23,6 +18,94 @@ construct_uint! { pub struct U512(8); } +#[cfg(feature = "std")] +#[test] +fn hash_impl_is_the_same_as_for_a_slice() { + use core::hash::{Hash, Hasher as _}; + use std::collections::hash_map::DefaultHasher; + + let uint_hash = { + let mut h = DefaultHasher::new(); + let uint = U256::from(123u64); + Hash::hash(&uint, &mut h); + h.finish() + }; + let slice_hash = { + let mut h = DefaultHasher::new(); + Hash::hash(&[123u64, 0, 0, 0], &mut h); + h.finish() + }; + assert_eq!(uint_hash, slice_hash); +} + +// https://github.com/paritytech/parity-common/issues/420 +#[test] +fn const_matching_works() { + const ONE: U256 = U256([1, 0, 0, 0]); + match U256::zero() { + ONE => unreachable!(), + _ => {}, + } +} + +#[test] +fn max() { + let max = U256::MAX; + assert_eq!(max.0, [0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF]); + + let max = U512::MAX; + assert_eq!( + max.0, + [ + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF + ] + ); +} + +#[test] +fn one() { + let one = U256::one(); + assert_eq!(one.0, [1, 0, 0, 0]); + + let one = U512::one(); + assert_eq!(one.0, [1, 0, 0, 0, 0, 0, 0, 0]); + + let any = U256::from(123456789); + assert_eq!(any * U256::one(), any); + + let any = U512::from(123456789); + assert_eq!(any * U512::one(), any); +} + +#[test] +#[allow(deprecated)] +fn max_value() { + let max = U256::max_value(); + assert_eq!(max.0, [0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF]); + + let max = U512::max_value(); + assert_eq!( + max.0, + [ + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF + ] + ); +} + #[test] fn u128_conversions() { let mut a = U256::from(u128::max_value()); @@ -39,6 +122,14 @@ fn uint256_checked_ops() { let a = U256::from(10); let b = !U256::from(1); + assert_eq!(U256::from(10).checked_pow(U256::from(0)), Some(U256::from(1))); + assert_eq!(U256::from(10).checked_pow(U256::from(1)), 
Some(U256::from(10))); + assert_eq!(U256::from(10).checked_pow(U256::from(2)), Some(U256::from(100))); + assert_eq!(U256::from(10).checked_pow(U256::from(3)), Some(U256::from(1000))); + assert_eq!(U256::from(10).checked_pow(U256::from(20)), Some(U256::exp10(20))); + assert_eq!(U256::from(2).checked_pow(U256::from(0x100)), None); + assert_eq!(U256::MAX.checked_pow(U256::from(2)), None); + assert_eq!(a.checked_add(b), None); assert_eq!(a.checked_add(a), Some(20.into())); @@ -58,6 +149,17 @@ fn uint256_checked_ops() { assert_eq!(z.checked_neg(), Some(z)); } +#[test] +fn uint256_abs_diff() { + let zero = U256::zero(); + let max = U256::MAX; + + assert_eq!(zero.abs_diff(zero), zero); + assert_eq!(max.abs_diff(max), zero); + assert_eq!(zero.abs_diff(max), max); + assert_eq!(max.abs_diff(zero), max); +} + #[test] fn uint256_from() { let e = U256([10, 0, 0, 0]); @@ -73,120 +175,49 @@ fn uint256_from() { assert_eq!(e, ud); // test initialization from bytes - let va = U256::from(&[10u8][..]); + let va = U256::from_big_endian(&[10u8][..]); assert_eq!(e, va); // more tests for initialization from bytes - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from(&[0x10u8, 0x10][..])); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from(&[0x12u8, 0xf0][..])); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from(&[0, 0x12u8, 0xf0][..])); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from(&[0, 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0][..])); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from(&[1, 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0][..])); + assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_big_endian(&[0x10u8, 0x10][..])); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_big_endian(&[0x12u8, 0xf0][..])); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_big_endian(&[0, 0x12u8, 0xf0][..])); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_big_endian(&[0, 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0][..])); + assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_big_endian(&[1, 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0][..])); assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from(& - [ - 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, - 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, - 0, 0, 0, 0, 0, 0, 0, 1, - 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0 + U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), + U256::from_big_endian( + &[ + 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0 ][..] 
) ); assert_eq!( U256([0x00192437100019fa, 0x243710, 0, 0]), - U256::from(&[0x24u8, 0x37, 0x10,0, 0x19, 0x24, 0x37, 0x10, 0, 0x19, 0xfa][..]) + U256::from_big_endian(&[0x24u8, 0x37, 0x10, 0, 0x19, 0x24, 0x37, 0x10, 0, 0x19, 0xfa][..]) ); // test initializtion from string let sa = U256::from_str("0a").unwrap(); + let sa2 = U256::from_str("0x0a").unwrap(); + assert_eq!(sa2, sa); assert_eq!(e, sa); + assert_eq!(U256([0, 0, 0, 0]), U256::from_str("").unwrap()); + assert_eq!(U256([0x1, 0, 0, 0]), U256::from_str("1").unwrap()); + assert_eq!(U256([0x101, 0, 0, 0]), U256::from_str("101").unwrap()); assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_str("0100000000000012f0").unwrap()); assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() - ); - let sa = U256::from_str("0a").unwrap(); - assert_eq!(e, sa); - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); - assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() - ); - let sa = U256::from_str("0a").unwrap(); - assert_eq!(e, sa); - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); - assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() - ); - let sa = U256::from_str("0a").unwrap(); - assert_eq!(e, sa); - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); - assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() - ); - let sa = U256::from_str("0a").unwrap(); - assert_eq!(e, sa); - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from_str("0000000012f0").unwrap()); - 
assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); - assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() - ); - let sa = U256::from_str("0a").unwrap(); - assert_eq!(e, sa); - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); - assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() - ); - let sa = U256::from_str("0a").unwrap(); - assert_eq!(e, sa); - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); - assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() - ); - let sa = U256::from_str("0a").unwrap(); - assert_eq!(e, sa); - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); - assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), + U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() ); + + // This string contains more bits than what fits in a U256. + assert!(U256::from_str("000000000000000000000000000000000000000000000000000000000000000000").is_err()); + assert!(U256::from_str("100000000000000000000000000000000000000000000000000000000000000000").is_err()); } #[test] @@ -194,7 +225,7 @@ fn uint256_try_into_primitives() { macro_rules! try_into_uint_primitive_ok { ($primitive: ty) => { assert_eq!(U256::from(10).try_into() as Result<$primitive, _>, Ok(<$primitive>::from(10u8))); - } + }; } try_into_uint_primitive_ok!(u8); try_into_uint_primitive_ok!(u16); @@ -206,7 +237,7 @@ fn uint256_try_into_primitives() { macro_rules! 
try_into_iint_primitive_ok { ($primitive: ty) => { assert_eq!(U256::from(10).try_into() as Result<$primitive, _>, Ok(<$primitive>::from(10i8))); - } + }; } try_into_iint_primitive_ok!(i8); try_into_iint_primitive_ok!(i16); @@ -221,7 +252,7 @@ fn uint256_try_into_primitives() { U256::from(<$small>::max_value() as $big + 1).try_into() as Result<$small, _>, Err(concat!("integer overflow when casting to ", stringify!($small))) ); - } + }; } try_into_primitive_err!(u8, u16); try_into_primitive_err!(u16, u32); @@ -242,9 +273,8 @@ fn uint256_try_into_primitives() { fn uint256_to() { let hex = "8090a0b0c0d0e0f00910203040506077583a2cf8264910e1436bda32571012f0"; let uint = U256::from_str(hex).unwrap(); - let mut bytes = [0u8; 32]; - uint.to_big_endian(&mut bytes); - let uint2 = U256::from(&bytes[..]); + let bytes = uint.to_big_endian(); + let uint2 = U256::from_big_endian(&bytes[..]); assert_eq!(uint, uint2); } @@ -286,7 +316,6 @@ fn uint256_bits_test() { } #[test] -#[cfg_attr(feature="dev", allow(eq_op))] fn uint256_comp_test() { let small = U256([10u64, 0, 0, 0]); let big = U256([0x8C8C3EE70C644118u64, 0x0209E7378231E632, 0, 0]); @@ -301,6 +330,10 @@ fn uint256_comp_test() { assert!(bigger >= big); assert!(bigger >= small); assert!(small <= small); + assert_eq!(small, small); + assert_eq!(biggest, biggest); + assert_ne!(big, biggest); + assert_ne!(big, bigger); } #[test] @@ -440,10 +473,7 @@ fn uint256_overflowing_pow() { U256::from(2).overflowing_pow(U256::from(0xff)), (U256::from_str("8000000000000000000000000000000000000000000000000000000000000000").unwrap(), false) ); - assert_eq!( - U256::from(2).overflowing_pow(U256::from(0x100)), - (U256::zero(), true) - ); + assert_eq!(U256::from(2).overflowing_pow(U256::from(0x100)), (U256::zero(), true)); } #[test] @@ -462,9 +492,9 @@ fn uint256_mul2() { #[test] fn uint256_overflowing_mul() { assert_eq!( - U256::from_str("100000000000000000000000000000000").unwrap().overflowing_mul( - U256::from_str("100000000000000000000000000000000").unwrap() - ), + U256::from_str("100000000000000000000000000000000") + .unwrap() + .overflowing_mul(U256::from_str("100000000000000000000000000000000").unwrap()), (U256::zero(), true) ); } @@ -482,10 +512,11 @@ fn uint512_mul() { #[test] fn uint256_mul_overflow() { assert_eq!( - U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - .overflowing_mul( - U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - ), + U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + .unwrap() + .overflowing_mul( + U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() + ), (U256::from_str("1").unwrap(), true) ); } @@ -494,36 +525,62 @@ fn uint256_mul_overflow() { #[should_panic] #[allow(unused_must_use)] fn uint256_mul_overflow_panic() { - U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - * - U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(); + U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() * + U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(); } #[test] fn uint256_sub_overflow() { assert_eq!( - U256::from_str("0").unwrap() - .overflowing_sub( - U256::from_str("1").unwrap() - ), + U256::from_str("0").unwrap().overflowing_sub(U256::from_str("1").unwrap()), 
(U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(), true) - ); + ); +} + +#[test] +fn uint256_neg_overflow() { + assert_eq!(U256::from_str("0").unwrap().overflowing_neg(), (U256::from_str("0").unwrap(), false)); + assert_eq!( + U256::from_str("1").unwrap().overflowing_neg(), + (U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(), true) + ); + assert_eq!( + U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + .unwrap() + .overflowing_neg(), + (U256::from_str("1").unwrap(), true) + ); + assert_eq!( + U256::from_str("8000000000000000000000000000000000000000000000000000000000000000") + .unwrap() + .overflowing_neg(), + (U256::from_str("8000000000000000000000000000000000000000000000000000000000000000").unwrap(), true) + ); + assert_eq!( + U256::from_str("ffffffffffffffff0000000000000000ffffffffffffffff0000000000000000") + .unwrap() + .overflowing_neg(), + (U256::from_str("0000000000000000ffffffffffffffff00000000000000010000000000000000").unwrap(), true) + ); + assert_eq!( + U256::from_str("0000000000000000ffffffffffffffff0000000000000000ffffffffffffffff") + .unwrap() + .overflowing_neg(), + (U256::from_str("ffffffffffffffff0000000000000000ffffffffffffffff0000000000000001").unwrap(), true) + ); } #[test] #[should_panic] #[allow(unused_must_use)] fn uint256_sub_overflow_panic() { - U256::from_str("0").unwrap() - - - U256::from_str("1").unwrap(); + U256::from_str("0").unwrap() - U256::from_str("1").unwrap(); } #[test] fn uint256_shl() { assert_eq!( - U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - << 4, + U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() << 4, U256::from_str("fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0").unwrap() ); } @@ -531,13 +588,11 @@ fn uint256_shl() { #[test] fn uint256_shl_words() { assert_eq!( - U256::from_str("0000000000000001ffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - << 64, + U256::from_str("0000000000000001ffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() << 64, U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000").unwrap() ); assert_eq!( - U256::from_str("0000000000000000ffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - << 64, + U256::from_str("0000000000000000ffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() << 64, U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000").unwrap() ); } @@ -545,18 +600,17 @@ fn uint256_shl_words() { #[test] fn uint256_mul() { assert_eq!( - U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - * - U256::from_str("2").unwrap(), + U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() * + U256::from_str("2").unwrap(), U256::from_str("fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe").unwrap() - ); + ); } #[test] fn uint256_div() { - assert_eq!(U256::from(10u64) / U256::from(1u64), U256::from(10u64)); - assert_eq!(U256::from(10u64) / U256::from(2u64), U256::from(5u64)); - assert_eq!(U256::from(10u64) / U256::from(3u64), U256::from(3u64)); + assert_eq!(U256::from(10u64) / U256::from(1u64), U256::from(10u64)); + assert_eq!(U256::from(10u64) / U256::from(2u64), U256::from(5u64)); + assert_eq!(U256::from(10u64) / U256::from(3u64), U256::from(3u64)); } #[test] @@ -569,19 +623,36 @@ fn uint256_rem() { fn uint256_from_dec_str() { 
assert_eq!(U256::from_dec_str("10").unwrap(), U256::from(10u64)); assert_eq!(U256::from_dec_str("1024").unwrap(), U256::from(1024u64)); - assert_eq!(U256::from_dec_str("115792089237316195423570985008687907853269984665640564039457584007913129639936"), Err(FromDecStrErr::InvalidLength)); + assert_eq!( + U256::from_dec_str("115792089237316195423570985008687907853269984665640564039457584007913129639936"), + Err(FromDecStrErr::InvalidLength) + ); assert_eq!(U256::from_dec_str("0x11"), Err(FromDecStrErr::InvalidCharacter)); } #[test] fn display_uint() { - let s = "12345678987654321023456789"; - assert_eq!(format!("{}", U256::from_dec_str(s).unwrap()), s); + let s = U256::from_dec_str("12345678987654321023456789").unwrap(); + assert_eq!(format!("{}", s), "12345678987654321023456789"); + assert_eq!(format!("{:x}", s), "a364c995584f929f39615"); + assert_eq!(format!("{:X}", s), "A364C995584F929F39615"); + assert_eq!(format!("{:032}", s), "00000012345678987654321023456789"); + assert_eq!(format!("{:032x}", s), "00000000000a364c995584f929f39615"); + assert_eq!(format!("{:032X}", s), "00000000000A364C995584F929F39615"); + assert_eq!(format!("{:#032x}", s), "0x000000000a364c995584f929f39615"); + assert_eq!(format!("{:#032X}", s), "0x000000000A364C995584F929F39615"); } #[test] fn display_uint_zero() { - assert_eq!(format!("{}", U256::from(0)), "0"); + let s = U256::from(0); + assert_eq!(format!("{}", s), "0"); + assert_eq!(format!("{:x}", s), "0"); + assert_eq!(format!("{:X}", s), "0"); + assert_eq!(format!("{:032x}", s), "00000000000000000000000000000000"); + assert_eq!(format!("{:032X}", s), "00000000000000000000000000000000"); + assert_eq!(format!("{:#032x}", s), "0x000000000000000000000000000000"); + assert_eq!(format!("{:#032X}", s), "0x000000000000000000000000000000"); } #[test] @@ -604,16 +675,14 @@ fn u512_multi_adds() { let (_, overflow) = U512([0, 0, 0, 0, 0, 0, 2, 1]).overflowing_add(U512([0, 0, 0, 0, 0, 0, 3, 1])); assert!(!overflow); - let (_, overflow) = U512([MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX]) - .overflowing_add(U512([MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX])); + let (_, overflow) = + U512([MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX]).overflowing_add(U512([MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX])); assert!(overflow); - let (_, overflow) = U512([0, 0, 0, 0, 0, 0, 0, MAX]) - .overflowing_add(U512([0, 0, 0, 0, 0, 0, 0, MAX])); + let (_, overflow) = U512([0, 0, 0, 0, 0, 0, 0, MAX]).overflowing_add(U512([0, 0, 0, 0, 0, 0, 0, MAX])); assert!(overflow); - let (_, overflow) = U512([0, 0, 0, 0, 0, 0, 0, MAX]) - .overflowing_add(U512([0, 0, 0, 0, 0, 0, 0, 0])); + let (_, overflow) = U512([0, 0, 0, 0, 0, 0, 0, MAX]).overflowing_add(U512([0, 0, 0, 0, 0, 0, 0, 0])); assert!(!overflow); } @@ -629,8 +698,7 @@ fn u256_multi_adds() { assert_eq!(result, U256([0, 0, 5, 2])); assert!(!overflow); - let (_, overflow) = U256([MAX, MAX, MAX, MAX]) - .overflowing_add(U256([MAX, MAX, MAX, MAX])); + let (_, overflow) = U256([MAX, MAX, MAX, MAX]).overflowing_add(U256([MAX, MAX, MAX, MAX])); assert!(overflow); let (_, overflow) = U256([0, 0, 0, MAX]).overflowing_add(U256([0, 0, 0, MAX])); @@ -648,12 +716,10 @@ fn u256_multi_subs() { let (_, overflow) = U256([0, 0, 2, 1]).overflowing_sub(U256([0, 0, 3, 1])); assert!(overflow); - let (result, overflow) = - U256([MAX, MAX, MAX, MAX]) - .overflowing_sub(U256([MAX/2, MAX/2, MAX/2, MAX/2])); + let (result, overflow) = U256([MAX, MAX, MAX, MAX]).overflowing_sub(U256([MAX / 2, MAX / 2, MAX / 2, MAX / 2])); assert!(!overflow); - assert_eq!(U256([MAX/2+1, MAX/2+1, MAX/2+1, 
MAX/2+1]), result); + assert_eq!(U256([MAX / 2 + 1, MAX / 2 + 1, MAX / 2 + 1, MAX / 2 + 1]), result); let (result, overflow) = U256([0, 0, 0, 1]).overflowing_sub(U256([0, 0, 1, 0])); assert!(!overflow); @@ -682,61 +748,51 @@ fn u512_multi_subs() { #[test] fn u256_multi_carry_all() { let (result, _) = U256([MAX, 0, 0, 0]).overflowing_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U256([1, MAX-1, 0, 0]), result); + assert_eq!(U256([1, MAX - 1, 0, 0]), result); let (result, _) = U256([0, MAX, 0, 0]).overflowing_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U256([0, 1, MAX-1, 0]), result); + assert_eq!(U256([0, 1, MAX - 1, 0]), result); let (result, _) = U256([MAX, MAX, 0, 0]).overflowing_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U256([1, MAX, MAX-1, 0]), result); + assert_eq!(U256([1, MAX, MAX - 1, 0]), result); let (result, _) = U256([MAX, 0, 0, 0]).overflowing_mul(U256([MAX, MAX, 0, 0])); - assert_eq!(U256([1, MAX, MAX-1, 0]), result); + assert_eq!(U256([1, MAX, MAX - 1, 0]), result); - let (result, _) = U256([MAX, MAX, 0, 0]) - .overflowing_mul(U256([MAX, MAX, 0, 0])); - assert_eq!(U256([1, 0, MAX-1, MAX]), result); + let (result, _) = U256([MAX, MAX, 0, 0]).overflowing_mul(U256([MAX, MAX, 0, 0])); + assert_eq!(U256([1, 0, MAX - 1, MAX]), result); let (result, _) = U256([MAX, 0, 0, 0]).overflowing_mul(U256([MAX, MAX, MAX, 0])); - assert_eq!(U256([1, MAX, MAX, MAX-1]), result); + assert_eq!(U256([1, MAX, MAX, MAX - 1]), result); let (result, _) = U256([MAX, MAX, MAX, 0]).overflowing_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U256([1, MAX, MAX, MAX-1]), result); + assert_eq!(U256([1, MAX, MAX, MAX - 1]), result); - let (result, _) = U256([MAX, 0, 0, 0]).overflowing_mul( - U256([MAX, MAX, MAX, MAX])); + let (result, _) = U256([MAX, 0, 0, 0]).overflowing_mul(U256([MAX, MAX, MAX, MAX])); assert_eq!(U256([1, MAX, MAX, MAX]), result); - let (result, _) = U256([MAX, MAX, MAX, MAX]) - .overflowing_mul(U256([MAX, 0, 0, 0])); + let (result, _) = U256([MAX, MAX, MAX, MAX]).overflowing_mul(U256([MAX, 0, 0, 0])); assert_eq!(U256([1, MAX, MAX, MAX]), result); - let (result, _) = U256([MAX, MAX, MAX, 0]) - .overflowing_mul(U256([MAX, MAX, 0, 0])); - assert_eq!(U256([1, 0, MAX, MAX-1]), result); + let (result, _) = U256([MAX, MAX, MAX, 0]).overflowing_mul(U256([MAX, MAX, 0, 0])); + assert_eq!(U256([1, 0, MAX, MAX - 1]), result); - let (result, _) = U256([MAX, MAX, 0, 0]) - .overflowing_mul(U256([MAX, MAX, MAX, 0])); - assert_eq!(U256([1, 0, MAX, MAX-1]), result); + let (result, _) = U256([MAX, MAX, 0, 0]).overflowing_mul(U256([MAX, MAX, MAX, 0])); + assert_eq!(U256([1, 0, MAX, MAX - 1]), result); - let (result, _) = U256([MAX, MAX, MAX, MAX]) - .overflowing_mul(U256([MAX, MAX, 0, 0])); + let (result, _) = U256([MAX, MAX, MAX, MAX]).overflowing_mul(U256([MAX, MAX, 0, 0])); assert_eq!(U256([1, 0, MAX, MAX]), result); - let (result, _) = U256([MAX, MAX, 0, 0]) - .overflowing_mul(U256([MAX, MAX, MAX, MAX])); + let (result, _) = U256([MAX, MAX, 0, 0]).overflowing_mul(U256([MAX, MAX, MAX, MAX])); assert_eq!(U256([1, 0, MAX, MAX]), result); - let (result, _) = U256([MAX, MAX, MAX, 0]) - .overflowing_mul(U256([MAX, MAX, MAX, 0])); - assert_eq!(U256([1, 0, 0, MAX-1]), result); + let (result, _) = U256([MAX, MAX, MAX, 0]).overflowing_mul(U256([MAX, MAX, MAX, 0])); + assert_eq!(U256([1, 0, 0, MAX - 1]), result); - let (result, _) = U256([MAX, MAX, MAX, 0]) - .overflowing_mul(U256([MAX, MAX, MAX, MAX])); + let (result, _) = U256([MAX, MAX, MAX, 0]).overflowing_mul(U256([MAX, MAX, MAX, MAX])); assert_eq!(U256([1, 0, 0, MAX]), result); - 
let (result, _) = U256([MAX, MAX, MAX, MAX]) - .overflowing_mul(U256([MAX, MAX, MAX, 0])); + let (result, _) = U256([MAX, MAX, MAX, MAX]).overflowing_mul(U256([MAX, MAX, MAX, 0])); assert_eq!(U256([1, 0, 0, MAX]), result); let (result, _) = U256([0, 0, 0, MAX]).overflowing_mul(U256([0, 0, 0, MAX])); @@ -745,8 +801,7 @@ fn u256_multi_carry_all() { let (result, _) = U256([1, 0, 0, 0]).overflowing_mul(U256([0, 0, 0, MAX])); assert_eq!(U256([0, 0, 0, MAX]), result); - let (result, _) = U256([MAX, MAX, MAX, MAX]) - .overflowing_mul(U256([MAX, MAX, MAX, MAX])); + let (result, _) = U256([MAX, MAX, MAX, MAX]).overflowing_mul(U256([MAX, MAX, MAX, MAX])); assert_eq!(U256([1, 0, 0, 0]), result); } @@ -815,7 +870,14 @@ fn u256_multi_muls_overflow() { #[test] fn u512_div() { - let fuzz_data = [0x38,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xff,0xff,0xff,0x7,0x0,0x0,0x0,0x0,0xc1,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x8,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xfe,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]; + let fuzz_data = [ + 0x38, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0x7, 0x0, 0x0, 0x0, 0x0, 0xc1, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + ]; let a = U512::from_little_endian(&fuzz_data[..64]); let b = U512::from_little_endian(&fuzz_data[64..]); let (x, y) = (a / b, a % b); @@ -830,41 +892,52 @@ fn big_endian() { assert_eq!(source, U256::from(1)); - source.to_big_endian(&mut target); + source.write_as_big_endian(&mut target); assert_eq!( - vec![0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, - 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 1u8], - target); + vec![ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 1u8 + ], + target + ); let source = U256([512, 0, 0, 0]); let mut target = vec![0u8; 32]; - source.to_big_endian(&mut target); + source.write_as_big_endian(&mut target); assert_eq!( - vec![0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, - 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 2u8, 0u8], - target); + vec![ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 2u8, 0u8 + ], + target + ); let source = U256([0, 512, 0, 0]); let mut target = vec![0u8; 32]; - source.to_big_endian(&mut target); + source.write_as_big_endian(&mut target); assert_eq!( - vec![0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, - 0u8, 0u8, 0u8, 0u8, 0u8, 2u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8], - target); + vec![ + 0u8, 
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, + 0u8, 2u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8 + ], + target + ); let source = U256::from_str("0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20").unwrap(); - source.to_big_endian(&mut target); + source.write_as_big_endian(&mut target); assert_eq!( - vec![0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, - 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20], - target); + vec![ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, + 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20 + ], + target + ); } #[test] fn u256_multi_muls2() { - let (result, _) = U256([0, 0, 0, 0]).overflowing_mul(U256([0, 0, 0, 0])); assert_eq!(U256([0, 0, 0, 0]), result); @@ -920,7 +993,9 @@ fn u256_multi_muls2() { #[test] fn example() { let mut val: U256 = 1023.into(); - for _ in 0..200 { val = val * U256::from(2) } + for _ in 0..200 { + val = val * U256::from(2) + } assert_eq!(&format!("{}", val), "1643897619276947051879427220465009342380213662639797070513307648"); } @@ -928,34 +1003,26 @@ fn example() { fn little_endian() { let number: U256 = "00022cca1da3f6e5722b7d3cc5bbfb486465ebc5a708dd293042f932d7eee119".into(); let expected = [ - 0x19, 0xe1, 0xee, 0xd7, - 0x32, 0xf9, 0x42, 0x30, - 0x29, 0xdd, 0x08, 0xa7, - 0xc5, 0xeb, 0x65, 0x64, - 0x48, 0xfb, 0xbb, 0xc5, - 0x3c, 0x7d, 0x2b, 0x72, - 0xe5, 0xf6, 0xa3, 0x1d, - 0xca, 0x2c, 0x02, 0x00 + 0x19, 0xe1, 0xee, 0xd7, 0x32, 0xf9, 0x42, 0x30, 0x29, 0xdd, 0x08, 0xa7, 0xc5, 0xeb, 0x65, 0x64, 0x48, 0xfb, + 0xbb, 0xc5, 0x3c, 0x7d, 0x2b, 0x72, 0xe5, 0xf6, 0xa3, 0x1d, 0xca, 0x2c, 0x02, 0x00, ]; let mut result = [0u8; 32]; - number.to_little_endian(&mut result); + number.write_as_little_endian(&mut result); assert_eq!(expected, result); } #[test] fn slice_roundtrip() { let raw = [ - 1u8, 2, 3, 5, 7, 11, 13, 17, - 19, 23, 29, 31, 37, 41, 43, 47, - 53, 59, 61, 67, 71, 73, 79, 83, - 89, 97, 101, 103, 107, 109, 113, 127 + 1u8, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, + 107, 109, 113, 127, ]; - let u256: U256 = (&raw[..]).into(); + let u256 = U256::from_big_endian(&raw[..]); let mut new_raw = [0u8; 32]; - u256.to_big_endian(&mut new_raw); + u256.write_as_big_endian(&mut new_raw); assert_eq!(&raw, &new_raw); } @@ -963,17 +1030,15 @@ fn slice_roundtrip() { #[test] fn slice_roundtrip_le() { let raw = [ - 1u8, 2, 3, 5, 7, 11, 13, 17, - 19, 23, 29, 31, 37, 41, 43, 47, - 53, 59, 61, 67, 71, 73, 79, 83, - 89, 97, 101, 103, 107, 109, 113, 127 + 1u8, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, + 107, 109, 113, 127, ]; let u256 = U256::from_little_endian(&raw[..]); let mut new_raw = [0u8; 32]; - u256.to_little_endian(&mut new_raw); + u256.write_as_little_endian(&mut new_raw); assert_eq!(&raw, &new_raw); } @@ -981,29 +1046,23 @@ fn slice_roundtrip_le() { #[test] fn slice_roundtrip_le2() { let raw = [ - 2, 3, 5, 7, 11, 13, 17, - 19, 23, 29, 31, 37, 41, 43, 47, - 53, 59, 61, 67, 71, 73, 79, 83, - 89, 97, 101, 103, 107, 109, 113, 127 + 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, + 109, 113, 127, ]; let u256 = U256::from_little_endian(&raw[..]); let mut new_raw = [0u8; 32]; - u256.to_little_endian(&mut 
new_raw); + u256.write_as_little_endian(&mut new_raw); assert_eq!(&raw, &new_raw[..31]); } #[test] fn from_little_endian() { - let source: [u8; 32] = [ - 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - ]; + let source: [u8; 32] = + [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; let number = U256::from_little_endian(&source[..]); @@ -1012,54 +1071,43 @@ fn from_little_endian() { #[test] fn from_big_endian() { - let source: [u8; 32] = [ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1, - ]; + let source: [u8; 32] = + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; let number = U256::from_big_endian(&source[..]); assert_eq!(U256::from(1), number); + + let number = U256::from_big_endian(&[]); + assert_eq!(U256::zero(), number); + + let number = U256::from_big_endian(&[1]); + assert_eq!(U256::from(1), number); } #[test] fn into_fixed_array() { - let expected: [u8; 32] = [ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1, - ]; - let ary : [u8; 32] = U256::from(1).into(); + let expected: [u8; 32] = + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; + let ary: [u8; 32] = U256::from(1).to_big_endian(); assert_eq!(ary, expected); } #[test] fn test_u256_from_fixed_array() { - let ary = [ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1, - 0, 0, 0, 0, 0, 0, 0, 123, - ]; - let num : U256 = ary.into(); - assert_eq!( num, U256::from(std::u64::MAX) + 1 + 123); + let ary = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 123]; + let num = U256::from_big_endian(&ary); + assert_eq!(num, U256::from(core::u64::MAX) + 1 + 123); - let a_ref : &U256 = &ary.into(); - assert_eq!( a_ref, &(U256::from(std::u64::MAX) + 1 + 123)); + let a_ref = &U256::from_big_endian(&ary); + assert_eq!(a_ref, &(U256::from(core::u64::MAX) + 1 + 123)); } #[test] fn test_from_ref_to_fixed_array() { - let ary : &[u8; 32] = &[ - 1,0,1,2,1,0,1,2, - 3,0,3,4,3,0,3,4, - 5,0,5,6,5,0,5,6, - 7,0,7,8,7,0,7,8 - ]; - let big : U256 = ary.into(); + let ary: &[u8; 32] = + &[1, 0, 1, 2, 1, 0, 1, 2, 3, 0, 3, 4, 3, 0, 3, 4, 5, 0, 5, 6, 5, 0, 5, 6, 7, 0, 7, 8, 7, 0, 7, 8]; + let big = U256::from_big_endian(ary); // the numbers are each row of 8 bytes reversed and cast to u64 assert_eq!(big, U256([504410889324070664, 360293493601469702, 216176097878868740, 72058702156267778u64])); } @@ -1067,20 +1115,14 @@ fn test_from_ref_to_fixed_array() { #[test] fn test_u512_from_fixed_array() { let ary = [ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 123 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 123, ]; - let num : U512 = ary.into(); - assert_eq!( num, U512::from(123) ); + let num = U512::from_big_endian(&ary); + assert_eq!(num, U512::from(123)); - let a_ref : &U512 = &ary.into(); - assert_eq!( a_ref, &U512::from(123) ); + let a_ref = U512::from_big_endian(&ary); + assert_eq!(a_ref, U512::from(123)); } #[test] @@ -1091,6 +1133,15 @@ fn leading_zeros() { 
assert_eq!(U256::from("0000000000000000000000000000000000000000000000000000000000000000").leading_zeros(), 256); } +#[test] +fn issue_507_roundtrip() { + let mut b32 = <[u8; 32]>::default(); + let a = U256::from(10); + a.write_as_little_endian(&mut b32); + let b = U256::from_little_endian(&b32[..]); + assert_eq!(a, b); +} + #[test] fn trailing_zeros() { assert_eq!(U256::from("1adbdd6bd6ff027485484b97f8a6a4c7129756dd100000000000000000000000").trailing_zeros(), 92); @@ -1099,13 +1150,55 @@ fn trailing_zeros() { assert_eq!(U256::from("0000000000000000000000000000000000000000000000000000000000000000").trailing_zeros(), 256); } -#[cfg(feature="quickcheck")] +#[test] +fn bit_assign() { + fn check(a: U256, b: U256) { + // and + { + let mut x = a; + x &= b; + assert_eq!(x, a & b); + } + // or + { + let mut x = a; + x |= b; + assert_eq!(x, a | b); + } + // xor + { + let mut x = a; + x ^= b; + assert_eq!(x, a ^ b); + } + // shr + { + let mut x = a; + x >>= b; + assert_eq!(x, a >> b); + } + // shl + { + let mut x = a; + x <<= b; + assert_eq!(x, a << b); + } + } + + check(U256::from(9), U256::from(999999)); + check(U256::from(0), U256::from(0)); + check(U256::from(23432), U256::from(u32::MAX)); + check(U256::MAX, U256::zero()); +} + +#[cfg(feature = "quickcheck")] pub mod laws { + use super::construct_uint; macro_rules! uint_laws { ($mod_name:ident, $uint_ty:ident) => { mod $mod_name { - use quickcheck::TestResult; - use super::{$uint_ty}; + use quickcheck::{TestResult, quickcheck}; + use super::$uint_ty; quickcheck! { fn associative_add(x: $uint_ty, y: $uint_ty, z: $uint_ty) -> TestResult { @@ -1203,9 +1296,29 @@ pub mod laws { } } + quickcheck! { + fn isqrt(x: $uint_ty) -> TestResult { + let s = x.integer_sqrt(); + let higher = s + 1; + if let Some(y) = higher.checked_mul(higher) { + TestResult::from_bool( + (s * s <= x) && (y > x) + ) + } else { + TestResult::from_bool( + s * s <= x + ) + } + } + } + quickcheck! { fn pow_mul(x: $uint_ty) -> TestResult { if x.overflowing_pow($uint_ty::from(2)).1 || x.overflowing_pow($uint_ty::from(3)).1 { + // On overflow `checked_pow` should return `None`. + assert_eq!(x.checked_pow($uint_ty::from(2)), None); + assert_eq!(x.checked_pow($uint_ty::from(3)), None); + return TestResult::discard(); }